code | apis | extract_api
---|---|---|
# Copyright (c) 2017, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
"""
Forest Fire Parameter Exploration
=================================
This example shows parallel execution of multiple forest fire
simulations with parameters swept over a range of values to
collect and display statistics about the model.
In this example, we use a modified version of a forest fire
simulation with the following states:
- Empty cell: 0
- Healthy tree: 1
- Burning tree: 2
- Moldy tree: 3
Every tick:
- an empty cell can grow a tree
- fires are randomly started and burn down all connected trees
- crowded trees have a chance of contracting and dying from mold
infection
The script runs the simulation with different values for the
likelihood of mold infection. As the probability grows, a qualitative
decrease can be seen in the size and effect of fires, as the deaths
due to mold have the effect of breaking up large groups of trees into
less-connected groves, making it harder for fire to spread.
"""
import numpy as np
from cellular_automata.automata_recorder import AutomataRecorder, count_states
from cellular_automata.cellular_automaton import CellularAutomaton
from cellular_automata.rules.change_state_rule import ChangeStateRule
from cellular_automata.rules.forest import BurnGrovesRule, MoldRule
# State values
EMPTY = 0
TREE = 1
FIRE = 2
MOLD = 3
def simulation(p_mold, size, steps):
""" Perform a simulation of a moldy forest, returning statistics.
Parameters
----------
p_mold : probability
The probability that a crowded tree dies of mold.
size : size tuple
The number of cells in each direction for the simulation.
steps : int
The number of ticks to run the simulation for.
Returns
-------
counts : array
Array with shape (4, steps) of counts of each state at
each tick.
"""
np.random.seed(None)
# trees grow
grow = ChangeStateRule(
from_state=EMPTY,
to_state=TREE,
p_change=0.0025
)
# fires are started, and all connected trees burn
burn_groves = BurnGrovesRule()
# crowded trees have a chance to be infected with mold
mold = MoldRule(dead_state=MOLD, p_mold=p_mold)
# trees which are infected with mold die
mold_die = ChangeStateRule(
from_state=MOLD,
to_state=EMPTY,
p_change=1.0
)
# fires are extinguished
fire_out = ChangeStateRule(
from_state=FIRE,
to_state=EMPTY,
p_change=1.0
)
forest = CellularAutomaton(
shape=size,
rules=[mold_die, fire_out, grow, burn_groves, mold],
)
# record the number of each state
recorder = AutomataRecorder(automaton=forest, transform=count_states)
forest.start()
for i in range(steps):
forest.step()
return recorder.as_array()
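# Example (illustrative) of reading the result of a single run: rows of the
# returned array are indexed by the state constants above, e.g.
#
#     counts = simulation(p_mold=0.001, size=(64, 64), steps=512)
#     tree_cover = counts[TREE] / (64 * 64)  # fraction of cells with trees
#
# counts[FIRE] and counts[MOLD] likewise give per-tick counts of burning and
# moldy cells, which the parallel sweep below plots for each p_mold value.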
if __name__ == '__main__':
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
SHAPE = (256, 256)
N_STEPS = 4096
N_SIMULATIONS = 16
results = Parallel(n_jobs=4)(
        delayed(simulation)(p_mold, SHAPE, N_STEPS)
for p_mold in np.logspace(-4, -1, N_SIMULATIONS)
)
for i, result in enumerate(results):
# plot count of each non-empty state over time
plt.subplot(N_SIMULATIONS, 2, 2*i+1)
for state, color in [(TREE, 'g'), (FIRE, 'r'), (MOLD, 'c')]:
plt.plot(result[state, :], c=color)
# plot histogram
plt.subplot(N_SIMULATIONS, 2, 2*i+2)
plt.hist(
            np.log(result[TREE, result[TREE] != 0]),
bins=np.linspace(0, 10, 21)
)
plt.show()
|
[
"cellular_automata.rules.forest.BurnGrovesRule",
"matplotlib.pyplot.plot",
"numpy.log",
"numpy.logspace",
"joblib.Parallel",
"matplotlib.pyplot.subplot",
"cellular_automata.rules.forest.MoldRule",
"numpy.linspace",
"numpy.random.seed",
"cellular_automata.automata_recorder.AutomataRecorder",
"joblib.delayed",
"cellular_automata.rules.change_state_rule.ChangeStateRule",
"cellular_automata.cellular_automaton.CellularAutomaton",
"matplotlib.pyplot.show"
] |
[((2204, 2224), 'numpy.random.seed', 'np.random.seed', (['None'], {}), '(None)\n', (2218, 2224), True, 'import numpy as np\n'), ((2254, 2319), 'cellular_automata.rules.change_state_rule.ChangeStateRule', 'ChangeStateRule', ([], {'from_state': 'EMPTY', 'to_state': 'TREE', 'p_change': '(0.0025)'}), '(from_state=EMPTY, to_state=TREE, p_change=0.0025)\n', (2269, 2319), False, 'from cellular_automata.rules.change_state_rule import ChangeStateRule\n'), ((2423, 2439), 'cellular_automata.rules.forest.BurnGrovesRule', 'BurnGrovesRule', ([], {}), '()\n', (2437, 2439), False, 'from cellular_automata.rules.forest import BurnGrovesRule, MoldRule\n'), ((2511, 2551), 'cellular_automata.rules.forest.MoldRule', 'MoldRule', ([], {'dead_state': 'MOLD', 'p_mold': 'p_mold'}), '(dead_state=MOLD, p_mold=p_mold)\n', (2519, 2551), False, 'from cellular_automata.rules.forest import BurnGrovesRule, MoldRule\n'), ((2613, 2675), 'cellular_automata.rules.change_state_rule.ChangeStateRule', 'ChangeStateRule', ([], {'from_state': 'MOLD', 'to_state': 'EMPTY', 'p_change': '(1.0)'}), '(from_state=MOLD, to_state=EMPTY, p_change=1.0)\n', (2628, 2675), False, 'from cellular_automata.rules.change_state_rule import ChangeStateRule\n'), ((2751, 2813), 'cellular_automata.rules.change_state_rule.ChangeStateRule', 'ChangeStateRule', ([], {'from_state': 'FIRE', 'to_state': 'EMPTY', 'p_change': '(1.0)'}), '(from_state=FIRE, to_state=EMPTY, p_change=1.0)\n', (2766, 2813), False, 'from cellular_automata.rules.change_state_rule import ChangeStateRule\n'), ((2858, 2944), 'cellular_automata.cellular_automaton.CellularAutomaton', 'CellularAutomaton', ([], {'shape': 'size', 'rules': '[mold_die, fire_out, grow, burn_groves, mold]'}), '(shape=size, rules=[mold_die, fire_out, grow, burn_groves,\n mold])\n', (2875, 2944), False, 'from cellular_automata.cellular_automaton import CellularAutomaton\n'), ((3018, 3076), 'cellular_automata.automata_recorder.AutomataRecorder', 'AutomataRecorder', ([], {'automaton': 'forest', 'transform': 'count_states'}), '(automaton=forest, transform=count_states)\n', (3034, 3076), False, 'from cellular_automata.automata_recorder import AutomataRecorder, count_states\n'), ((3963, 3973), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3971, 3973), True, 'import matplotlib.pyplot as plt\n'), ((3365, 3383), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(4)'}), '(n_jobs=4)\n', (3373, 3383), False, 'from joblib import Parallel, delayed\n'), ((3619, 3659), 'matplotlib.pyplot.subplot', 'plt.subplot', (['N_SIMULATIONS', '(2)', '(2 * i + 1)'], {}), '(N_SIMULATIONS, 2, 2 * i + 1)\n', (3630, 3659), True, 'import matplotlib.pyplot as plt\n'), ((3807, 3847), 'matplotlib.pyplot.subplot', 'plt.subplot', (['N_SIMULATIONS', '(2)', '(2 * i + 2)'], {}), '(N_SIMULATIONS, 2, 2 * i + 2)\n', (3818, 3847), True, 'import matplotlib.pyplot as plt\n'), ((3737, 3772), 'matplotlib.pyplot.plot', 'plt.plot', (['result[state, :]'], {'c': 'color'}), '(result[state, :], c=color)\n', (3745, 3772), True, 'import matplotlib.pyplot as plt\n'), ((3874, 3907), 'numpy.log', 'np.log', (['result[result[1] != 0, 1]'], {}), '(result[result[1] != 0, 1])\n', (3880, 3907), True, 'import numpy as np\n'), ((3393, 3412), 'joblib.delayed', 'delayed', (['simulation'], {}), '(simulation)\n', (3400, 3412), False, 'from joblib import Parallel, delayed\n'), ((3473, 3507), 'numpy.logspace', 'np.logspace', (['(-4)', '(-1)', 'N_SIMULATIONS'], {}), '(-4, -1, N_SIMULATIONS)\n', (3484, 3507), True, 'import numpy as np\n'), ((3926, 3948), 'numpy.linspace', 
'np.linspace', (['(0)', '(10)', '(21)'], {}), '(0, 10, 21)\n', (3937, 3948), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 11:02:21 2019
@author: elizabethhutton
"""
import pandas as pd
import numpy as np
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from wordcloud import WordCloud
from yellowbrick.cluster import KElbowVisualizer
from sklearn.neighbors import NearestNeighbors
import spacy
import iterate
class Cluster():
def __init__(self,corpus,num_clusters):
"""Perform k-means clustering on corpus.
Keyword Arguments:
corpus -- document corpus as Corpus object
num_clusters -- k clusters to search for
"""
self.k = num_clusters
self.top_words = None
word_vectors = corpus.vectors
kmeans_clustering = KMeans(n_clusters = num_clusters, init='k-means++')
self.model = kmeans_clustering
idx = kmeans_clustering.fit_predict(word_vectors)
self.centers = kmeans_clustering.cluster_centers_
#update corpus vectors with cluster labels
corpus.clusters = pd.DataFrame(idx,columns=['clusterid'],index=word_vectors.index)
return
def get_top_words(self, corpus, knn):
"""Get knn top words for each cluster.
Keyword Arguments:
corpus -- pandas df of words and their vectors
knn -- (int) num words to find per cluster
"""
word_vectors = corpus.vectors
neigh = NearestNeighbors(n_neighbors=knn, metric= 'cosine')
neigh.fit(word_vectors)
top_word_idxs = list()
for center in self.centers:
center = center.reshape(1,-1)
top_n = neigh.kneighbors(center,n_neighbors=knn,return_distance=False)
top_word_idxs.append(top_n)
top_n_words = pd.DataFrame()
for i, cluster in enumerate(top_word_idxs):
cluster_name = 'Cluster ' + str(i)
words = list()
for idx in cluster[0]:
word = word_vectors.iloc[idx].name
words.append(word)
top_n_words[cluster_name] = words
self.top_words = top_n_words
return top_n_words
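# Illustrative sketch (not part of the original module): KElbowVisualizer is
# imported above but unused here; this shows one common way it could be used
# to pick num_clusters before constructing a Cluster. The corpus.vectors
# attribute is assumed to hold the word-vector DataFrame, as elsewhere in
# this file.
def suggest_num_clusters(corpus, k_range=(2, 10)):
    """Fit KMeans over a range of k and return the elbow estimate (or None)."""
    visualizer = KElbowVisualizer(KMeans(init='k-means++'), k=k_range)
    visualizer.fit(corpus.vectors)
    visualizer.show()
    return visualizer.elbow_value_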
def iterate_kmeans(clean_corpus,elbow):
#prep for clustering
clean_corpus.vectorize()
#iterate kmeans over num topics
#methods = 'var','dist','c_h'
elbow.elbow_kmeans_variance(clean_corpus)
elbow.elbow_kmeans_inertia(clean_corpus)
elbow.elbow_kmeans_ch(clean_corpus)
elbow.elbow_kmeans_dist(clean_corpus)
return
#fix
def plot_tsne(word_vectors):
tsne = TSNE(n_components=2, random_state=0, n_iter=5000, perplexity=3)
np.set_printoptions(suppress=True)
T = tsne.fit_transform(word_vectors)
labels = word_vectors.index
plt.figure(figsize=(12, 6))
plt.scatter(T[:, 0], T[:, 1], c='orange', edgecolors='r')
for label, x, y in zip(labels, T[:, 0], T[:, 1]):
plt.annotate(label, xy=(x+1, y+1), xytext=(0, 0), textcoords='offset points')
return
def get_kmeans(clean_corpus,num_topics):
cluster_model = Cluster(clean_corpus,num_topics)
top_words_kmeans = cluster_model.get_top_words(clean_corpus, knn=10)
return cluster_model,top_words_kmeans
|
[
"sklearn.cluster.KMeans",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.annotate",
"matplotlib.pyplot.figure",
"sklearn.neighbors.NearestNeighbors",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"numpy.set_printoptions"
] |
[((829, 878), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_clusters', 'init': '"""k-means++"""'}), "(n_clusters=num_clusters, init='k-means++')\n", (835, 878), False, 'from sklearn.cluster import KMeans\n'), ((1131, 1197), 'pandas.DataFrame', 'pd.DataFrame', (['idx'], {'columns': "['clusterid']", 'index': 'word_vectors.index'}), "(idx, columns=['clusterid'], index=word_vectors.index)\n", (1143, 1197), True, 'import pandas as pd\n'), ((1528, 1578), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'knn', 'metric': '"""cosine"""'}), "(n_neighbors=knn, metric='cosine')\n", (1544, 1578), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((1878, 1892), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1890, 1892), True, 'import pandas as pd\n'), ((2721, 2784), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': '(0)', 'n_iter': '(5000)', 'perplexity': '(3)'}), '(n_components=2, random_state=0, n_iter=5000, perplexity=3)\n', (2725, 2784), False, 'from sklearn.manifold import TSNE\n'), ((2793, 2827), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (2812, 2827), True, 'import numpy as np\n'), ((2926, 2953), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2936, 2953), True, 'from matplotlib import pyplot as plt\n'), ((2962, 3019), 'matplotlib.pyplot.scatter', 'plt.scatter', (['T[:, 0]', 'T[:, 1]'], {'c': '"""orange"""', 'edgecolors': '"""r"""'}), "(T[:, 0], T[:, 1], c='orange', edgecolors='r')\n", (2973, 3019), True, 'from matplotlib import pyplot as plt\n'), ((3090, 3176), 'matplotlib.pyplot.annotate', 'plt.annotate', (['label'], {'xy': '(x + 1, y + 1)', 'xytext': '(0, 0)', 'textcoords': '"""offset points"""'}), "(label, xy=(x + 1, y + 1), xytext=(0, 0), textcoords=\n 'offset points')\n", (3102, 3176), True, 'from matplotlib import pyplot as plt\n')]
|
"""
Code for the optimization and gaming component of the Baselining work.
@author: <NAME>, <NAME>
@date Mar 2, 2016
"""
import numpy as np
import pandas as pd
import logging
from gurobipy import GRB, Model, quicksum, LinExpr
from pandas.tseries.holiday import USFederalHolidayCalendar
from datetime import datetime
from .utils import (get_energy_charges, get_demand_charge, dem_charges, dem_charges_yearly,
get_pdp_demand_credit, get_DR_rewards, powerset, E19,
carbon_costs)
# define some string formatters
psform = '%Y-%m-%d %H:%M'
dsform = '%Y-%m-%d'
class BLModel(object):
"""
Abstract base class for Baselining models.
"""
def __init__(self, name):
"""
        Construct a baselining model with the given name, backed by a
        new gurobipy Model object.
"""
self._name = name
self._model = Model()
def get_model(self):
"""
        Return the underlying gurobipy Model object.
"""
return self._model
def set_dynsys(self, dynsys):
"""
Initialize dynamical system for underlying dynamics.
"""
self._dynsys = dynsys
def set_window(self, index):
"""
Set the window for the optimization. Here index is a pandas
DatetimeIndex.
"""
self._index = index
self._dynsys.set_window(index)
def energy_charges(self, tariff, isRT=False, LMP=None, isPDP=False,
twindow=None, carbon=False):
"""
        Return total energy consumption charges (as determined by the
tariff's energy charge) as a gurobipy LinExpr.
"""
locidx = self._index.tz_convert('US/Pacific')
year = locidx[0].year
if isRT and isPDP:
raise Exception('Cannot combine RTP and PDP.')
nrg_charges = get_energy_charges(
self._index, tariff, isRT=isRT, LMP=LMP,
isPDP=isPDP, carbon=carbon, year=year)['EnergyCharge']
cons = self._dynsys.get_consumption()['energy']
if twindow is None:
# echrg_= quicksum([ec * con for ec, con in
# zip(nrg_charges.values, cons.values)])
echrg_ = [ec * con for ec, con in
zip(nrg_charges.values, cons.values)]
echrg = pd.Series(echrg_, index=locidx)
else:
nrg_charges_ = nrg_charges.loc[twindow[0]:twindow[1]]
cons_ = cons.loc[twindow[0]:twindow[1]]
# echrg = quicksum([ec * con for ec, con in
# zip(nrg_charges_.values, cons_.values)])
echrg_ = [ec * con for ec, con in
zip(nrg_charges_.values, cons_.values)]
indx = locidx[locidx.get_loc(twindow[0]):
locidx.get_loc(twindow[1])+1]
echrg = pd.Series(echrg_, index=indx)
return echrg
def demand_charges(self, tariff, isPDP=False):
"""
Return the total demand charges under the tariff as a
gurobipy LinExpr.
"""
        # determine for which year/month combinations there is a demand charge,
# and create a variable for each of them
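        # the hasattr/remove blocks below clear any max-consumption variables
        # and constraints left over from a previous call, so the demand-charge
        # part of the model can be rebuilt from scratch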
if hasattr(self, '_maxcon'):
for maxcon in self._maxcon.values():
self._model.remove(maxcon)
del self._maxcon
if hasattr(self, '_maxconbnd'):
for maxconbnd in self._maxconbnd.values():
self._model.remove(maxconbnd)
del self._maxconbnd
if hasattr(self, '_maxconppk'):
for maxconppk in self._maxconppk.values():
self._model.remove(maxconppk)
del self._maxconppk
if hasattr(self, '_maxconppkbnd'):
for maxconppkbnd in self._maxconppkbnd.values():
self._model.remove(maxconppkbnd)
del self._maxconppkbnd
if hasattr(self, '_maxconpk'):
for maxconpk in self._maxconpk.values():
self._model.remove(maxconpk)
del self._maxconpk
if hasattr(self, '_maxconpkbnd'):
for maxconpkbnd in self._maxconpkbnd.values():
self._model.remove(maxconpkbnd)
del self._maxconpkbnd
if hasattr(self, '_maxconpks'):
for maxconpks in self._maxconpks.values():
self._model.remove(maxconpks)
del self._maxconpks
if hasattr(self, '_maxconppkw'):
for maxconppkw in self._maxconppkw.values():
self._model.remove(maxconppkw)
del self._maxconppkw
if hasattr(self, '_maxconppkbndw'):
for maxconppkbndw in self._maxconppkbndw.values():
self._model.remove(maxconppkbndw)
del self._maxconppkbndw
if hasattr(self, '_maxconppks'):
for maxconppks in self._maxconppks.values():
self._model.remove(maxconppks)
del self._maxconppks
if hasattr(self, '_maxconppkbnds'):
for maxconppkbnds in self._maxconppkbnds.values():
self._model.remove(maxconppkbnds)
del self._maxconppkbnds
self._model.update()
locidx = self._index.tz_convert('US/Pacific')
ym_dict = {year: np.unique(locidx[locidx.year == year].month)
for year in np.unique(locidx.year)}
indx = []
for year, months in ym_dict.items():
for month in months:
indx.append(pd.Timestamp(datetime(year, month, 1),
tz='US/Pacific'))
if tariff in dem_charges:
if not(tariff in E19):
self._maxcon, self._maxconbnd = {}, {}
# locidx = self._index.tz_convert('US/Pacific')
# print locidx
# the following creates a dictionary with all years in the data
# as keys, and for each year the value is an array of (unique)
# months that appear during that year. This is used for keeping
                # track of the peak consumption for the demand charge
# ym_dict = {year: np.unique(locidx[locidx.year == year].month)
# for year in np.unique(locidx.year)}
# indx=[]
for year, months in ym_dict.items():
for month in months:
# declare variable for max consumption
self._maxcon[year, month] = self._model.addVar(
vtype=GRB.CONTINUOUS,
name='maxcon[{},{}]'.format(year, month))
# indx.append(pd.Timestamp(datetime(year,month,1),tz='US/Pacific'))
self._model.update()
# now add in the necessary constraints and update objective
dcharges = []
cons = self._dynsys.get_consumption()['power']
for year, months in ym_dict.items():
for month in months:
relcons = cons[(locidx.year == year) &
(locidx.month == month)].values
for i, con in enumerate(relcons):
self._maxconbnd[year, month, i] = self._model.addConstr(
lhs=self._maxcon[year, month],
sense=GRB.GREATER_EQUAL,
rhs=con, name='maxconbnd[{},{},{}]'.format(
year, month, i))
# dcharges += (get_demand_charge(tariff, month, isPDP)*
# self._maxcon[year, month])
dcharges.append(
(get_demand_charge(tariff, month, isPDP, year=year) *
self._maxcon[year, month]))
dcharges = pd.Series(dcharges, index=indx)
self._model.update()
return dcharges
else:
                # for E19 tariffs
idx_ = self._index.tz_convert('US/Pacific')
                iswknd = idx_.dayofweek >= 5 # Saturday (5) and Sunday (6)
holidays = USFederalHolidayCalendar().holidays(
idx_.min(), idx_.max())
iswknd = iswknd | pd.DatetimeIndex(idx_.date).isin(holidays)
issummer = (idx_.month >= 5) & (idx_.month <= 10)
ToD = idx_.hour + idx_.minute / 60
ispeak = ~iswknd & issummer & (ToD >= 12) & (ToD < 18)
ispartial_summer = (~iswknd & issummer & (
((ToD >= 8.5) & (ToD < 12)) |
((ToD >= 18) & (ToD < 21.5))))
ispartial_winter = ~iswknd & ~issummer & (
(ToD >= 8.5) & (ToD < 21.5))
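                # Time-of-use windows encoded by the masks above: "summer" is
                # May-October; on summer business days peak is 12:00-18:00 and
                # part-peak is 8:30-12:00 plus 18:00-21:30; on winter business
                # days part-peak is 8:30-21:30 (weekends and federal holidays
                # are excluded from peak/part-peak via iswknd).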
# create dictionaries for variables
self._maxcon, self._maxconbnd = {}, {}
self._maxconppks, self._maxconppkbnds = {}, {}
self._maxconpks, self._maxconpkbnds = {}, {}
self._maxconpk, self._maxconpkbnd = {}, {}
self._maxconppk, self._maxconppkbnd = {}, {}
self._maxconppkw, self._maxconppkbndw = {}, {}
# locidx = self._index.tz_convert('US/Pacific')
# ym_dict = {year: np.unique(locidx[locidx.year == year].month)
# for year in np.unique(locidx.year)}
# indx=[]
for year, months in ym_dict.items():
for month in months:
# declare variable for max consumption
self._maxcon[year, month] = self._model.addVar(
vtype=GRB.CONTINUOUS,
name='maxcon[{},{}]'.format(year, month))
# declare variable for part peak consumption
self._maxconppk[year, month] = self._model.addVar(
vtype=GRB.CONTINUOUS,
name='maxconppk[{},{}]'.format(year, month))
# declare variable for max peak only in summer
if (5 <= month) & (month <= 10):
# add variable for maximum peak usage in summer
self._maxconpk[year, month] = self._model.addVar(
vtype=GRB.CONTINUOUS,
name='maxconpk[{},{}]'.format(year, month))
# indx.append(pd.Timestamp(datetime(year,month,1),tz='US/Pacific'))
self._model.update() # update model
# now add in the necessary constraints and update objective
dcharges = []
cons = self._dynsys.get_consumption()['power']
for year, months in ym_dict.items():
for month in months:
dchrg = 0.0
# for peak summer less than max demand
if (month >= 5) & (month <= 10):
self._maxconpkbnd[year, month] = self._model.addConstr(
lhs=self._maxcon[year, month],
sense=GRB.GREATER_EQUAL,
rhs=self._maxconpk[year, month],
name='maxconpkbnd[{},{}]'.format(year, month))
# max partial peak summer greater than consumption
ppconsum = cons[(ispartial_summer) &
(locidx.year == year) &
(locidx.month == month)].values
for i, con in enumerate(ppconsum):
self._maxconppkbnds[year, month, i] = self._model.addConstr(
lhs=self._maxconppk[year, month],
sense=GRB.GREATER_EQUAL,
rhs=con,
name='maxconppkbnds[{},{},{}]'.format(
year, month, i))
# max peak consumption summer
pconsum = cons[(ispeak) & (locidx.year == year) &
(locidx.month == month)].values
for i, con in enumerate(pconsum):
self._maxconpkbnds[year, month, i] = self._model.addConstr(
lhs=self._maxconpk[year, month],
sense=GRB.GREATER_EQUAL,
rhs=con,
name='maxconpkbnds[{},{},{}]'.format(
year, month, i))
# max partial peak winter
ppkconwin = cons[(ispartial_winter) &
(locidx.year == year) &
(locidx.month == month)].values
for i, con in enumerate(ppkconwin):
self._maxconppkbndw[year, month, i] = self._model.addConstr(
lhs=self._maxconppk[year, month],
sense=GRB.GREATER_EQUAL,
rhs=con,
name='maxconppkbndw[{},{},{}]'.format(
year, month, i))
# max demand each month
relcons = cons[(locidx.year == year) &
(locidx.month == month)].values
for i, con in enumerate(relcons):
self._maxconbnd[year, month, i] = self._model.addConstr(
lhs=self._maxcon[year, month],
sense=GRB.GREATER_EQUAL,
rhs=con, name='maxconbnd[{},{},{}]'.format(
year, month, i))
# max partial peaks (summer & winter) < than max demand
self._maxconppkbnd[year, month, i] = self._model.addConstr(
lhs=self._maxcon[year, month],
sense=GRB.GREATER_EQUAL,
rhs=self._maxconppk[year, month],
name='maxconppkbnd[{},{},{}]'.format(
year, month, i))
demchrg = get_demand_charge(tariff, month, year=year)
if (month >= 5) & (month <= 10):
mpeakchg = demchrg['mpeak']
ppeakchg = demchrg['ppeak']
maxchg = demchrg['max']
if isPDP:
pdpcred = get_pdp_demand_credit(tariff, month, year=year)
mpeakchg = mpeakchg - pdpcred['peak']
dchrg += mpeakchg * self._maxconpk[year, month]
# dcharges.append(mpeakchg * self._maxconpk[year, month])
else:
ppeakchg = demchrg['ppeak']
maxchg = demchrg['max']
# add partpeak and maximum demand charge
dcharges.append(
(maxchg * self._maxcon[year, month] +
ppeakchg * self._maxconppk[year, month])+dchrg)
self._model.update()
dcharges = pd.Series(dcharges, index=indx)
return dcharges
else:
return pd.Series([LinExpr(0.0) for ij in
range(0, np.size(indx, 0))], index=indx)
def DR_compensation(self, LMP, dr_periods, BL='CAISO', **kwargs):
"""
Return compensation for DR, i.e. reductions w.r.t. baseline.
Here LMP is a pandas Series (indexed by a tz-aware pandas
Datetimeindex containing all of the object's indices) and
dr_periods is a pandas DatetimeIndex.
"""
# start by removing all variables (might be inefficient, but o/w it
        # is a pain in the ass to deal with the multihour baselines etc.)
self._removeOld()
# no work if no DR events are specified
if (LMP is None) or (dr_periods is None):
return pd.Series([0.0], index=['None'])
# get DR rewards (in case we want LMP-G instead of LMP)
DR_rewards = get_DR_rewards(LMP, isLMPmG=kwargs.get('isLMPmG'),
tariff=kwargs.get('tariff'))
# populate optimization problem for proper BL choices
if BL == 'CAISO':
# print self._DR_comp_CAISO(DR_rewards, dr_periods)
return self._DR_comp_CAISO(DR_rewards, dr_periods)
elif BL == 'expMA':
return self._DR_comp_expMA(DR_rewards, dr_periods, **kwargs)
else:
raise NotImplementedError(
'Baseline type "{}" not known!'.format(BL))
def _DR_comp_CAISO(self, LMP, dr_periods):
"""
Return compensation for DR, i.e. reductions w.r.t. CAISO baseline.
Here LMP is a pandas Series (indexed by a tz-aware pandas
Datetimeindex containing all of the object's indices) and
dr_periods is a pandas DatetimeIndex. Note that LMP may also be
LMP-G, i.e. the LMP minus the generation component of the tariff.
"""
valid_periods = dr_periods[dr_periods.isin(self._index)].tz_convert(
'US/Pacific')
locidx = self._index.tz_convert('US/Pacific')
grouped = valid_periods.groupby(valid_periods.date)
# define auxiliary variables for each possible dr period if none exist
self._red, self._z, self._bl = {}, {}, {}
self._redpos, self._redBL, self._red0, self._blcon = {}, {}, {}, {}
self._dr_periods = valid_periods
# add variables if there are days w/ multiple possible DR events
if np.max([len(grp) for grp in grouped.values()]) > 1:
self._zday, self._zdaysum, self._zdaymax = {}, {}, {}
# now create variables for different days and periods within each day
for day, periods in grouped.items():
daystr = day.strftime(dsform)
perstrs = [per.strftime(psform) for per in periods]
if len(periods) > 1:
self._zday[daystr] = self._model.addVar(
vtype=GRB.BINARY, name='zday[{}]'.format(daystr))
for period, perstr in zip(periods, perstrs):
self._red[perstr] = self._model.addVar(
vtype=GRB.CONTINUOUS, name='red[{}]'.format(perstr))
self._z[perstr] = self._model.addVar(
vtype=GRB.BINARY, name='z[{}]'.format(perstr))
self._bl[perstr] = self._model.addVar(
vtype=GRB.CONTINUOUS, name='bl[{}]'.format(perstr))
        self._model.update() # this must be done before defining constraints
# determine "bigM" value from the bounds on the control variables
M = np.sum(np.asarray(self._dynsys._opts['nrg_coeffs']) *
(self._dynsys._opts['umax'] - self._dynsys._opts['umin']),
axis=1).max()
        # if u is not bounded then the above results in a NaN value. We need
        # to deal with this in a better way than the following:
if np.isnan(M):
M = 1e9
# perform some preparations for the constraints
# drcomp = 0.0
nrgcons = self._dynsys.get_consumption()['energy']
lmps = LMP.tz_convert('US/Pacific').loc[locidx] / 1000 # to $/kWh
holidays = USFederalHolidayCalendar().holidays(
start=locidx.min(), end=locidx.max())
isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
isBusiness = pd.Series(isBusiness, index=locidx)
        # add constraints on variable zday (if multiple periods per day)
for day, periods in grouped.items():
daystr = day.strftime(dsform)
perstrs = [per.strftime(psform) for per in periods]
if len(periods) > 1:
self._zdaysum[daystr] = self._model.addConstr(
lhs=self._zday[daystr],
sense=GRB.LESS_EQUAL,
rhs=quicksum([self._z[ps] for ps in perstrs]),
name='zdaysum[{}]'.format(daystr))
for period, perstr in zip(periods, perstrs):
self._zdaymax[perstr] = self._model.addConstr(
lhs=self._zday[daystr],
sense=GRB.GREATER_EQUAL,
rhs=self._z[perstr],
name='zdaymax[{}]'.format(perstr))
self._model.update()
        # formulate constraints and add terms to objective
drcomp_ = []
for i, day in enumerate(grouped):
periods = grouped[day]
# print('Formulating constraints for day {} of {}'.format(
# i, len(grouped)))
perstrs = [per.strftime(psform) for per in periods]
for period, perstr in zip(periods, perstrs):
per_select = ((locidx < period) &
(locidx.hour == period.hour) &
(locidx.minute == period.minute))
if isBusiness.loc[period]:
nmax = 10
per_select = per_select & isBusiness.values
else:
nmax = 4
per_select = per_select & (~isBusiness.values)
similars = locidx[per_select].sort_values(ascending=False)
                # now go through similar days successively
sim_nonDR, sim_DR, sim_DR_mult = [], [], []
for sim in similars:
if len(sim_nonDR) == nmax:
continue
if sim in self._dr_periods:
sim_DR += [sim]
if len(grouped[pd.Timestamp(sim.date())]) > 1:
sim_DR_mult += [sim]
else:
sim_nonDR += [sim]
sim_DR = pd.DatetimeIndex(
sim_DR).sort_values(ascending=False)
sim_DR_mult = pd.DatetimeIndex(
sim_DR_mult).sort_values(ascending=False)
sim_nonDR = pd.DatetimeIndex(
sim_nonDR).sort_values(ascending=False)
# get consumption variables
cons_nonDR = nrgcons.loc[sim_nonDR].values
                # Now add constraints on the baseline variables
for idxset in powerset(range(len(sim_DR))):
K = [sim_DR[i] for i in idxset]
Kc = [sim_DR[i] for i in range(len(sim_DR))
if i not in idxset]
qK = nrgcons.loc[K].values.tolist()
# Need to make sure to use zday if there are multiple
# events possible that day!
zK, zKc = [], []
for k in K:
if k in sim_DR_mult:
zK.append(self._zday[k.strftime(dsform)])
else:
zK.append(self._z[k.strftime(psform)])
for kc in Kc:
if kc in sim_DR_mult:
zKc.append(self._zday[kc.strftime(dsform)])
else:
zKc.append(self._z[kc.strftime(psform)])
# the following uses that the "closest" days appear first
qD = cons_nonDR[:nmax-len(idxset)].tolist()
n = len(sim_nonDR)
if n == 0:
print('No non-DR day available for BL computation -' +
' too many DR events!')
bnd = (quicksum(qD + qK) / float(n) +
M * quicksum(zK) +
M * quicksum([(1-z) for z in zKc]))
self._blcon[perstr, idxset] = self._model.addConstr(
lhs=self._bl[perstr], sense=GRB.LESS_EQUAL,
rhs=bnd, name="blcon[{},{}]".format(perstr, idxset))
# add constraints on baseline reduction
self._redpos[perstr] = self._model.addConstr(
lhs=self._red[perstr], sense=GRB.GREATER_EQUAL,
rhs=0.0, name='redpos[{}]'.format(perstr))
self._redBL[perstr] = self._model.addConstr(
lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
rhs=self._bl[perstr] - nrgcons.loc[period],
name='redBL[{}]'.format(perstr))
self._red0[perstr] = self._model.addConstr(
lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
rhs=self._z[perstr] * M, name='red0[{}]'.format(perstr))
# add DR compensation to objective
# drcomp += lmps.loc[period] * self._red[perstr]
drcomp_.append(lmps.loc[period] * self._red[perstr])
drcomp = pd.Series(drcomp_, index=self._dr_periods)
self._model.update()
return drcomp
def _DR_comp_expMA(self, LMP, dr_periods, **kwargs):
"""
        Return compensation for DR, i.e. reductions w.r.t. the expMA baseline.
Here LMP is a pandas Series (indexed by a tz-aware pandas
Datetimeindex containing all of the object's indices) and
        dr_periods is a pandas DatetimeIndex. Note that LMP may also be
LMP-G, i.e. the LMP minus the generation component of the tariff.
"""
# set default values for alphas if not passed as kwargs
if 'alpha_b' in kwargs:
alpha_b = kwargs['alpha_b']
else:
alpha_b = 0.175 # business day
if 'alpha_nb' in kwargs:
alpha_nb = kwargs['alpha_nb']
else:
alpha_nb = 0.25 # non-business day
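        # Baseline recursion encoded below, tracked per hour of day and
        # separately for business and non-business days (initialized at 0):
        #   non-event period:  bl[t] = alpha * q[t] + (1 - alpha) * bl[t-1]
        #   event period:      bl[t] = bl[t-1]          (baseline is frozen)
        # where q[t] is the energy consumption and alpha is alpha_b or
        # alpha_nb. For candidate DR periods the two cases are selected by the
        # binary z variables through big-M constraints.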
valid_periods = dr_periods[dr_periods.isin(self._index)]
locidx = self._index.tz_convert('US/Pacific')
grouped = valid_periods.groupby(
valid_periods.tz_convert('US/Pacific').date)
# define auxiliary variables for each possible dr period if none exist
self._red, self._z, self._bl = {}, {}, {}
self._redpos, self._redBL, self._red0, self._blcon = {}, {}, {}, {}
self._dr_periods = valid_periods
# add variables if there are days w/ multiple possible DR events
if np.max([len(grp) for grp in grouped.values()]) > 1:
self._zday, self._zdaysum, self._zdaymax = {}, {}, {}
# now create variables for different days and periods within each day
for day, periods in grouped.items():
daystr = day.strftime(dsform)
perstrs = [per.strftime(psform) for per in periods]
if len(periods) > 1:
self._zday[daystr] = self._model.addVar(
vtype=GRB.BINARY, name='zday[{}]'.format(daystr))
for period, perstr in zip(periods, perstrs):
self._red[perstr] = self._model.addVar(
vtype=GRB.CONTINUOUS, name='red[{}]'.format(perstr))
self._z[perstr] = self._model.addVar(
vtype=GRB.BINARY, name='z[{}]'.format(perstr))
# for the expMA we have to define a variable for the bl value
# for every period of the simulation range
for per in self._index:
perstr = per.strftime(psform)
self._bl[perstr] = self._model.addVar(
vtype=GRB.CONTINUOUS, name='bl[{}]'.format(perstr))
        self._model.update() # this must be done before defining constraints
# determine "bigM" value from the bounds on the control variables
M = np.sum(np.asarray(self._dynsys._opts['nrg_coeffs']) *
(self._dynsys._opts['umax'] - self._dynsys._opts['umin']),
axis=1).max()
        # if u is not bounded then the above results in a NaN value. We need
        # to deal with this in a better way than the following:
if np.isnan(M):
M = 1e9
# perform some preparations for the constraints
drcomp_ = []
nrgcons = self._dynsys.get_consumption()['energy']
lmps = LMP.tz_convert('US/Pacific').loc[locidx] / 1000 # to $/kWh
holidays = USFederalHolidayCalendar().holidays(
start=locidx.min(), end=locidx.max())
isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
isBusiness = pd.Series(isBusiness, index=locidx)
        # add constraints on variable zday (if multiple periods per day)
for day, periods in grouped.items():
daystr = day.strftime(dsform)
perstrs = [per.strftime(psform) for per in periods]
if len(periods) > 1:
self._zdaysum[daystr] = self._model.addConstr(
lhs=self._zday[daystr],
sense=GRB.LESS_EQUAL,
rhs=quicksum([self._z[ps] for ps in perstrs]),
name='zdaysum[{}]'.format(daystr))
for period, perstr in zip(periods, perstrs):
self._zdaymax[perstr] = self._model.addConstr(
lhs=self._zday[daystr],
sense=GRB.GREATER_EQUAL,
rhs=self._z[perstr],
name='zdaymax[{}]'.format(perstr))
self._model.update()
# now add the constraints that define the baseline as well as a
# bunch of other stuff
for cons, alpha in zip([nrgcons[isBusiness], nrgcons[~isBusiness]],
[alpha_b, alpha_nb]):
# localize consumption index
considxloc = cons.index.tz_convert('US/Pacific')
# compute BLs for each hour separately
con_hrly = {hour: cons[considxloc.hour == hour].sort_index()
for hour in range(24)}
for hour, con in con_hrly.items():
# set the initial value of the BL to zero (this should not have
                # an overly large effect over the course of a year or so...)
# NOTE: This assumes that the first occurrence of an hour (for
# both business and non-business days) is NOT a potential event
perstr_pre = con.index[0].strftime(psform)
self._blcon[perstr_pre, 'init'] = self._model.addConstr(
lhs=self._bl[perstr_pre], sense=GRB.EQUAL,
rhs=0.0, name='blcon[{}]'.format(perstr_pre))
# now loop through the rest
for period, q in con.iloc[1:].items():
perstr = period.strftime(psform)
# if the period under consideration is a DR period,
# we have to do some work ...
if period in valid_periods:
# need to use zday if this day has multiple DR events
dt = period.tz_convert('US/Pacific').date()
if len(grouped[dt]) > 1:
z = self._zday[dt.strftime(dsform)]
else:
z = self._z[perstr]
# add big M constraints on the bl
self._blcon[perstr, 'static'] = self._model.addConstr(
lhs=self._bl[perstr], sense=GRB.LESS_EQUAL,
rhs=self._bl[perstr_pre] + M * (1 - z),
name='blcon[{},static]'.format(perstr))
self._blcon[perstr, 'change'] = self._model.addConstr(
lhs=self._bl[perstr], sense=GRB.LESS_EQUAL,
rhs=alpha*q + (1-alpha)*self._bl[perstr_pre] + M*z,
name='blcon[{},change]'.format(perstr))
# add constraints on baseline reduction
self._redpos[perstr] = self._model.addConstr(
lhs=self._red[perstr], sense=GRB.GREATER_EQUAL,
rhs=0.0, name='redpos[{}]'.format(perstr))
self._redBL[perstr] = self._model.addConstr(
lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
rhs=self._bl[perstr] - q,
name='redBL[{}]'.format(perstr))
self._red0[perstr] = self._model.addConstr(
lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
rhs=self._z[perstr] * M,
name='red0[{}]'.format(perstr))
# add DR compensation to objective
drcomp_.append(
(lmps.loc[period.tz_convert('US/Pacific')] *
self._red[perstr]))
# ... otherwise this is pretty straightforward
else:
self._blcon[perstr] = self._model.addConstr(
lhs=self._bl[perstr], sense=GRB.EQUAL,
rhs=alpha * q + (1 - alpha) * self._bl[perstr_pre],
name='blcon[{}]'.format(perstr))
# update and keep track of last bl variable
perstr_pre = perstr
drcomp = pd.Series(drcomp_, index=self._dr_periods)
self._model.update()
return drcomp
def DR_comp_blfix(self, LMP, bl_values, **kwargs):
"""
Return compensation for DR, i.e. reductions w.r.t. baseline.
Here LMP is a pandas Series (indexed by a tz-aware pandas
Datetimeindex containing all of the object's indices) and
bl_values is a pandas Series, whose index is a DatetimeIndex,
each entry of which represents a possible DR period, and whose
values are the baseline values for those periods (assumed fixed).
This is used for solving the baseline-taking equilibrium problem.
Note that LMP may also be LMP-G, i.e. the LMP minus the generation
component of the tariff.
"""
self._removeOld()
self._blvals = bl_values[
bl_values.index.isin(self._index)].tz_convert('US/Pacific')
locidx = self._index.tz_convert('US/Pacific')
self._grouped = self._blvals.index.groupby(self._blvals.index.date)
# define dictionaries to store variables in
self._red, self._z = {}, {}
self._redpos, self._redBL, self._red0 = {}, {}, {}
# create variables for different days and periods within each day
for day, periods in self._grouped.items():
perstrs = [per.strftime(psform) for per in periods]
for period, perstr in zip(periods, perstrs):
self._red[perstr] = self._model.addVar(
vtype=GRB.CONTINUOUS, name='red[{}]'.format(perstr))
self._z[perstr] = self._model.addVar(
vtype=GRB.BINARY, name='z[{}]'.format(perstr))
        self._model.update() # must be done before defining constraints
# determine "bigM" value from the bounds on the control variables
M = np.sum(np.asarray(self._dynsys._opts['nrg_coeffs']) *
(self._dynsys._opts['umax'] -
self._dynsys._opts['umin']), axis=1).max()
        # if u is not bounded then the above results in a NaN value. We
        # need to deal with this in a better way than the following:
if np.isnan(M):
M = 1e9
# perform some preparations for the constraints
self._drcomp = 0.0
nrgcons = self._dynsys.get_consumption()['energy']
DR_rewards = get_DR_rewards(LMP, isLMPmG=kwargs.get('isLMPmG'),
tariff=kwargs.get('tariff'))
        # Pick out relevant dates and convert to $/kWh
DR_rewards = DR_rewards.tz_convert('US/Pacific').loc[locidx] / 1000
holidays = USFederalHolidayCalendar().holidays(
start=locidx.min(), end=locidx.max())
isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
isBusiness = pd.Series(isBusiness, index=locidx)
        # formulate constraints and add terms to objective
for i, day in enumerate(self._grouped):
periods = self._grouped[day]
perstrs = [per.strftime(psform) for per in periods]
for period, perstr in zip(periods, perstrs):
# add constraints on baseline reduction
self._redpos[perstr] = self._model.addConstr(
lhs=self._red[perstr], sense=GRB.GREATER_EQUAL,
rhs=0.0, name='redpos[{}]'.format(perstr))
self._redBL[perstr] = self._model.addConstr(
lhs=(self._red[perstr] + nrgcons.loc[period] -
(1-self._z[perstr]) * M),
sense=GRB.LESS_EQUAL, rhs=self._blvals.loc[period],
name='redBL[{}]'.format(perstr))
self._red0[perstr] = self._model.addConstr(
lhs=self._red[perstr], sense=GRB.LESS_EQUAL,
rhs=self._z[perstr] * M, name='red0[{}]'.format(
perstr))
# add DR compensation to objective
self._drcomp += DR_rewards.loc[period] * self._red[perstr]
self._model.update()
return self._drcomp
def compute_baseline(self, bl_periods, red_times=None, BL='CAISO',
**kwargs):
"""
        Compute the baseline of the type given by BL for all elements of the
        pandas DatetimeIndex bl_periods. If red_times is a DatetimeIndex,
regard the associated days as "event days" (in addition to
weekend days and holidays).
"""
if BL == 'CAISO':
return self._BL_CAISO(bl_periods, red_times=red_times)
elif BL == 'expMA':
return self._BL_expMA(bl_periods, red_times=red_times,
**kwargs)
else:
raise NotImplementedError(
'Baseline type "{}" not known!'.format(BL))
def _BL_CAISO(self, bl_periods, red_times=None):
"""
Compute the CAISO baseline for all elements of the pandas
Datetimeindex bl_periods. If red_times is a Datetimeindex,
regard the associated days as "event days" (in addition to
weekend days and holidays).
"""
locidx = self._index.tz_convert('US/Pacific')
cons = self._dynsys.get_consumption()['energy'].tz_convert(
'US/Pacific')
holidays = USFederalHolidayCalendar().holidays(
start=locidx.min(), end=locidx.max())
isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
isBusiness = pd.Series(isBusiness, index=locidx)
if red_times is not None:
isEventDay = locidx.normalize().isin(red_times.tz_convert(
'US/Pacific').normalize())
blidx, blvals = bl_periods.tz_convert('US/Pacific'), []
for period in blidx:
per_select = ((locidx < period) &
(locidx.hour == period.hour) &
(locidx.minute == period.minute))
if isBusiness.loc[period]:
nmax = 10
per_select = per_select & isBusiness.values
else:
nmax = 4
per_select = per_select & (~isBusiness.values)
if red_times is not None:
per_select = per_select & (~isEventDay)
similars = locidx[per_select].sort_values(ascending=False)[:nmax]
blvals.append(np.sum([c.getValue() for c in cons.loc[similars]]) /
float(len(similars)))
return pd.Series(blvals, index=blidx.tz_convert('GMT'))
def _BL_expMA(self, bl_periods, red_times=None, alpha_b=0.14,
alpha_nb=0.32):
"""
        Compute the expMA baseline for all elements of the pandas
        DatetimeIndex bl_periods using the smoothing parameters alpha_b
        (business days) and alpha_nb (non-business days).
If red_times is a Datetimeindex, regard the associated days as
"event days" (in addition to weekend days and holidays).
"""
locidx = self._index.tz_convert('US/Pacific')
cons = self._dynsys.get_consumption()['energy'].tz_convert(
'US/Pacific')
cons = pd.Series([c.getValue() for c in cons],
index=cons.index)
holidays = USFederalHolidayCalendar().holidays(
start=locidx.min(), end=locidx.max())
isBusiness = (locidx.dayofweek < 5) & (~locidx.isin(holidays))
isBusiness = pd.Series(isBusiness, index=locidx)
bls = []
for con, alpha in zip([cons[isBusiness], cons[~isBusiness]],
[alpha_b, alpha_nb]):
            # determine initial values for the BL from non-DR data
if red_times is not None:
nDRc = con[~con.index.isin(red_times)]
else:
nDRc = con
cmeans = nDRc.groupby(nDRc.index.hour).mean()
# compute BL for each hour separately
con_hrly = {hour: con[con.index.hour == hour]
for hour in range(24)}
bl_hrly = []
for hour, conhr in con_hrly.items():
blvals = [cmeans[hour]]
if red_times is not None:
for period, c in conhr.items():
if period in red_times:
blvals.append(blvals[-1])
else:
blvals.append(alpha*c + (1-alpha)*blvals[-1])
else:
for period, c in conhr.items():
blvals.append(alpha*c + (1-alpha)*blvals[-1])
bl_hrly.append(pd.Series(blvals[1:], index=conhr.index))
bls.append(pd.concat(bl_hrly).tz_convert('GMT'))
return pd.concat(bls).loc[bl_periods]
def optimize(self, tariff, LMP=None, dr_periods=None, BL='CAISO',
isRT=False, isPDP=False, carbon=False, **kwargs):
"""
Solve the participant's optimization problem. Pass in additional
Lin/Quad Expr of other objective terms with 'add_obj_term' kwarg
"""
if isRT and (dr_periods is not None):
raise Exception('Cannot combine DR with RTP.')
if isPDP and (dr_periods is not None):
raise Exception('Cannot combine DR with PDP.')
        # extract additional objective term if given
if 'add_obj_term' in kwargs:
add_obj_term = kwargs['add_obj_term']
else:
add_obj_term = 0
# energy charges are always included (demand charges
# are set to zero if tariff has none and DR_compensation is
# set to zero if there are no DR events ...)
# if (LMP is None) or (dr_periods is None):
# #print drc
# drc = 0.0
# else:
# #print self.DR_compensation(LMP, dr_periods, BL=BL,
# # tariff=tariff, **kwargs)
# drc=quicksum(self.DR_compensation(LMP, dr_periods, BL=BL,
# tariff=tariff, **kwargs).values.tolist())
self._model.setObjective(
self._dynsys.additional_cost_term(vals=False) +
quicksum(self.energy_charges(
tariff, isRT=isRT, LMP=LMP, isPDP=isPDP,
carbon=carbon).values) +
quicksum(self.demand_charges(tariff, isPDP=False).values) -
quicksum(self.DR_compensation(LMP, dr_periods, BL=BL,
tariff=tariff, **kwargs).values) +
add_obj_term)
self._model.optimize()
def optimize_blfixed(self, tariff, LMP, bl_values, carbon=False, **kwargs):
"""
        Solve the participant's optimization problem in case the baseline
values are fixed.
"""
# No option for RTPs. No biggie, since RTP and DR are alternatives.
        # extract additional objective term if given
if 'add_obj_term' in kwargs:
add_obj_term = kwargs['add_obj_term']
else:
add_obj_term = 0
self._model.setObjective(
quicksum(self.energy_charges(tariff, LMP=LMP,
carbon=carbon).values) +
self._dynsys.additional_cost_term(vals=False))
self._model.update()
# for some tariffs we also have demand charges
if tariff in dem_charges:
self._model.setObjective(
self._model.getObjective() +
quicksum(self.demand_charges(tariff).values))
else:
if hasattr(self, '_maxcon'):
for maxcon in self._maxcon.values():
self._model.remove(maxcon)
del self._maxcon
if hasattr(self, '_maxconbnd'):
for maxconbnd in self._maxconbnd.values():
self._model.remove(maxconbnd)
del self._maxconbnd
self._model.update()
self._nonDRobj = self._model.getObjective() + add_obj_term
self._model.setObjective(
self._nonDRobj - self.DR_comp_blfix(
LMP, bl_values, tariff=tariff, **kwargs))
self._model.optimize()
def generation_cost(self, LMP, carbon=False):
"""
        Return the generation cost of the participant's consumption (= price
        of consumption according to the LMPs) as a gurobipy LinExpr.
"""
lmps = LMP.loc[self._index] / 1000 # select and convert price to $/kWh
if carbon:
lmps += pd.Series(carbon_costs).loc[self._index.tz_convert(
'US/Pacific').year].values / 1000.0
cons = self._dynsys.get_consumption()['energy']
return quicksum([lmp * con for lmp, con in
zip(lmps.values, cons.values)])
def get_results(self):
"""
        Return results of the optimization problem.
"""
columns = {}
xopt, uopt = self._dynsys.get_optvals()
for i in range(xopt.shape[1]):
columns['x{}'.format(i+1)] = xopt[:-1, i]
for i in range(uopt.shape[1]):
columns['u{}'.format(i+1)] = uopt[:, i]
cons = self._dynsys.get_consumption()
columns['nrg_cons'] = np.array([e.getValue() for e in cons['energy']])
columns['pwr_cons'] = np.array([e.getValue() for e in cons['power']])
dfs = [pd.DataFrame(columns, index=self._index)]
if hasattr(self, '_z'):
perstrs, vals = [], []
for perstr, z in self._z.items():
perstrs.append(perstr)
vals.append(bool(z.X))
dtidx = pd.to_datetime(perstrs, format=psform).tz_localize(
'US/Pacific').tz_convert('GMT')
dfs.append(pd.DataFrame({'z': vals}, index=dtidx))
if hasattr(self, '_red'):
perstrs, vals = [], []
for perstr, red in self._red.items():
perstrs.append(perstr)
vals.append(red.X)
dtidx = pd.to_datetime(perstrs, format=psform).tz_localize(
'US/Pacific').tz_convert('GMT')
dfs.append(pd.DataFrame({'red': vals}, index=dtidx))
if hasattr(self, '_bl'):
perstrs, vals = [], []
for perstr, bl in self._bl.items():
perstrs.append(perstr)
vals.append(bl.X)
dtidx = pd.to_datetime(perstrs, format=psform).tz_localize(
'US/Pacific').tz_convert('GMT')
dfs.append(pd.DataFrame({'BL': vals}, index=dtidx))
return pd.concat(dfs, axis=1)
def _removeOld(self):
"""
Helper function removing all DR-related variables from the
underlying gurobipy optimization model.
"""
if hasattr(self, '_zday'):
for zday in self._zday.values():
self._model.remove(zday)
del self._zday
if hasattr(self, '_red'):
for red in self._red.values():
self._model.remove(red)
del self._red
if hasattr(self, '_z'):
for z in self._z.values():
self._model.remove(z)
del self._z
if hasattr(self, '_bl'):
for bl in self._bl.values():
self._model.remove(bl)
del self._bl
if hasattr(self, '_zdaysum'):
for zdaysum in self._zdaysum.values():
self._model.remove(zdaysum)
del self._zdaysum
if hasattr(self, '_zdaymax'):
for zdaymax in self._zdaymax.values():
self._model.remove(zdaymax)
del self._zdaymax
if hasattr(self, '_blcon'):
for blcon in self._blcon.values():
self._model.remove(blcon)
del self._blcon
if hasattr(self, '_redpos'):
for redpos in self._redpos.values():
self._model.remove(redpos)
del self._redpos
if hasattr(self, '_redBL'):
for redBL in self._redBL.values():
self._model.remove(redBL)
del self._redBL
if hasattr(self, '_red0'):
for red0 in self._red0.values():
self._model.remove(red0)
del self._red0
self._model.update()
def compute_BLtaking_eq(blmodel, tariff, LMP, dr_periods, BL='CAISO',
blinit='noDR', eps=1.0, maxiter=20, carbon=False,
**kwargs):
"""
    Function used to compute the baseline-taking equilibrium.
"""
if 'logger' in kwargs:
logger = kwargs['logger']
if 'isLMPmG' in kwargs:
logstr = BL + ' (LMP-G)'
else:
logstr = BL
logger.log(logging.INFO,
'Computing BL-taking eq. for ' '{} BL.'.format(logstr))
dfs, blvals, objs, gencosts, residuals = [], [], [], [], []
if blinit == 'gamed':
blmodel.optimize(tariff, LMP=LMP, dr_periods=dr_periods,
BL=BL, carbon=carbon, **kwargs)
elif blinit == 'noDR':
blmodel.optimize(tariff, LMP=LMP, carbon=carbon, **kwargs)
else:
errmsg = 'Unknown BL initialization parameter {}.'.format(blinit)
logger.log(logging.ERROR, errmsg)
raise NotImplementedError(errmsg)
# retrieve data from the solution for initialization
dfs.append(blmodel.get_results())
if 'red' in dfs[-1]:
blvals.append(blmodel.compute_baseline(
dr_periods, BL=BL, red_times=dfs[-1][dfs[-1]['red'] > 0].index))
else:
blvals.append(blmodel.compute_baseline(dr_periods, BL=BL))
objs.append(blmodel._model.getObjective().getValue())
gencosts.append(blmodel.generation_cost(LMP).getValue())
residuals.append(np.NaN)
# solve the bl-taking problem for the first time using the bl values
# from the previous solution of the problem
blmodel.optimize_blfixed(tariff, LMP=LMP, bl_values=blvals[-1],
carbon=carbon, **kwargs)
dfs.append(blmodel.get_results())
blvals.append(blmodel.compute_baseline(
dr_periods, BL=BL, red_times=dfs[-1][dfs[-1]['red'] > 0].index))
# todo: what are the units/magnitude of the residuals? I increased the MIPGap (2020-02-05), but that seems to
# have resulted in convergence failure. If the mipgap is too big relative to the convergence tolerance,
# that seems normal. I need to reason out the implications of a 1e-3 mipgap for the baseline residuals that
# implies
objs.append(blmodel._model.getObjective().getValue())
gencosts.append(blmodel.generation_cost(LMP).getValue())
# blvalues are in kWh, on the order of 200kWh on average, max 660 for a building
# costs are on the order of 0.1 $/kWh.
# make the bl convergence in terms of decimal fraction, like the mipgap
# require the max deviation over periods to be within x percent of the mean. should be a couple kWh
# residuals.append(2*np.max(blvals[1] - blvals[0])/np.mean(blvals[1] + blvals[0]))
residuals.append(np.max(blvals[1] - blvals[0])) # had a div by 0 for above
n_iter = 0
while (residuals[-1] > eps) and (n_iter < maxiter):
if 'logger' in kwargs:
logger.log(logging.INFO,
'Residual: {:.2f}, '.format(residuals[-1]) +
'Continuing fixed point iteration.')
blmodel.optimize_blfixed(
tariff, LMP=LMP, bl_values=blvals[-1], carbon=carbon, **kwargs)
dfs.append(blmodel.get_results())
blvals.append(blmodel.compute_baseline(
dr_periods, BL=BL,
red_times=dfs[-1][dfs[-1]['red'] > 0].index))
objs.append(blmodel._model.getObjective().getValue())
gencosts.append(blmodel.generation_cost(LMP).getValue())
residuals.append(np.linalg.norm(blvals[-2] - blvals[-1]))
n_iter += 1
if 'logger' in kwargs:
if residuals[-1] <= eps:
logger.log(logging.INFO,
'Fixed-point iteration successful. ' +
'BL-taking eq. found.')
else:
logger.log(logging.WARNING,
                       'Fixed-point iteration failed. ' +
                       'No BL-taking eq. found.')
return dfs[-1]
|
[
"pandas.Series",
"datetime.datetime",
"numpy.unique",
"pandas.tseries.holiday.USFederalHolidayCalendar",
"pandas.DatetimeIndex",
"numpy.size",
"numpy.linalg.norm",
"numpy.asarray",
"numpy.max",
"gurobipy.quicksum",
"gurobipy.LinExpr",
"numpy.isnan",
"gurobipy.Model",
"pandas.DataFrame",
"pandas.concat",
"pandas.to_datetime"
] |
[((907, 914), 'gurobipy.Model', 'Model', ([], {}), '()\n', (912, 914), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n'), ((19530, 19541), 'numpy.isnan', 'np.isnan', (['M'], {}), '(M)\n', (19538, 19541), True, 'import numpy as np\n'), ((19974, 20009), 'pandas.Series', 'pd.Series', (['isBusiness'], {'index': 'locidx'}), '(isBusiness, index=locidx)\n', (19983, 20009), True, 'import pandas as pd\n'), ((25336, 25378), 'pandas.Series', 'pd.Series', (['drcomp_'], {'index': 'self._dr_periods'}), '(drcomp_, index=self._dr_periods)\n', (25345, 25378), True, 'import pandas as pd\n'), ((28369, 28380), 'numpy.isnan', 'np.isnan', (['M'], {}), '(M)\n', (28377, 28380), True, 'import numpy as np\n'), ((28811, 28846), 'pandas.Series', 'pd.Series', (['isBusiness'], {'index': 'locidx'}), '(isBusiness, index=locidx)\n', (28820, 28846), True, 'import pandas as pd\n'), ((33666, 33708), 'pandas.Series', 'pd.Series', (['drcomp_'], {'index': 'self._dr_periods'}), '(drcomp_, index=self._dr_periods)\n', (33675, 33708), True, 'import pandas as pd\n'), ((35856, 35867), 'numpy.isnan', 'np.isnan', (['M'], {}), '(M)\n', (35864, 35867), True, 'import numpy as np\n'), ((36498, 36533), 'pandas.Series', 'pd.Series', (['isBusiness'], {'index': 'locidx'}), '(isBusiness, index=locidx)\n', (36507, 36533), True, 'import pandas as pd\n'), ((39170, 39205), 'pandas.Series', 'pd.Series', (['isBusiness'], {'index': 'locidx'}), '(isBusiness, index=locidx)\n', (39179, 39205), True, 'import pandas as pd\n'), ((41061, 41096), 'pandas.Series', 'pd.Series', (['isBusiness'], {'index': 'locidx'}), '(isBusiness, index=locidx)\n', (41070, 41096), True, 'import pandas as pd\n'), ((48127, 48149), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (48136, 48149), True, 'import pandas as pd\n'), ((52639, 52668), 'numpy.max', 'np.max', (['(blvals[1] - blvals[0])'], {}), '(blvals[1] - blvals[0])\n', (52645, 52668), True, 'import numpy as np\n'), ((2370, 2401), 'pandas.Series', 'pd.Series', (['echrg_'], {'index': 'locidx'}), '(echrg_, index=locidx)\n', (2379, 2401), True, 'import pandas as pd\n'), ((2895, 2924), 'pandas.Series', 'pd.Series', (['echrg_'], {'index': 'indx'}), '(echrg_, index=indx)\n', (2904, 2924), True, 'import pandas as pd\n'), ((5309, 5353), 'numpy.unique', 'np.unique', (['locidx[locidx.year == year].month'], {}), '(locidx[locidx.year == year].month)\n', (5318, 5353), True, 'import numpy as np\n'), ((16453, 16485), 'pandas.Series', 'pd.Series', (['[0.0]'], {'index': "['None']"}), "([0.0], index=['None'])\n", (16462, 16485), True, 'import pandas as pd\n'), ((46945, 46985), 'pandas.DataFrame', 'pd.DataFrame', (['columns'], {'index': 'self._index'}), '(columns, index=self._index)\n', (46957, 46985), True, 'import pandas as pd\n'), ((53414, 53453), 'numpy.linalg.norm', 'np.linalg.norm', (['(blvals[-2] - blvals[-1])'], {}), '(blvals[-2] - blvals[-1])\n', (53428, 53453), True, 'import numpy as np\n'), ((5385, 5407), 'numpy.unique', 'np.unique', (['locidx.year'], {}), '(locidx.year)\n', (5394, 5407), True, 'import numpy as np\n'), ((7950, 7981), 'pandas.Series', 'pd.Series', (['dcharges'], {'index': 'indx'}), '(dcharges, index=indx)\n', (7959, 7981), True, 'import pandas as pd\n'), ((15600, 15631), 'pandas.Series', 'pd.Series', (['dcharges'], {'index': 'indx'}), '(dcharges, index=indx)\n', (15609, 15631), True, 'import pandas as pd\n'), ((19795, 19821), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (19819, 19821), False, 'from pandas.tseries.holiday 
import USFederalHolidayCalendar\n'), ((28632, 28658), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (28656, 28658), False, 'from pandas.tseries.holiday import USFederalHolidayCalendar\n'), ((36319, 36345), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (36343, 36345), False, 'from pandas.tseries.holiday import USFederalHolidayCalendar\n'), ((38991, 39017), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (39015, 39017), False, 'from pandas.tseries.holiday import USFederalHolidayCalendar\n'), ((40882, 40908), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (40906, 40908), False, 'from pandas.tseries.holiday import USFederalHolidayCalendar\n'), ((42361, 42375), 'pandas.concat', 'pd.concat', (['bls'], {}), '(bls)\n', (42370, 42375), True, 'import pandas as pd\n'), ((47321, 47359), 'pandas.DataFrame', 'pd.DataFrame', (["{'z': vals}"], {'index': 'dtidx'}), "({'z': vals}, index=dtidx)\n", (47333, 47359), True, 'import pandas as pd\n'), ((47697, 47737), 'pandas.DataFrame', 'pd.DataFrame', (["{'red': vals}"], {'index': 'dtidx'}), "({'red': vals}, index=dtidx)\n", (47709, 47737), True, 'import pandas as pd\n'), ((48071, 48110), 'pandas.DataFrame', 'pd.DataFrame', (["{'BL': vals}"], {'index': 'dtidx'}), "({'BL': vals}, index=dtidx)\n", (48083, 48110), True, 'import pandas as pd\n'), ((15708, 15720), 'gurobipy.LinExpr', 'LinExpr', (['(0.0)'], {}), '(0.0)\n', (15715, 15720), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n'), ((42243, 42283), 'pandas.Series', 'pd.Series', (['blvals[1:]'], {'index': 'conhr.index'}), '(blvals[1:], index=conhr.index)\n', (42252, 42283), True, 'import pandas as pd\n'), ((5546, 5570), 'datetime.datetime', 'datetime', (['year', 'month', '(1)'], {}), '(year, month, 1)\n', (5554, 5570), False, 'from datetime import datetime\n'), ((8234, 8260), 'pandas.tseries.holiday.USFederalHolidayCalendar', 'USFederalHolidayCalendar', ([], {}), '()\n', (8258, 8260), False, 'from pandas.tseries.holiday import USFederalHolidayCalendar\n'), ((19220, 19264), 'numpy.asarray', 'np.asarray', (["self._dynsys._opts['nrg_coeffs']"], {}), "(self._dynsys._opts['nrg_coeffs'])\n", (19230, 19264), True, 'import numpy as np\n'), ((20439, 20480), 'gurobipy.quicksum', 'quicksum', (['[self._z[ps] for ps in perstrs]'], {}), '([self._z[ps] for ps in perstrs])\n', (20447, 20480), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n'), ((22321, 22345), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['sim_DR'], {}), '(sim_DR)\n', (22337, 22345), True, 'import pandas as pd\n'), ((22426, 22455), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['sim_DR_mult'], {}), '(sim_DR_mult)\n', (22442, 22455), True, 'import pandas as pd\n'), ((22534, 22561), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['sim_nonDR'], {}), '(sim_nonDR)\n', (22550, 22561), True, 'import pandas as pd\n'), ((28059, 28103), 'numpy.asarray', 'np.asarray', (["self._dynsys._opts['nrg_coeffs']"], {}), "(self._dynsys._opts['nrg_coeffs'])\n", (28069, 28103), True, 'import numpy as np\n'), ((29276, 29317), 'gurobipy.quicksum', 'quicksum', (['[self._z[ps] for ps in perstrs]'], {}), '([self._z[ps] for ps in perstrs])\n', (29284, 29317), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n'), ((35545, 35589), 'numpy.asarray', 'np.asarray', (["self._dynsys._opts['nrg_coeffs']"], {}), "(self._dynsys._opts['nrg_coeffs'])\n", (35555, 35589), 
True, 'import numpy as np\n'), ((42308, 42326), 'pandas.concat', 'pd.concat', (['bl_hrly'], {}), '(bl_hrly)\n', (42317, 42326), True, 'import pandas as pd\n'), ((8349, 8376), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['idx_.date'], {}), '(idx_.date)\n', (8365, 8376), True, 'import pandas as pd\n'), ((15770, 15786), 'numpy.size', 'np.size', (['indx', '(0)'], {}), '(indx, 0)\n', (15777, 15786), True, 'import numpy as np\n'), ((24190, 24222), 'gurobipy.quicksum', 'quicksum', (['[(1 - z) for z in zKc]'], {}), '([(1 - z) for z in zKc])\n', (24198, 24222), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n'), ((46102, 46125), 'pandas.Series', 'pd.Series', (['carbon_costs'], {}), '(carbon_costs)\n', (46111, 46125), True, 'import pandas as pd\n'), ((47198, 47236), 'pandas.to_datetime', 'pd.to_datetime', (['perstrs'], {'format': 'psform'}), '(perstrs, format=psform)\n', (47212, 47236), True, 'import pandas as pd\n'), ((47574, 47612), 'pandas.to_datetime', 'pd.to_datetime', (['perstrs'], {'format': 'psform'}), '(perstrs, format=psform)\n', (47588, 47612), True, 'import pandas as pd\n'), ((47948, 47986), 'pandas.to_datetime', 'pd.to_datetime', (['perstrs'], {'format': 'psform'}), '(perstrs, format=psform)\n', (47962, 47986), True, 'import pandas as pd\n'), ((24082, 24099), 'gurobipy.quicksum', 'quicksum', (['(qD + qK)'], {}), '(qD + qK)\n', (24090, 24099), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n'), ((24144, 24156), 'gurobipy.quicksum', 'quicksum', (['zK'], {}), '(zK)\n', (24152, 24156), False, 'from gurobipy import GRB, Model, quicksum, LinExpr\n')]
|
# Front matter
##############
import os
from os import fdopen, remove
from tempfile import mkstemp
from shutil import move
import glob
import re
import time
import pandas as pd
import numpy as np
from scipy import constants
from scipy.optimize import curve_fit, fsolve
from scipy.interpolate import interp1d
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
import math
import seaborn as sns
matplotlib.rc('xtick', labelsize=16)
matplotlib.rc('ytick', labelsize=16)
rc = {'lines.linewidth': 1,
'axes.labelsize': 20,
'axes.titlesize': 20,
'legend.fontsize': 26,
'xtick.direction': u'in',
'ytick.direction': u'in'}
sns.set_style('ticks', rc=rc)
start_time = time.time()
# Input scaling parameter results
##########################################
xi_filename = 'Results/scalingparameters.csv'
xi_df = pd.read_csv(xi_filename)
# Rename columns to avoid confusion
xi_df = xi_df.rename(columns={'Vi':'Vj', 'dVi':'dVj', 'V':'Vk','dV':'dVk',
'V/Vi':'Vk/Vj','xi':'xi(Vk/Vj)','dxi':'dxi(Vk/Vj)'})
# Transform scaling parameters to each reference volume
#######################################################
folder_list = xi_df.drop_duplicates(subset='Ref Folder')['Ref Folder'].values
for ref_folder in folder_list:
# for ref_folder in ['2009Oct_30GPa']:
print('Rescaling to '+ref_folder)
# Reference volume to scale everything to
Vi = xi_df[xi_df['Ref Folder']==ref_folder].iloc[-1]['Vj']
xi_rescaled_df = xi_df[['Vj','Vk','xi(Vk/Vj)','dxi(Vk/Vj)']].copy()
xi_rescaled_df['Vi'] = Vi*np.ones(len(xi_rescaled_df))
# rescaled xi(Vk/Vi) = xi(Vk/Vj) * complementary xi(Vj/Vi)
# Complementary xi needed to calculate rescaled xi:
xi_rescaled_df['xi(Vj/Vi)'] = [xi_rescaled_df[(xi_rescaled_df['Vj']==Vi) &
(xi_rescaled_df['Vk']==Vj)].iloc[-1]['xi(Vk/Vj)'] for Vj in xi_rescaled_df['Vj']]
xi_rescaled_df['dxi(Vj/Vi)'] = [xi_rescaled_df[(xi_rescaled_df['Vj']==Vi) &
(xi_rescaled_df['Vk']==Vj)].iloc[-1]['dxi(Vk/Vj)'] for Vj in xi_rescaled_df['Vj']]
xi_rescaled_df['Vk/Vi'] = xi_rescaled_df['Vk']/xi_rescaled_df['Vi']
# Calculate rescaled xi
xi_rescaled_df['xi(Vk/Vi)'] = xi_rescaled_df['xi(Vk/Vj)']*xi_rescaled_df['xi(Vj/Vi)']
# Calculate uncertainty on rescaled xi
# If c = a*b, dc = sqrt((b*da)^2 + (a*db)^2)
xi_rescaled_df['dxi(Vk/Vi)'] = np.sqrt(
(xi_rescaled_df['xi(Vj/Vi)']*xi_rescaled_df['dxi(Vk/Vj)'])**2 +
(xi_rescaled_df['xi(Vk/Vj)']*xi_rescaled_df['dxi(Vj/Vi)'])**2)
# Eliminate data points where Vi = Vk
xi_rescaled_df = xi_rescaled_df[xi_rescaled_df['Vk'] != Vi]
xi_rescaled_df = xi_rescaled_df.round(decimals=4)
xi_rescaled_df.to_csv(ref_folder+'/rescaledparameters.csv',index=False)
# Plot scaling parameters
fig, (ax0) = plt.subplots(nrows = 1, ncols=1, figsize=(6,4.5))
ax0.errorbar(xi_rescaled_df['Vk/Vi'],xi_rescaled_df['xi(Vk/Vi)'],
yerr=xi_rescaled_df['dxi(Vk/Vi)'],
marker = 'o', color = 'gray', mfc='lightgray', ms=6, markeredgewidth=1,
ls='none',elinewidth=1)
ax0.set_xlabel(r'$V/V_i$',fontsize = 16)
ax0.set_ylabel(r'$\xi$',fontsize = 16)
ax0.tick_params(direction='in',right='on',top='on')
fig.savefig(ref_folder+'/scalingparam.pdf', format='pdf',
bbox_inches='tight')
plt.close()
|
[
"numpy.sqrt",
"pandas.read_csv",
"seaborn.set_style",
"matplotlib.pyplot.close",
"matplotlib.rc",
"time.time",
"matplotlib.pyplot.subplots"
] |
[((509, 545), 'matplotlib.rc', 'matplotlib.rc', (['"""xtick"""'], {'labelsize': '(16)'}), "('xtick', labelsize=16)\n", (522, 545), False, 'import matplotlib\n'), ((547, 583), 'matplotlib.rc', 'matplotlib.rc', (['"""ytick"""'], {'labelsize': '(16)'}), "('ytick', labelsize=16)\n", (560, 583), False, 'import matplotlib\n'), ((765, 794), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {'rc': 'rc'}), "('ticks', rc=rc)\n", (778, 794), True, 'import seaborn as sns\n'), ((809, 820), 'time.time', 'time.time', ([], {}), '()\n', (818, 820), False, 'import time\n'), ((955, 979), 'pandas.read_csv', 'pd.read_csv', (['xi_filename'], {}), '(xi_filename)\n', (966, 979), True, 'import pandas as pd\n'), ((2411, 2557), 'numpy.sqrt', 'np.sqrt', (["((xi_rescaled_df['xi(Vj/Vi)'] * xi_rescaled_df['dxi(Vk/Vj)']) ** 2 + (\n xi_rescaled_df['xi(Vk/Vj)'] * xi_rescaled_df['dxi(Vj/Vi)']) ** 2)"], {}), "((xi_rescaled_df['xi(Vj/Vi)'] * xi_rescaled_df['dxi(Vk/Vj)']) ** 2 +\n (xi_rescaled_df['xi(Vk/Vj)'] * xi_rescaled_df['dxi(Vj/Vi)']) ** 2)\n", (2418, 2557), True, 'import numpy as np\n'), ((2820, 2868), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(6, 4.5)'}), '(nrows=1, ncols=1, figsize=(6, 4.5))\n', (2832, 2868), True, 'import matplotlib.pyplot as plt\n'), ((3294, 3305), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3303, 3305), True, 'import matplotlib.pyplot as plt\n')]
|
import cv2
import numpy as np
import picamera
import time
def identifySq(pt, w, h):
    # Board geometry in the warped image (pixels): (tlx, tly) is the grid
    # origin and (ppx, ppy) is the per-cell pitch.
    tlx = 80
    tly = 210
    ppx = 94
    ppy = 82
    # Map the template-match position to a 0-15 board index; integer division
    # keeps the indices usable as list indices under Python 3.
    sqx = (pt[0] - (tlx - ppx//2)) // ppx
    sqy = (pt[1] - (tly - ppy//2)) // ppy
    # print ("ID", pt, w, h, sqx, sqy)
    if sqx < 0 or sqx >= 4 or sqy < 0 or sqy >= 4:
        return 0, False
    return sqy*4 + sqx, True
if __name__ == '__main__' :
# Acquire source image.
cam = picamera.PiCamera()
cam.capture('newimg.jpg')
# Read source image.
im_src = cv2.imread('newimg.jpg')
# Resize image
newWidth = 640.0
rat1 = newWidth / im_src.shape[1]
dim1 = (int(newWidth), int(im_src.shape[0] * rat1))
im_small = cv2.resize(im_src, dim1, interpolation = cv2.INTER_AREA)
    # Four corners of the board region in the source image
pts_src = np.array([[57, 368], [98, 22], [585, 28], [626, 374]], dtype=float)
# Read destination image.
im_dst = cv2.imread('destimg2.jpg')
    # Four corners of the board region in the destination image.
pts_dst = np.array([[0, 0], [511, 0], [511, 639], [0, 639]], dtype=float)
# Calculate Homography
h, status = cv2.findHomography(pts_src, pts_dst)
# Warp source image to destination based on homography
im_out = cv2.warpPerspective(im_small, h, (im_dst.shape[1], im_dst.shape[0]))
im_grey = cv2.cvtColor(im_out, cv2.COLOR_BGR2GRAY)
cv2.imwrite('img23.png', im_out)
# Match to template tiles
tileFiles = ['tile000002.png', 'tile000004.png', 'tile000008.png',
'tile000016.png', 'tile000032.png', 'tile000064.png',
'tile000128.png', 'tile000256.png', 'tile000512.png',
'tile001024.png']
lineThicknessIdx = 1
tileVal = 2
boardCells = [0] * 16
for tileFile in tileFiles:
tile = cv2.imread(tileFile, 0)
w, h = tile.shape[::-1]
# Apply template Matching
method = cv2.TM_CCOEFF_NORMED
res = cv2.matchTemplate(im_grey, tile, method)
threshold = 0.8
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):
sq, sqValid = identifySq(pt, w, h)
if sqValid:
if boardCells[sq] == 0:
boardCells[sq] = tileVal
                    cv2.putText(im_out, str(tileVal), (pt[0], pt[1] + h//3),
                                cv2.FONT_HERSHEY_SCRIPT_COMPLEX, 1, 0, 1)
#print(sq, tileVal)
# print(pt, tileVal, w, h)
#cv2.rectangle(im_out, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), lineThicknessIdx)
lineThicknessIdx += 1
# print("Found", len(zip(*loc[::-1])),"tiles of", tileVal)
tileVal *= 2
for cellIdx in range(len(boardCells)):
print(cellIdx, boardCells[cellIdx])
cv2.imshow("Matched One", im_out)
cv2.waitKey(1000)
# time.sleep(5)
|
[
"cv2.imwrite",
"cv2.findHomography",
"numpy.where",
"picamera.PiCamera",
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.resize",
"cv2.matchTemplate",
"cv2.imread"
] |
[((417, 436), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (434, 436), False, 'import picamera\n'), ((506, 530), 'cv2.imread', 'cv2.imread', (['"""newimg.jpg"""'], {}), "('newimg.jpg')\n", (516, 530), False, 'import cv2\n'), ((682, 736), 'cv2.resize', 'cv2.resize', (['im_src', 'dim1'], {'interpolation': 'cv2.INTER_AREA'}), '(im_src, dim1, interpolation=cv2.INTER_AREA)\n', (692, 736), False, 'import cv2\n'), ((805, 872), 'numpy.array', 'np.array', (['[[57, 368], [98, 22], [585, 28], [626, 374]]'], {'dtype': 'float'}), '([[57, 368], [98, 22], [585, 28], [626, 374]], dtype=float)\n', (813, 872), True, 'import numpy as np\n'), ((917, 943), 'cv2.imread', 'cv2.imread', (['"""destimg2.jpg"""'], {}), "('destimg2.jpg')\n", (927, 943), False, 'import cv2\n'), ((1011, 1074), 'numpy.array', 'np.array', (['[[0, 0], [511, 0], [511, 639], [0, 639]]'], {'dtype': 'float'}), '([[0, 0], [511, 0], [511, 639], [0, 639]], dtype=float)\n', (1019, 1074), True, 'import numpy as np\n'), ((1119, 1155), 'cv2.findHomography', 'cv2.findHomography', (['pts_src', 'pts_dst'], {}), '(pts_src, pts_dst)\n', (1137, 1155), False, 'import cv2\n'), ((1229, 1297), 'cv2.warpPerspective', 'cv2.warpPerspective', (['im_small', 'h', '(im_dst.shape[1], im_dst.shape[0])'], {}), '(im_small, h, (im_dst.shape[1], im_dst.shape[0]))\n', (1248, 1297), False, 'import cv2\n'), ((1312, 1352), 'cv2.cvtColor', 'cv2.cvtColor', (['im_out', 'cv2.COLOR_BGR2GRAY'], {}), '(im_out, cv2.COLOR_BGR2GRAY)\n', (1324, 1352), False, 'import cv2\n'), ((1358, 1390), 'cv2.imwrite', 'cv2.imwrite', (['"""img23.png"""', 'im_out'], {}), "('img23.png', im_out)\n", (1369, 1390), False, 'import cv2\n'), ((2721, 2754), 'cv2.imshow', 'cv2.imshow', (['"""Matched One"""', 'im_out'], {}), "('Matched One', im_out)\n", (2731, 2754), False, 'import cv2\n'), ((2760, 2777), 'cv2.waitKey', 'cv2.waitKey', (['(1000)'], {}), '(1000)\n', (2771, 2777), False, 'import cv2\n'), ((1784, 1807), 'cv2.imread', 'cv2.imread', (['tileFile', '(0)'], {}), '(tileFile, 0)\n', (1794, 1807), False, 'import cv2\n'), ((1927, 1967), 'cv2.matchTemplate', 'cv2.matchTemplate', (['im_grey', 'tile', 'method'], {}), '(im_grey, tile, method)\n', (1944, 1967), False, 'import cv2\n'), ((2006, 2032), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (2014, 2032), True, 'import numpy as np\n')]
|
import numpy as np
from mpi4py import MPI
from tacs import TACS, elements, constitutive, functions
from static_analysis_base_test import StaticTestCase
'''
Create two separate cantilevered plates connected by an RBE2 element.
Apply a load at the RBE2 center node and test the KSFailure, StructuralMass,
Compliance, and KSDisplacement functions and their sensitivities.
----------- -----------
| |\ /| |
| | \ / | |
| Plate 1 |__\/__| Plate 2 |
| | /\ | |
| | / \ | |
| |/ \| |
------------ -----------
'''
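# Reference values for the regression test, in the same order as the function
# list returned by setup_funcs below:
# [KSFailure, StructuralMass, Compliance, KSDisplacement]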
FUNC_REFS = np.array([1.2600980396870352, 51400.0, 3767896.1409673616, 2.912191091671254])
# Length of plate in x/y direction
Lx = 10.0
Ly = 10.0
# Number of elements in x/y direction for each plate
nx = 4
ny = 4
# applied force at center node
applied_force = np.array([1e8, 0.0, 1.0e6, 0.0, 0.0, 1e8])
# KS function weight
ksweight = 10.0
class ProblemTest(StaticTestCase.StaticTest):
N_PROCS = 2 # this is how many MPI processes to use for this TestCase.
def setup_assembler(self, comm, dtype):
"""
Setup mesh and tacs assembler for problem we will be testing.
"""
# Overwrite default check values
if dtype == complex:
self.rtol = 1e-5
self.atol = 1e-8
self.dh = 1e-50
else:
self.rtol = 1e-1
self.atol = 1e-4
self.dh = 1e-5
# Create the stiffness object
props = constitutive.MaterialProperties(rho=2570.0, E=70e9, nu=0.3, ys=350e6)
stiff = constitutive.IsoShellConstitutive(props, t=0.1, tNum=0)
# Set up the element transform function
transform = elements.ShellNaturalTransform()
shell = elements.Quad4Shell(transform, stiff)
num_rbe_nodes = 0
# Allocate the TACSCreator object
vars_per_node = shell.getVarsPerNode()
creator = TACS.Creator(comm, vars_per_node)
if comm.rank == 0:
num_elems = nx * ny
num_nodes = (nx + 1) * (ny + 1)
# discretize (left) plate
x = np.linspace(0, Lx, nx + 1, dtype)
y = np.linspace(0, Ly, ny + 1, dtype)
left_xyz = np.zeros([nx + 1, ny + 1, 3], dtype)
left_xyz[:, :, 0], left_xyz[:, :, 1] = np.meshgrid(x, y, indexing='ij')
left_node_ids = np.arange(num_nodes, dtype=np.intc).reshape(nx + 1, ny + 1)
# Define right plate by copying left plate and shifting 2 m
right_xyz = left_xyz.copy()
right_xyz[:, :, 0] += 2.0 * Lx
right_node_ids = left_node_ids + num_nodes
# Double the node/element count
num_nodes *= 2
num_elems *= 2
# Set connectivity for each plate element
conn = []
for i in range(nx):
for j in range(ny):
conn.extend([left_node_ids[i, j],
left_node_ids[i + 1, j],
left_node_ids[i, j + 1],
left_node_ids[i + 1, j + 1]])
conn.extend([right_node_ids[i, j],
right_node_ids[i + 1, j],
right_node_ids[i, j + 1],
right_node_ids[i + 1, j + 1]])
# Append connectivity for rbe element
center_node_id = num_nodes
center_node_xyz = np.array([1.5 * Lx, 0.5 * Ly, 0.0], dtype=dtype)
num_nodes += 1
# Add center node as indep rbe node
rbe_conn = [center_node_id]
dep_nodes = []
dummy_nodes = []
# Append all dependent nodes and a dummy node for each dep node added
for j in range(ny + 1):
# Add nodes on right edge of left plate as dep RBE nodes
dep_nodes.append(left_node_ids[-1, j])
dummy_node_id = num_nodes
dummy_nodes.append(dummy_node_id)
# Add nodes on left edge of right plate as indep RBE nodes
dep_nodes.append(right_node_ids[0, j])
dummy_node_id = num_nodes + 1
dummy_nodes.append(dummy_node_id)
# Increment node count for new dummy nodes
num_nodes += 2
rbe_conn.extend(dep_nodes)
rbe_conn.extend(dummy_nodes)
dummy_node_xyz = np.zeros([len(dep_nodes), 3], dtype=dtype)
# Add rbe to global connectivity
num_rbe_nodes = len(rbe_conn)
conn.extend(rbe_conn)
num_elems += 1
# Set element info for plates
conn = np.array(conn, dtype=np.intc)
ptr = np.arange(0, 4 * num_elems + 1, 4, dtype=np.intc)
comp_ids = np.zeros(num_elems, dtype=np.intc)
# Correct last entries for RBE
ptr[-1] = ptr[-2] + num_rbe_nodes
comp_ids[-1] = 1
creator.setGlobalConnectivity(num_nodes, ptr, conn, comp_ids)
# Set up the boundary conditions (fixed at left hand edge)
bcnodes = np.append(left_node_ids[0, :], right_node_ids[-1, :])
creator.setBoundaryConditions(bcnodes)
# Set the node locations
xyz = np.append(left_xyz.flatten(), right_xyz.flatten())
xyz = np.append(xyz.flatten(), center_node_xyz)
xyz = np.append(xyz.flatten(), dummy_node_xyz)
creator.setNodes(xyz.flatten())
# Set up rbe object
num_rbe_nodes = comm.bcast(num_rbe_nodes, root=0)
# Which dependent dofs are connected
dep_dofs = np.array([1, 1, 1, 1, 1, 1], np.intc)
# Set the artificial stiffness to be low to pass the sensitivity tests
# This will affect the accuracy of the element behavior
rbe = elements.RBE2(num_rbe_nodes, dep_dofs, C1=1e2, C2=1e-1)
# Set the elements for each (only two) component
element_list = [shell, rbe]
creator.setElements(element_list)
# Create the tacs assembler object
assembler = creator.createTACS()
return assembler
def setup_tacs_vecs(self, assembler, force_vec, dv_pert_vec, ans_pert_vec, xpts_pert_vec):
"""
Setup user-defined vectors for analysis and fd/cs sensitivity verification
"""
local_num_nodes = assembler.getNumOwnedNodes()
vars_per_node = assembler.getVarsPerNode()
# The nodes have been distributed across processors now
# Let's find which nodes this processor owns
xpts0 = assembler.createNodeVec()
assembler.getNodes(xpts0)
xpts0_array = xpts0.getArray()
# Split node vector into numpy arrays for easier parsing of vectors
local_xyz = xpts0_array.reshape(local_num_nodes, 3)
local_x, local_y, local_z = local_xyz[:, 0], local_xyz[:, 1], local_xyz[:, 2]
# Create force vector
f_array = force_vec.getArray().reshape(local_num_nodes, vars_per_node)
# Apply distributed forces at tip of beam
# Apply Qxx
f_array[np.logical_and(local_x == 1.5 * Lx, local_y == 0.5 * Ly), :] = applied_force
# Create temporary dv vec for doing fd/cs
dv_pert_array = dv_pert_vec.getArray()
dv_pert_array[:] = 1.0
# Create temporary state variable vec for doing fd/cs
ans_pert_array = ans_pert_vec.getArray()
# Define perturbation array that uniformly moves all nodes on right edge of left plate to the upward
ans_pert_array = ans_pert_array.reshape(local_num_nodes, vars_per_node)
ans_pert_array[local_x == Lx, 1] = 1.0
# Define perturbation array that uniformly moves all nodes on right edge of plate to the right
xpts_pert_array = xpts_pert_vec.getArray()
xpts_pert_array = xpts_pert_array.reshape(local_num_nodes, 3)
# Define perturbation array that uniformly moves all nodes on right edge of left plate to the right
xpts_pert_array[local_x == Lx, 0] = 1.0
return
def setup_funcs(self, assembler):
"""
Create a list of functions to be tested and their reference values for the problem
"""
func_list = [functions.KSFailure(assembler, ksWeight=ksweight),
functions.StructuralMass(assembler),
functions.Compliance(assembler),
functions.KSDisplacement(assembler, ksWeight=ksweight, direction=[1.0, 1.0, 1.0])]
return func_list, FUNC_REFS
|
[
"tacs.functions.Compliance",
"tacs.TACS.Creator",
"tacs.constitutive.IsoShellConstitutive",
"tacs.functions.KSFailure",
"numpy.arange",
"numpy.logical_and",
"tacs.constitutive.MaterialProperties",
"tacs.elements.RBE2",
"numpy.append",
"numpy.array",
"numpy.linspace",
"tacs.elements.ShellNaturalTransform",
"numpy.zeros",
"tacs.functions.StructuralMass",
"tacs.functions.KSDisplacement",
"numpy.meshgrid",
"tacs.elements.Quad4Shell"
] |
[((612, 690), 'numpy.array', 'np.array', (['[1.2600980396870352, 51400.0, 3767896.1409673616, 2.912191091671254]'], {}), '([1.2600980396870352, 51400.0, 3767896.1409673616, 2.912191091671254])\n', (620, 690), True, 'import numpy as np\n'), ((863, 925), 'numpy.array', 'np.array', (['[100000000.0, 0.0, 1000000.0, 0.0, 0.0, 100000000.0]'], {}), '([100000000.0, 0.0, 1000000.0, 0.0, 0.0, 100000000.0])\n', (871, 925), True, 'import numpy as np\n'), ((1516, 1605), 'tacs.constitutive.MaterialProperties', 'constitutive.MaterialProperties', ([], {'rho': '(2570.0)', 'E': '(70000000000.0)', 'nu': '(0.3)', 'ys': '(350000000.0)'}), '(rho=2570.0, E=70000000000.0, nu=0.3, ys=\n 350000000.0)\n', (1547, 1605), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((1602, 1657), 'tacs.constitutive.IsoShellConstitutive', 'constitutive.IsoShellConstitutive', (['props'], {'t': '(0.1)', 'tNum': '(0)'}), '(props, t=0.1, tNum=0)\n', (1635, 1657), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((1727, 1759), 'tacs.elements.ShellNaturalTransform', 'elements.ShellNaturalTransform', ([], {}), '()\n', (1757, 1759), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((1776, 1813), 'tacs.elements.Quad4Shell', 'elements.Quad4Shell', (['transform', 'stiff'], {}), '(transform, stiff)\n', (1795, 1813), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((1948, 1981), 'tacs.TACS.Creator', 'TACS.Creator', (['comm', 'vars_per_node'], {}), '(comm, vars_per_node)\n', (1960, 1981), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((5711, 5748), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1]', 'np.intc'], {}), '([1, 1, 1, 1, 1, 1], np.intc)\n', (5719, 5748), True, 'import numpy as np\n'), ((5906, 5962), 'tacs.elements.RBE2', 'elements.RBE2', (['num_rbe_nodes', 'dep_dofs'], {'C1': '(100.0)', 'C2': '(0.1)'}), '(num_rbe_nodes, dep_dofs, C1=100.0, C2=0.1)\n', (5919, 5962), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((2141, 2174), 'numpy.linspace', 'np.linspace', (['(0)', 'Lx', '(nx + 1)', 'dtype'], {}), '(0, Lx, nx + 1, dtype)\n', (2152, 2174), True, 'import numpy as np\n'), ((2191, 2224), 'numpy.linspace', 'np.linspace', (['(0)', 'Ly', '(ny + 1)', 'dtype'], {}), '(0, Ly, ny + 1, dtype)\n', (2202, 2224), True, 'import numpy as np\n'), ((2248, 2284), 'numpy.zeros', 'np.zeros', (['[nx + 1, ny + 1, 3]', 'dtype'], {}), '([nx + 1, ny + 1, 3], dtype)\n', (2256, 2284), True, 'import numpy as np\n'), ((2336, 2368), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (2347, 2368), True, 'import numpy as np\n'), ((3503, 3551), 'numpy.array', 'np.array', (['[1.5 * Lx, 0.5 * Ly, 0.0]'], {'dtype': 'dtype'}), '([1.5 * Lx, 0.5 * Ly, 0.0], dtype=dtype)\n', (3511, 3551), True, 'import numpy as np\n'), ((4741, 4770), 'numpy.array', 'np.array', (['conn'], {'dtype': 'np.intc'}), '(conn, dtype=np.intc)\n', (4749, 4770), True, 'import numpy as np\n'), ((4789, 4838), 'numpy.arange', 'np.arange', (['(0)', '(4 * num_elems + 1)', '(4)'], {'dtype': 'np.intc'}), '(0, 4 * num_elems + 1, 4, dtype=np.intc)\n', (4798, 4838), True, 'import numpy as np\n'), ((4862, 4896), 'numpy.zeros', 'np.zeros', (['num_elems'], {'dtype': 'np.intc'}), '(num_elems, dtype=np.intc)\n', (4870, 4896), True, 'import numpy as np\n'), ((5185, 5238), 'numpy.append', 'np.append', (['left_node_ids[0, :]', 'right_node_ids[-1, :]'], {}), '(left_node_ids[0, :], right_node_ids[-1, :])\n', (5194, 5238), True, 'import numpy as 
np\n'), ((8295, 8344), 'tacs.functions.KSFailure', 'functions.KSFailure', (['assembler'], {'ksWeight': 'ksweight'}), '(assembler, ksWeight=ksweight)\n', (8314, 8344), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((8367, 8402), 'tacs.functions.StructuralMass', 'functions.StructuralMass', (['assembler'], {}), '(assembler)\n', (8391, 8402), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((8425, 8456), 'tacs.functions.Compliance', 'functions.Compliance', (['assembler'], {}), '(assembler)\n', (8445, 8456), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((8479, 8564), 'tacs.functions.KSDisplacement', 'functions.KSDisplacement', (['assembler'], {'ksWeight': 'ksweight', 'direction': '[1.0, 1.0, 1.0]'}), '(assembler, ksWeight=ksweight, direction=[1.0, 1.0,\n 1.0])\n', (8503, 8564), False, 'from tacs import TACS, elements, constitutive, functions\n'), ((7169, 7225), 'numpy.logical_and', 'np.logical_and', (['(local_x == 1.5 * Lx)', '(local_y == 0.5 * Ly)'], {}), '(local_x == 1.5 * Lx, local_y == 0.5 * Ly)\n', (7183, 7225), True, 'import numpy as np\n'), ((2398, 2433), 'numpy.arange', 'np.arange', (['num_nodes'], {'dtype': 'np.intc'}), '(num_nodes, dtype=np.intc)\n', (2407, 2433), True, 'import numpy as np\n')]
|
import sys
import pylab as plb
import numpy as np
import mountaincar
class DummyAgent():
"""A not so good agent for the mountain-car task.
"""
def __init__(self, mountain_car = None, parameter1 = 3.0):
if mountain_car is None:
self.mountain_car = mountaincar.MountainCar()
else:
self.mountain_car = mountain_car
self.parameter1 = parameter1
def visualize_trial(self, n_steps = 200):
"""Do a trial without learning, with display.
Parameters
----------
n_steps -- number of steps to simulate for
"""
# prepare for the visualization
plb.ion()
plb.pause(0.0001)
mv = mountaincar.MountainCarViewer(self.mountain_car)
mv.create_figure(n_steps, n_steps)
plb.show()
# make sure the mountain-car is reset
self.mountain_car.reset()
for n in range(n_steps):
            print('\rt =', self.mountain_car.t)
            sys.stdout.flush()
# choose a random action
self.mountain_car.apply_force(np.random.randint(3) - 1)
# simulate the timestep
self.mountain_car.simulate_timesteps(100, 0.01)
# update the visualization
mv.update_figure()
plb.show()
plb.pause(0.0001)
# check for rewards
if self.mountain_car.R > 0.0:
print("\rreward obtained at t = ", self.mountain_car.t)
break
def learn(self):
# This is your job!
pass
if __name__ == "__main__":
d = DummyAgent()
d.visualize_trial()
plb.show()
|
[
"pylab.ion",
"numpy.random.randint",
"pylab.pause",
"mountaincar.MountainCar",
"sys.stdout.flush",
"mountaincar.MountainCarViewer",
"pylab.show"
] |
[((1688, 1698), 'pylab.show', 'plb.show', ([], {}), '()\n', (1696, 1698), True, 'import pylab as plb\n'), ((674, 683), 'pylab.ion', 'plb.ion', ([], {}), '()\n', (681, 683), True, 'import pylab as plb\n'), ((692, 709), 'pylab.pause', 'plb.pause', (['(0.0001)'], {}), '(0.0001)\n', (701, 709), True, 'import pylab as plb\n'), ((723, 771), 'mountaincar.MountainCarViewer', 'mountaincar.MountainCarViewer', (['self.mountain_car'], {}), '(self.mountain_car)\n', (752, 771), False, 'import mountaincar\n'), ((823, 833), 'pylab.show', 'plb.show', ([], {}), '()\n', (831, 833), True, 'import pylab as plb\n'), ((291, 316), 'mountaincar.MountainCar', 'mountaincar.MountainCar', ([], {}), '()\n', (314, 316), False, 'import mountaincar\n'), ((1338, 1348), 'pylab.show', 'plb.show', ([], {}), '()\n', (1346, 1348), True, 'import pylab as plb\n'), ((1361, 1378), 'pylab.pause', 'plb.pause', (['(0.0001)'], {}), '(0.0001)\n', (1370, 1378), True, 'import pylab as plb\n'), ((1021, 1039), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1037, 1039), False, 'import sys\n'), ((1133, 1153), 'numpy.random.randint', 'np.random.randint', (['(3)'], {}), '(3)\n', (1150, 1153), True, 'import numpy as np\n')]
|
import numpy as np
from pyquil import Program
from pyquil.api import QuantumComputer, get_qc
from grove.alpha.jordan_gradient.gradient_utils import (binary_float_to_decimal_float,
measurements_to_bf)
from grove.alpha.phaseestimation.phase_estimation import phase_estimation
def gradient_program(f_h: float, precision: int) -> Program:
"""
Gradient estimation via Jordan's algorithm (10.1103/PhysRevLett.95.050501).
:param f_h: Oracle output at perturbation h.
:param precision: Bit precision of gradient.
:return: Quil program to estimate gradient of f.
"""
# encode oracle values into phase
phase_factor = np.exp(1.0j * 2 * np.pi * abs(f_h))
U = np.array([[phase_factor, 0],
[0, phase_factor]])
p_gradient = phase_estimation(U, precision)
return p_gradient
def estimate_gradient(f_h: float, precision: int,
gradient_max: int = 1,
n_measurements: int = 50,
qc: QuantumComputer = None) -> float:
"""
Estimate the gradient using function evaluation at perturbation, h.
:param f_h: Oracle output at perturbation h.
:param precision: Bit precision of gradient.
:param gradient_max: OOM estimate of largest gradient value.
:param n_measurements: Number of times to measure system.
:param qc: The QuantumComputer object.
:return: Decimal estimate of gradient.
"""
# scale f_h by range of values gradient can take on
f_h *= 1. / gradient_max
# generate gradient program
perturbation_sign = np.sign(f_h)
p_gradient = gradient_program(f_h, precision)
# run gradient program
if qc is None:
qc = get_qc(f"{len(p_gradient.get_qubits())}q-qvm")
p_gradient.wrap_in_numshots_loop(n_measurements)
executable = qc.compiler.native_quil_to_executable(p_gradient)
measurements = qc.run(executable)
# summarize measurements
bf_estimate = perturbation_sign * measurements_to_bf(measurements)
bf_explicit = '{0:.16f}'.format(bf_estimate)
deci_estimate = binary_float_to_decimal_float(bf_explicit)
# rescale gradient
deci_estimate *= gradient_max
return deci_estimate
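

# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). For a function whose gradient near the evaluation point is roughly
# 0.25, the oracle output at a unit perturbation is f_h = 0.25. Running this
# requires a quilc compiler and QVM to be reachable by `get_qc`.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    gradient_estimate = estimate_gradient(f_h=0.25, precision=3,
                                          n_measurements=100)
    print('estimated gradient:', gradient_estimate)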
|
[
"grove.alpha.phaseestimation.phase_estimation.phase_estimation",
"grove.alpha.jordan_gradient.gradient_utils.measurements_to_bf",
"numpy.array",
"grove.alpha.jordan_gradient.gradient_utils.binary_float_to_decimal_float",
"numpy.sign"
] |
[((744, 792), 'numpy.array', 'np.array', (['[[phase_factor, 0], [0, phase_factor]]'], {}), '([[phase_factor, 0], [0, phase_factor]])\n', (752, 792), True, 'import numpy as np\n'), ((828, 858), 'grove.alpha.phaseestimation.phase_estimation.phase_estimation', 'phase_estimation', (['U', 'precision'], {}), '(U, precision)\n', (844, 858), False, 'from grove.alpha.phaseestimation.phase_estimation import phase_estimation\n'), ((1630, 1642), 'numpy.sign', 'np.sign', (['f_h'], {}), '(f_h)\n', (1637, 1642), True, 'import numpy as np\n'), ((2129, 2171), 'grove.alpha.jordan_gradient.gradient_utils.binary_float_to_decimal_float', 'binary_float_to_decimal_float', (['bf_explicit'], {}), '(bf_explicit)\n', (2158, 2171), False, 'from grove.alpha.jordan_gradient.gradient_utils import binary_float_to_decimal_float, measurements_to_bf\n'), ((2027, 2059), 'grove.alpha.jordan_gradient.gradient_utils.measurements_to_bf', 'measurements_to_bf', (['measurements'], {}), '(measurements)\n', (2045, 2059), False, 'from grove.alpha.jordan_gradient.gradient_utils import binary_float_to_decimal_float, measurements_to_bf\n')]
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
Code written by <NAME> with modifications by <NAME> and <NAME>
This file produces plots comparing our first order sensitivity with BS vega.
"""
# %%
# To run this script you need the plotly package (e.g. "conda install plotly")
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.io as pio
init_notebook_mode()
pio.renderers.default='svg'
import numpy as np
import numpy.random
import pandas as pd
from scipy.stats import norm, multivariate_normal
from scipy.optimize import minimize
import time
_tstart_stack = []
def tic():
_tstart_stack.append(time.time())
def toc(fmt="Elapsed: %s s"):
print(fmt % (time.time() - _tstart_stack.pop()))
# %%
# We first provide the computation of a call option according to BS (we assume Log normal distribution)
# definition of the dplus and minus functions
# and the BS formula.
def dplus(S, K, T, sigma):
sigmaT = sigma * T ** 0.5
return np.log(S/K)/sigmaT + sigmaT/2
def dminus(S, K, T, sigma):
sigmaT = sigma * T ** 0.5
return np.log(S/K)/sigmaT - sigmaT/2
def BS(S, K, T, sigma, Type = 1):
factor1 = S * norm.cdf(Type * dplus(S, K, T, sigma))
factor2 = K * norm.cdf(Type * dminus(S, K, T, sigma))
return Type * (factor1 - factor2)
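# Remark: with the zero-rate convention used here, BS() returns the
# undiscounted Black-Scholes price S*N(d+) - K*N(d-) for a call (Type = 1)
# and the corresponding put price for Type = -1.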
# Now we provide the computation for the exact call according to the computations in BDT
# We take p = 2
def Robust_Call_Exact_fun(S, K, T, sigma, delta):
def fun(v): #v[0] = a, v[1] = lambda
price = BS(S,max(K - (2 * v[0] + 1)/ (2 * v[1]),0.000001), T, sigma)
return price + v[0] ** 2 / (2 * v[1]) + 0.5 * v[1] * delta ** 2
def cons_fun(v): # the value of v[0] should be constrained to keep strike positive
tmp = K - (2 * v[0] + 1)/ (2 * v[1])
return tmp
cons = ({'type': 'ineq', 'fun' : cons_fun})
guess = np.array([0, 1])
bounds = ((-np.Inf, np.Inf), (0, np.Inf))
res = minimize(fun, guess,
constraints=cons,
method='SLSQP',
bounds=bounds
)
return res.fun
Robust_Call_Exact = np.vectorize(Robust_Call_Exact_fun)
# Now we provide the computation for the first order model uncertainty sensitivity (Upsilon)
# and the resulting BS robust price approximation
# We take p = 2
def Robust_Call_Upsilon(S, K, T, sigma, delta):
muK = norm.cdf(dminus(S, K, T, sigma))
correction = np.sqrt(muK * (1-muK))
return correction
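# Remark: muK = N(d-) is the probability (zero rate, lognormal model) that
# the option finishes in the money, so Upsilon = sqrt(muK*(1 - muK)) is the
# standard deviation of the digital payoff 1_{S_T > K}: it peaks near the
# money and vanishes for deep ITM/OTM strikes, as the plots below illustrate.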
def Robust_Call_Approximation(S, K, T, sigma, delta):
price = BS(S, K, T, sigma)
correction = Robust_Call_Upsilon(S, K, T, sigma, delta)
return price + correction * delta
# %%
# Plotting the robust call and FO approximation for a given strike and increasing uncertainty radius
S = 1
K = 1.2
T = 1
sigma = 0.2
Delta = np.linspace(0, 0.2, 50)
Y0 = BS(S, K, T, sigma)
Y1 = Robust_Call_Approximation(S, K, T, sigma, Delta)
Y2 = Robust_Call_Exact(S, K, T, sigma, Delta)
fig = go.Figure()
fig.add_scatter(x = Delta, y = Y1, name = 'FO')
fig.add_scatter(x = Delta, y = Y2, name = 'RBS')
#fig.layout.title = "Exact Robust Call vs First Order Approx: Strike K="+str(K)+", BS Price="+str(np.round(Y0,4))
fig.layout.xaxis.title = "delta"
fig.layout.yaxis.title = "Price"
iplot(fig)
# %%
# Plotting the robust call and FO approximation for a given radius of uncertainty and a range of strikes
S = 1
K = np.linspace(0.6, 1.4, 100)
T = 1
sigma = 0.2
delta = 0.05
Y0 = Robust_Call_Approximation(S, K, T, sigma, delta)
Y1 = Robust_Call_Exact(S, K, T, sigma, delta)
Y2 = BS(S, K, T, sigma)
fig = go.Figure()
fig.add_scatter(x = K, y = Y0, name = 'FO')
fig.add_scatter(x = K, y = Y1, name = 'Exact')
fig.add_scatter(x = K, y = Y2, name = 'BS')
fig.layout.title = "Call Price vs Exact Robust Call and First Order Approx : delta ="+str(delta)
fig.layout.xaxis.title = "Strike"
fig.layout.yaxis.title = "Price"
iplot(fig)
# %%
# Run a plot to compare BS Vega and BS Upsilon (Uncertainty Sensitivity)
# Plots show the sensitivities
S = 1
K = np.linspace(0.4 * S, 2 * S, 100)
T = 1
sigma = 0.2
delta = 0.02 #is irrelevant here
Y1 = S * (norm.pdf(dplus(S, K , T, sigma)))
Y0 = S * (Robust_Call_Upsilon(S, K, T, sigma, delta))
fig = go.Figure()
fig.add_scatter(x = K, y = Y0, name = 'BS Upsilon')
fig.add_scatter(x = K, y = Y1, name = 'BS Vega')
#fig.layout.title = "Call Price Sensitivity: Vega vs Upsilon, sigma= "+str(sigma)
fig.layout.xaxis.title = "Strike"
fig.layout.yaxis.title = "Price"
iplot(fig)
# %%
# Run a plot to compare BS Vega and BS Upsilon (Uncertainty Sensitivity)
# Plots show the sensitivities
S = 1
K = np.linspace(0.6 * S, 1.4 * S, 100)
T = 1
sigma = 0.2
delta = 0.02 #is irrelevant here
Y0 = S * (norm.pdf(dplus(S, K * np.exp(T * sigma ** 2), T, sigma)) + 1/2-1/np.sqrt(2 * np.pi))
Y1 = S * (Robust_Call_Upsilon(S, K, T, sigma, delta))
fig = go.Figure()
fig.add_scatter(x = K, y = Y0, name = 'BS Vega (shifted) + const')
fig.add_scatter(x = K, y = Y1, name = 'BS Upsilon')
fig.layout.title = "Call Price Sensitivity: Vega vs Upsilon, sigma= "+str(sigma)
fig.layout.xaxis.title = "Strike"
fig.layout.yaxis.title = "Price"
iplot(fig)
|
[
"numpy.sqrt",
"plotly.offline.iplot",
"scipy.optimize.minimize",
"plotly.offline.init_notebook_mode",
"numpy.log",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"time.time",
"plotly.graph_objs.Figure",
"numpy.vectorize"
] |
[((418, 438), 'plotly.offline.init_notebook_mode', 'init_notebook_mode', ([], {}), '()\n', (436, 438), False, 'from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n'), ((2182, 2217), 'numpy.vectorize', 'np.vectorize', (['Robust_Call_Exact_fun'], {}), '(Robust_Call_Exact_fun)\n', (2194, 2217), True, 'import numpy as np\n'), ((2864, 2887), 'numpy.linspace', 'np.linspace', (['(0)', '(0.2)', '(50)'], {}), '(0, 0.2, 50)\n', (2875, 2887), True, 'import numpy as np\n'), ((3021, 3032), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (3030, 3032), True, 'import plotly.graph_objs as go\n'), ((3311, 3321), 'plotly.offline.iplot', 'iplot', (['fig'], {}), '(fig)\n', (3316, 3321), False, 'from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n'), ((3443, 3469), 'numpy.linspace', 'np.linspace', (['(0.6)', '(1.4)', '(100)'], {}), '(0.6, 1.4, 100)\n', (3454, 3469), True, 'import numpy as np\n'), ((3634, 3645), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (3643, 3645), True, 'import plotly.graph_objs as go\n'), ((3946, 3956), 'plotly.offline.iplot', 'iplot', (['fig'], {}), '(fig)\n', (3951, 3956), False, 'from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n'), ((4078, 4110), 'numpy.linspace', 'np.linspace', (['(0.4 * S)', '(2 * S)', '(100)'], {}), '(0.4 * S, 2 * S, 100)\n', (4089, 4110), True, 'import numpy as np\n'), ((4268, 4279), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (4277, 4279), True, 'import plotly.graph_objs as go\n'), ((4533, 4543), 'plotly.offline.iplot', 'iplot', (['fig'], {}), '(fig)\n', (4538, 4543), False, 'from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n'), ((4668, 4702), 'numpy.linspace', 'np.linspace', (['(0.6 * S)', '(1.4 * S)', '(100)'], {}), '(0.6 * S, 1.4 * S, 100)\n', (4679, 4702), True, 'import numpy as np\n'), ((4911, 4922), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (4920, 4922), True, 'import plotly.graph_objs as go\n'), ((5193, 5203), 'plotly.offline.iplot', 'iplot', (['fig'], {}), '(fig)\n', (5198, 5203), False, 'from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n'), ((1909, 1925), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1917, 1925), True, 'import numpy as np\n'), ((1982, 2051), 'scipy.optimize.minimize', 'minimize', (['fun', 'guess'], {'constraints': 'cons', 'method': '"""SLSQP"""', 'bounds': 'bounds'}), "(fun, guess, constraints=cons, method='SLSQP', bounds=bounds)\n", (1990, 2051), False, 'from scipy.optimize import minimize\n'), ((2487, 2511), 'numpy.sqrt', 'np.sqrt', (['(muK * (1 - muK))'], {}), '(muK * (1 - muK))\n', (2494, 2511), True, 'import numpy as np\n'), ((682, 693), 'time.time', 'time.time', ([], {}), '()\n', (691, 693), False, 'import time\n'), ((1028, 1041), 'numpy.log', 'np.log', (['(S / K)'], {}), '(S / K)\n', (1034, 1041), True, 'import numpy as np\n'), ((1128, 1141), 'numpy.log', 'np.log', (['(S / K)'], {}), '(S / K)\n', (1134, 1141), True, 'import numpy as np\n'), ((4830, 4848), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (4837, 4848), True, 'import numpy as np\n'), ((743, 754), 'time.time', 'time.time', ([], {}), '()\n', (752, 754), False, 'import time\n'), ((4787, 4809), 'numpy.exp', 'np.exp', (['(T * sigma ** 2)'], {}), '(T * sigma ** 2)\n', (4793, 4809), True, 'import numpy as np\n')]
|
import numpy as np
import torchvision.datasets as datasets
from pathlib import Path
import libs.dirs as dirs
import libs.utils as utils
import libs.dataset_utils as dutils
import models.utils as mutils
import libs.commons as commons
from libs.vis_functions import plot_confusion_matrix
def wrapper_train(epochs, model_path, history_path, dataset_path):
seed = None
device_id = 0
numImgBatch = 256
use_weights = True
# ImageNet statistics
dataTransforms = mutils.resnet_transforms(commons.IMAGENET_MEAN, commons.IMAGENET_STD)
# Load Dataset objects for train and val sets from folder
sets = ['train', 'val']
imageDataset = {}
for phase in sets:
f = dataset_path / phase
imageDataset[phase] = datasets.ImageFolder(str(f),
transform=dataTransforms[phase],
is_valid_file=utils.check_empty_file)
history, _ = mutils.train_network(dataset_path, dataTransforms, epochs=epochs,
batch_size=numImgBatch,
model_path=model_path,
history_path=history_path,
seed=seed,
weighted_loss=use_weights,
device_id=device_id)
# Get best epoch results
bestValIndex = np.argmin(history['loss-val'])
bestValLoss = history['loss-val'][bestValIndex]
bestValAcc = history['acc-val'][bestValIndex]
confMat = history['conf-val'][bestValIndex]
return bestValLoss, bestValAcc, confMat
if __name__ == "__main__":
numEvals = 5
net_type = dutils.get_input_network_type(commons.network_types)
val_type = dutils.get_input_network_type(commons.val_types, message="validation set")
rede = int(input("\nEnter net number.\n"))
numEpochs = 25
# Dataset root folder
datasetPath = Path(dirs.dataset) / "{}_dataset_rede_{}_val_{}".format(net_type, rede, val_type)
datasetName = datasetPath.stem
modelFolder = Path(dirs.saved_models) / \
"{}_{}_epochs".format(datasetName, numEpochs)
historyFolder = Path(dirs.saved_models) / \
"history_{}_{}_epochs".format(datasetName, numEpochs)
filePath = Path(dirs.results) / \
"log_evaluation_{}_{}_epochs.txt".format(datasetName, numEpochs)
confMatPath = Path(dirs.results) / \
"confusion_matrix_{}.pdf".format(datasetName)
valLoss = []
valAcc = []
print()
# Run function many times and save best results
for i in range(numEvals):
print("\nStarting run number {}/{}.\n".format(i+1, numEvals))
modelPath = modelFolder / "model_run_{}.pt".format(i)
historyPath = historyFolder / "history_run_{}.pickle".format(i)
roundValLoss, roundValAcc, confMat = wrapper_train(numEpochs, modelPath, historyPath, datasetPath)
valLoss.append(roundValLoss)
classAcc = mutils.compute_class_acc(confMat)
avgAcc = np.mean(classAcc)
valAcc.append(roundValAcc)
print("Debug\nAvg acc: {:.3f}".format(avgAcc))
print("other acc: {:.3f}\n".format(roundValAcc))
# Save best confusion matrix
if np.argmin(valLoss) == i:
bestConfMat = confMat
printString = ""
printString += "\nFinished training {} evaluation runs for dataset\n{}\n".format(numEvals, datasetPath)
printString += "\nResulting statistics:\n\
Val Loss:\n\
Mean: {:.3f}\n\
Std : {:.3f}\n\
Val Avg Acc:\n\
Mean: {:.5f}\n\
Std {:.5f}\n".format(np.mean(valLoss), np.std(valLoss),
np.mean(valAcc), np.std(valAcc))
print(printString)
with open(filePath, mode='w') as f:
f.write(printString)
title = "Confusion Matrix "+str(datasetName)
plot_confusion_matrix(confMat, title=title, normalize=True, show=False, save_path=confMatPath)
# print("Conf matrix:")
# print(confMat)
|
[
"numpy.mean",
"libs.dataset_utils.get_input_network_type",
"pathlib.Path",
"numpy.std",
"numpy.argmin",
"models.utils.train_network",
"models.utils.compute_class_acc",
"models.utils.resnet_transforms",
"libs.vis_functions.plot_confusion_matrix"
] |
[((556, 625), 'models.utils.resnet_transforms', 'mutils.resnet_transforms', (['commons.IMAGENET_MEAN', 'commons.IMAGENET_STD'], {}), '(commons.IMAGENET_MEAN, commons.IMAGENET_STD)\n', (580, 625), True, 'import models.utils as mutils\n'), ((1045, 1252), 'models.utils.train_network', 'mutils.train_network', (['dataset_path', 'dataTransforms'], {'epochs': 'epochs', 'batch_size': 'numImgBatch', 'model_path': 'model_path', 'history_path': 'history_path', 'seed': 'seed', 'weighted_loss': 'use_weights', 'device_id': 'device_id'}), '(dataset_path, dataTransforms, epochs=epochs,\n batch_size=numImgBatch, model_path=model_path, history_path=\n history_path, seed=seed, weighted_loss=use_weights, device_id=device_id)\n', (1065, 1252), True, 'import models.utils as mutils\n'), ((1533, 1563), 'numpy.argmin', 'np.argmin', (["history['loss-val']"], {}), "(history['loss-val'])\n", (1542, 1563), True, 'import numpy as np\n'), ((1831, 1883), 'libs.dataset_utils.get_input_network_type', 'dutils.get_input_network_type', (['commons.network_types'], {}), '(commons.network_types)\n', (1860, 1883), True, 'import libs.dataset_utils as dutils\n'), ((1899, 1973), 'libs.dataset_utils.get_input_network_type', 'dutils.get_input_network_type', (['commons.val_types'], {'message': '"""validation set"""'}), "(commons.val_types, message='validation set')\n", (1928, 1973), True, 'import libs.dataset_utils as dutils\n'), ((4023, 4121), 'libs.vis_functions.plot_confusion_matrix', 'plot_confusion_matrix', (['confMat'], {'title': 'title', 'normalize': '(True)', 'show': '(False)', 'save_path': 'confMatPath'}), '(confMat, title=title, normalize=True, show=False,\n save_path=confMatPath)\n', (4044, 4121), False, 'from libs.vis_functions import plot_confusion_matrix\n'), ((2087, 2105), 'pathlib.Path', 'Path', (['dirs.dataset'], {}), '(dirs.dataset)\n', (2091, 2105), False, 'from pathlib import Path\n'), ((2223, 2246), 'pathlib.Path', 'Path', (['dirs.saved_models'], {}), '(dirs.saved_models)\n', (2227, 2246), False, 'from pathlib import Path\n'), ((2329, 2352), 'pathlib.Path', 'Path', (['dirs.saved_models'], {}), '(dirs.saved_models)\n', (2333, 2352), False, 'from pathlib import Path\n'), ((2438, 2456), 'pathlib.Path', 'Path', (['dirs.results'], {}), '(dirs.results)\n', (2442, 2456), False, 'from pathlib import Path\n'), ((2556, 2574), 'pathlib.Path', 'Path', (['dirs.results'], {}), '(dirs.results)\n', (2560, 2574), False, 'from pathlib import Path\n'), ((3136, 3169), 'models.utils.compute_class_acc', 'mutils.compute_class_acc', (['confMat'], {}), '(confMat)\n', (3160, 3169), True, 'import models.utils as mutils\n'), ((3187, 3204), 'numpy.mean', 'np.mean', (['classAcc'], {}), '(classAcc)\n', (3194, 3204), True, 'import numpy as np\n'), ((3777, 3793), 'numpy.mean', 'np.mean', (['valLoss'], {}), '(valLoss)\n', (3784, 3793), True, 'import numpy as np\n'), ((3795, 3810), 'numpy.std', 'np.std', (['valLoss'], {}), '(valLoss)\n', (3801, 3810), True, 'import numpy as np\n'), ((3844, 3859), 'numpy.mean', 'np.mean', (['valAcc'], {}), '(valAcc)\n', (3851, 3859), True, 'import numpy as np\n'), ((3861, 3875), 'numpy.std', 'np.std', (['valAcc'], {}), '(valAcc)\n', (3867, 3875), True, 'import numpy as np\n'), ((3401, 3419), 'numpy.argmin', 'np.argmin', (['valLoss'], {}), '(valLoss)\n', (3410, 3419), True, 'import numpy as np\n')]
|
import cv2 as cv
import sys
import numpy as np
import tifffile as ti
import argparse
import itertools
max_lowThreshold = 100
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3
def CannyThreshold(val):
low_threshold = val
#img_blur = cv.blur(src_gray, (3,3))
detected_edges = cv.Canny(src_gray, low_threshold, low_threshold*ratio, kernel_size)
mask = detected_edges != 0
dst = src * (mask[:,:,None].astype(src.dtype))
cv.imshow(window_name, dst)
# Sort grey image colors by frequency of appearance
def freq_sort(l):
flat_list = []
for sublist in l:
for item in sublist:
flat_list.append(item)
frequencies = {}
for item in flat_list:
if item in frequencies:
frequencies[item] += 1
else:
frequencies[item] = 1
return sorted(frequencies.items(), key=lambda x: x[1], reverse=True)
# Keep only the colors in the selected slice of the frequency ranking; blank the rest
def gray_filter(img, p_map, start, end):
# Slice the color range
p_map = p_map[start:end]
# Break down the dic
selected_colors = []
for p in p_map:
selected_colors.append(p[0])
    # Replace out-of-range colors with black
r_len = len(img)
c_len = len(img[0])
for i in range(r_len):
for j in range(c_len):
if img[i][j] not in selected_colors:
img[i][j] = 0
return img
# Remove disconnected noise: a pixel is blanked when more than `criteria` of
# its (2*kernel_size+1)**2 - 1 neighbours are already black; the sweep is
# repeated `iterations` times (remove_all also blanks those neighbours).
def de_noise(img, kernel_size=1, criteria=4, iterations=4, remove_all=False):
cur = 0
r_len = len(img)
c_len = len(img[0])
while cur < iterations:
cur += 1
for i in range(r_len):
for j in range(c_len):
# If the iterated pixel is already black
if img[i][j] == 0:
continue
try:
# X, Y = np.mgrid[j:j+kernel_size, i:i+kernel_size]
# print(np.vstack((X.ravel(), Y.ravel())))
# exit(1)
# Put adjacent pixels with given kernel size into the list
p_list = []
indices = [p for p in itertools.product(range(kernel_size, -kernel_size-1, -1), repeat=2) if p != (0,0)]
for idx in indices:
p_list.append(img[i+idx[0]][j+idx[1]])
                    # Remove the pixel if the number of adjacent black pixels exceeds the preset value
if p_list.count(0) > criteria:
img[i][j] = 0
if remove_all:
for idx in indices:
img[i+idx[0]][j+idx[1]] = 0
except IndexError:
pass
return img
if __name__ == '__main__':
src = cv.imread(cv.samples.findFile("input.tif"))
img = cv.cvtColor(src, cv.COLOR_BGR2HSV)
img_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
cv.imshow('original', img_gray)
freq_dic = freq_sort(img_gray)
filtered_img = gray_filter(img_gray, freq_dic, 10, -80)
cv.imshow('filtered', filtered_img)
ti.imwrite("filtered.tif", np.array([[filtered_img] * 90], np.uint8))
# de_noise_img = de_noise(filtered_img, 1, 4, 4)
# de_noise_img = de_noise(de_noise_img, 2, 18, 1)
de_noise_img = de_noise(filtered_img, 1, 5, 4)
ti.imwrite("de_noise_img.tif", np.array([[de_noise_img] * 90], np.uint8))
eroded = cv.dilate(de_noise_img, np.ones((2, 2), np.uint8), iterations=1)
dilated = cv.dilate(eroded, np.ones((2, 2), np.uint8), iterations=1)
med_blur = cv.medianBlur(de_noise_img, 3)
cv.imshow('dilated', dilated)
cv.imshow('de-noised-more-aggressive', de_noise_img)
cv.imshow('med_blur', med_blur)
cv.waitKey()
# img_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
# print(img_gray)
# if img is None:
# sys.exit("Could not read the image.")
#
#
# rows, cols, channels = img.shape
# dst = img.copy()
# a = 2.5
# b = 380
# for i in range(rows):
# for j in range(cols):
# for c in range(3):
# color = img[i, j][c]*a+b
#                 if color > 255: # keep pixel values from going out of range (0~255)
# dst[i, j][c] = 255
#                 elif color < 0: # keep pixel values from going out of range (0~255)
# dst[i, j][c] = 0
#
# blur_img = cv.GaussianBlur(img, ksize=(5, 5), sigmaX=1, sigmaY=1)
# gaussian_gray = cv.GaussianBlur(img_gray, ksize=(5, 5), sigmaX=1, sigmaY=1)
# ti.imwrite("Gaussian_blur.tif", np.array([[gaussian_gray]*90], np.uint8))
#
# med_blur_img = cv.medianBlur(img_gray, 3)
# ti.imwrite("med_blur.tif", np.array([[med_blur_img]*90], np.uint8))
#
# ret, threshold = cv.threshold(blur_img, 85, 255, cv.THRESH_TOZERO_INV)
# ret_gray, threshold_gray = cv.threshold(gaussian_gray, 85, 255, cv.THRESH_TOZERO_INV)
#
# kernel = np.ones((2, 2), np.uint8)
# erosion = cv.erode(threshold, kernel, iterations=2)
# erosion_gray = cv.erode(threshold_gray, kernel, iterations=2)
# ti.imwrite("erosion.tif", np.array([[erosion_gray]*90], np.uint8))
#
# dilation = cv.dilate(erosion, kernel, iterations=2)
# dilation_gray = cv.dilate(threshold_gray, kernel, iterations=2)
# ti.imwrite("dilation.tif", np.array([[dilation_gray]*90], np.uint8))
#
# lower_grey = np.array([0, 0, 11])
# upper_grey = np.array([0, 0, 60])
# mask = cv.inRange(erosion, lower_grey, upper_grey)
# mask = cv.fastNlMeansDenoising(mask, None, 5)
# res = cv.bitwise_and(erosion, erosion, mask=mask)
# res_gray = cv.cvtColor(res, cv.COLOR_BGR2GRAY)
# ti.imwrite("filtered.tif", np.array([[res_gray]*90], np.uint8))
#
# # gray = cv.cvtColor(res, cv.COLOR_BGR2GRAY)
# # grad_x = cv.Sobel(gray, -1, 1, 0, ksize=5)
# # grad_y = cv.Sobel(gray, -1, 0, 1, ksize=5)
# # grad = cv.addWeighted(grad_x, 1, grad_y, 1, 0)
#
# # src = cv.GaussianBlur(src, (3, 3), 0)
# # src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
# # cv.namedWindow(window_name)
# # cv.createTrackbar(title_trackbar, window_name, 0, max_lowThreshold, CannyThreshold)
# # CannyThreshold(0)
# # cv.waitKey()
#
# cv.imshow("src", img)
# cv.imshow("blur", blur_img)
# cv.imshow("threshold", threshold)
#
# cv.imshow("erosion", erosion)
# cv.imshow("dilation", dilation)
#
# cv.imshow('mask', mask)
# cv.imshow('filtered', res)
#
# # cv.imshow("grad", grad)
# cv.imshow("blur", blur_img)
#
# k = cv.waitKey(0)
# if k == ord("s"):
# cv.imwrite("starry_night.png", erosion)
|
[
"numpy.ones",
"cv2.samples.findFile",
"cv2.medianBlur",
"cv2.imshow",
"numpy.array",
"cv2.cvtColor",
"cv2.Canny",
"cv2.waitKey"
] |
[((339, 408), 'cv2.Canny', 'cv.Canny', (['src_gray', 'low_threshold', '(low_threshold * ratio)', 'kernel_size'], {}), '(src_gray, low_threshold, low_threshold * ratio, kernel_size)\n', (347, 408), True, 'import cv2 as cv\n'), ((496, 523), 'cv2.imshow', 'cv.imshow', (['window_name', 'dst'], {}), '(window_name, dst)\n', (505, 523), True, 'import cv2 as cv\n'), ((2949, 2983), 'cv2.cvtColor', 'cv.cvtColor', (['src', 'cv.COLOR_BGR2HSV'], {}), '(src, cv.COLOR_BGR2HSV)\n', (2960, 2983), True, 'import cv2 as cv\n'), ((3000, 3035), 'cv2.cvtColor', 'cv.cvtColor', (['src', 'cv.COLOR_BGR2GRAY'], {}), '(src, cv.COLOR_BGR2GRAY)\n', (3011, 3035), True, 'import cv2 as cv\n'), ((3041, 3072), 'cv2.imshow', 'cv.imshow', (['"""original"""', 'img_gray'], {}), "('original', img_gray)\n", (3050, 3072), True, 'import cv2 as cv\n'), ((3177, 3212), 'cv2.imshow', 'cv.imshow', (['"""filtered"""', 'filtered_img'], {}), "('filtered', filtered_img)\n", (3186, 3212), True, 'import cv2 as cv\n'), ((3705, 3735), 'cv2.medianBlur', 'cv.medianBlur', (['de_noise_img', '(3)'], {}), '(de_noise_img, 3)\n', (3718, 3735), True, 'import cv2 as cv\n'), ((3741, 3770), 'cv2.imshow', 'cv.imshow', (['"""dilated"""', 'dilated'], {}), "('dilated', dilated)\n", (3750, 3770), True, 'import cv2 as cv\n'), ((3776, 3828), 'cv2.imshow', 'cv.imshow', (['"""de-noised-more-aggressive"""', 'de_noise_img'], {}), "('de-noised-more-aggressive', de_noise_img)\n", (3785, 3828), True, 'import cv2 as cv\n'), ((3834, 3865), 'cv2.imshow', 'cv.imshow', (['"""med_blur"""', 'med_blur'], {}), "('med_blur', med_blur)\n", (3843, 3865), True, 'import cv2 as cv\n'), ((3873, 3885), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (3883, 3885), True, 'import cv2 as cv\n'), ((2904, 2936), 'cv2.samples.findFile', 'cv.samples.findFile', (['"""input.tif"""'], {}), "('input.tif')\n", (2923, 2936), True, 'import cv2 as cv\n'), ((3245, 3286), 'numpy.array', 'np.array', (['[[filtered_img] * 90]', 'np.uint8'], {}), '([[filtered_img] * 90], np.uint8)\n', (3253, 3286), True, 'import numpy as np\n'), ((3489, 3530), 'numpy.array', 'np.array', (['[[de_noise_img] * 90]', 'np.uint8'], {}), '([[de_noise_img] * 90], np.uint8)\n', (3497, 3530), True, 'import numpy as np\n'), ((3572, 3597), 'numpy.ones', 'np.ones', (['(2, 2)', 'np.uint8'], {}), '((2, 2), np.uint8)\n', (3579, 3597), True, 'import numpy as np\n'), ((3646, 3671), 'numpy.ones', 'np.ones', (['(2, 2)', 'np.uint8'], {}), '((2, 2), np.uint8)\n', (3653, 3671), True, 'import numpy as np\n')]
|
from ._base import BaseWeight
from ..exceptions import NotFittedError
from ..utils.functions import mean_log_beta
import numpy as np
from scipy.special import loggamma
class PitmanYorProcess(BaseWeight):
def __init__(self, pyd=0, alpha=1, truncation_length=-1, rng=None):
super().__init__(rng=rng)
assert -pyd < alpha, "alpha param must be greater than -pyd"
self.pyd = pyd
self.alpha = alpha
self.v = np.array([], dtype=np.float64)
self.truncation_length = truncation_length
def random(self, size=None):
if size is None and len(self.d) == 0:
raise ValueError("Weight structure not fitted and `n` not passed.")
if size is not None:
if type(size) is not int:
raise TypeError("size parameter must be integer or None")
if len(self.d) == 0:
pitman_yor_bias = np.arange(size)
self.v = self.rng.beta(a=1 - self.pyd,
b=self.alpha + pitman_yor_bias * self.pyd,
size=size)
self.w = self.v * np.cumprod(np.concatenate(([1],
1 - self.v[:-1])))
else:
a_c = np.bincount(self.d)
b_c = np.concatenate((np.cumsum(a_c[::-1])[-2::-1], [0]))
if size is not None and size < len(a_c):
a_c = a_c[:size]
b_c = b_c[:size]
pitman_yor_bias = np.arange(len(a_c))
self.v = self.rng.beta(
a=1 - self.pyd + a_c,
b=self.alpha + pitman_yor_bias * self.pyd + b_c
)
self.w = self.v * np.cumprod(np.concatenate(([1],
1 - self.v[:-1])))
if size is not None:
self.complete(size)
return self.w
def complete(self, size):
if type(size) is not int:
raise TypeError("size parameter must be integer or None")
if self.get_size() < size:
pitman_yor_bias = np.arange(self.get_size(), size)
self.v = np.concatenate(
(
self.v,
self.rng.beta(a=1 - self.pyd,
b=self.alpha + pitman_yor_bias * self.pyd)
)
)
self.w = self.v * np.cumprod(np.concatenate(([1],
1 - self.v[:-1])))
return self.w
def fit_variational(self, variational_d):
self.variational_d = variational_d
self.variational_k = len(self.variational_d)
self.variational_params = np.empty((self.variational_k, 2),
dtype=np.float64)
a_c = np.sum(self.variational_d, 1)
b_c = np.concatenate((np.cumsum(a_c[::-1])[-2::-1], [0]))
self.variational_params[:, 0] = 1 - self.pyd + a_c
self.variational_params[:, 1] = self.alpha + (
1 + np.arange(self.variational_params.shape[0])
) * self.pyd + b_c
def variational_mean_log_w_j(self, j):
if self.variational_d is None:
raise NotFittedError
res = 0
for jj in range(j):
res += mean_log_beta(self.variational_params[jj][1],
self.variational_params[jj][0])
res += mean_log_beta(self.variational_params[j, 0],
self.variational_params[j, 1]
)
return res
def variational_mean_log_p_d__w(self, variational_d=None):
if variational_d is None:
_variational_d = self.variational_d
if _variational_d is None:
raise NotFittedError
else:
_variational_d = variational_d
res = 0
for j, nj in enumerate(np.sum(_variational_d, 1)):
res += nj * self.variational_mean_log_w_j(j)
return res
def variational_mean_log_p_w(self):
if self.variational_d is None:
raise NotFittedError
res = 0
for j, params in enumerate(self.variational_params):
res += mean_log_beta(params[0], params[1]) * -self.pyd
res += mean_log_beta(params[1], params[0]) * (
self.alpha + (j + 1) * self.pyd - 1
)
res += loggamma(self.alpha + j * self.pyd + 1)
res -= loggamma(self.alpha + (j + 1) * self.pyd + 1)
res -= loggamma(1 - self.pyd)
return res
def variational_mean_log_q_w(self):
if self.variational_d is None:
raise NotFittedError
res = 0
for params in self.variational_params:
res += (params[0] - 1) * mean_log_beta(params[0], params[1])
res += (params[1] - 1) * mean_log_beta(params[1], params[0])
res += loggamma(params[0] + params[1])
res -= loggamma(params[0]) + loggamma(params[1])
return res
def variational_mean_w(self, j):
if j > self.variational_k:
return 0
res = 1
for jj in range(j):
res *= (self.variational_params[jj][1] /
self.variational_params[jj].sum())
res *= self.variational_params[j, 0] / self.variational_params[j].sum()
return res
def variational_mode_w(self, j):
if j > self.variational_k:
return 0
res = 1
for jj in range(j):
if self.variational_params[jj, 1] <= 1:
if self.variational_params[jj, 0] <= 1:
raise ValueError('multimodal distribution')
else:
return 0
elif self.variational_params[jj, 0] <= 1:
continue
res *= ((self.variational_params[jj, 1] - 1) /
(self.variational_params[jj].sum() - 2))
if self.variational_params[j, 0] <= 1:
if self.variational_params[j, 1] <= 1:
raise ValueError('multimodal distribution')
else:
return 0
elif self.variational_params[j, 1] <= 1:
return res
res *= ((self.variational_params[j, 0] - 1) /
(self.variational_params[j].sum() - 2))
return res
|
[
"scipy.special.loggamma",
"numpy.array",
"numpy.sum",
"numpy.empty",
"numpy.concatenate",
"numpy.cumsum",
"numpy.bincount",
"numpy.arange"
] |
[((449, 479), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (457, 479), True, 'import numpy as np\n'), ((2706, 2757), 'numpy.empty', 'np.empty', (['(self.variational_k, 2)'], {'dtype': 'np.float64'}), '((self.variational_k, 2), dtype=np.float64)\n', (2714, 2757), True, 'import numpy as np\n'), ((2815, 2844), 'numpy.sum', 'np.sum', (['self.variational_d', '(1)'], {}), '(self.variational_d, 1)\n', (2821, 2844), True, 'import numpy as np\n'), ((891, 906), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (900, 906), True, 'import numpy as np\n'), ((1252, 1271), 'numpy.bincount', 'np.bincount', (['self.d'], {}), '(self.d)\n', (1263, 1271), True, 'import numpy as np\n'), ((3901, 3926), 'numpy.sum', 'np.sum', (['_variational_d', '(1)'], {}), '(_variational_d, 1)\n', (3907, 3926), True, 'import numpy as np\n'), ((4410, 4449), 'scipy.special.loggamma', 'loggamma', (['(self.alpha + j * self.pyd + 1)'], {}), '(self.alpha + j * self.pyd + 1)\n', (4418, 4449), False, 'from scipy.special import loggamma\n'), ((4469, 4514), 'scipy.special.loggamma', 'loggamma', (['(self.alpha + (j + 1) * self.pyd + 1)'], {}), '(self.alpha + (j + 1) * self.pyd + 1)\n', (4477, 4514), False, 'from scipy.special import loggamma\n'), ((4534, 4556), 'scipy.special.loggamma', 'loggamma', (['(1 - self.pyd)'], {}), '(1 - self.pyd)\n', (4542, 4556), False, 'from scipy.special import loggamma\n'), ((4917, 4948), 'scipy.special.loggamma', 'loggamma', (['(params[0] + params[1])'], {}), '(params[0] + params[1])\n', (4925, 4948), False, 'from scipy.special import loggamma\n'), ((4968, 4987), 'scipy.special.loggamma', 'loggamma', (['params[0]'], {}), '(params[0])\n', (4976, 4987), False, 'from scipy.special import loggamma\n'), ((4990, 5009), 'scipy.special.loggamma', 'loggamma', (['params[1]'], {}), '(params[1])\n', (4998, 5009), False, 'from scipy.special import loggamma\n'), ((1123, 1161), 'numpy.concatenate', 'np.concatenate', (['([1], 1 - self.v[:-1])'], {}), '(([1], 1 - self.v[:-1]))\n', (1137, 1161), True, 'import numpy as np\n'), ((1706, 1744), 'numpy.concatenate', 'np.concatenate', (['([1], 1 - self.v[:-1])'], {}), '(([1], 1 - self.v[:-1]))\n', (1720, 1744), True, 'import numpy as np\n'), ((2410, 2448), 'numpy.concatenate', 'np.concatenate', (['([1], 1 - self.v[:-1])'], {}), '(([1], 1 - self.v[:-1]))\n', (2424, 2448), True, 'import numpy as np\n'), ((2875, 2895), 'numpy.cumsum', 'np.cumsum', (['a_c[::-1]'], {}), '(a_c[::-1])\n', (2884, 2895), True, 'import numpy as np\n'), ((1306, 1326), 'numpy.cumsum', 'np.cumsum', (['a_c[::-1]'], {}), '(a_c[::-1])\n', (1315, 1326), True, 'import numpy as np\n'), ((3045, 3088), 'numpy.arange', 'np.arange', (['self.variational_params.shape[0]'], {}), '(self.variational_params.shape[0])\n', (3054, 3088), True, 'import numpy as np\n')]
|
###################################################
## ##
## This file is part of the KinBot code v2.0 ##
## ##
## The contents are covered by the terms of the ##
## BSD 3-clause license included in the LICENSE ##
## file, found at the root. ##
## ##
## Copyright 2018 National Technology & ##
## Engineering Solutions of Sandia, LLC (NTESS). ##
## Under the terms of Contract DE-NA0003525 with ##
## NTESS, the U.S. Government retains certain ##
## rights to this software. ##
## ##
## Authors: ##
## <NAME> ##
## <NAME> ##
## ##
###################################################
import os,sys
import time
import logging
import numpy as np
import matplotlib.pyplot as plt
from constants import *
from stationary_pt import *
from zmat import *
from qc import *
import par
def generate_hir_geoms(species, natom, atom, mult, charge, cart, wellorts):
species.hir_status = []
species.hir_energies = []
species.hir_geoms = []
while len(species.hir_status) < len(species.dihed):
species.hir_status.append([-1 for i in range(par.nrotation)])
species.hir_energies.append([-1 for i in range(par.nrotation)])
species.hir_geoms.append([[] for i in range(par.nrotation)])
for rotor in range(len(species.dihed)):
cart = np.asarray(cart)
zmat_atom, zmat_ref, zmat, zmatorder = make_zmat_from_cart(species, rotor, natom, atom, cart, 0)
# first element has the same geometry (TODO: this shouldn't be recalculated)
cart_new = make_cart_from_zmat(zmat, zmat_atom, zmat_ref, natom, atom, zmatorder)
fi = [(zi+1) for zi in zmatorder[:4]]
qc_hir(species,cart_new,wellorts,natom,atom,mult,charge,rotor,0,[fi])
for ai in range(1,par.nrotation):
ang = 360. / float(par.nrotation)
zmat[3][2] += ang
for i in range(4, natom):
if zmat_ref[i][2] == 4:
zmat[i][2] += ang
if zmat_ref[i][2] == 1:
zmat[i][2] += ang
cart_new = make_cart_from_zmat(zmat, zmat_atom, zmat_ref, natom, atom, zmatorder)
qc_hir(species,cart_new,wellorts,natom,atom,mult,charge,rotor,ai,[fi])
return 0
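# test_hir collects finished HIR (hindered internal rotation) jobs: running jobs
# are skipped, failed jobs are flagged with status 1, and converged geometries
# are only accepted when all bond lengths stay within 15% of the originals.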
def test_hir(species,natom,atom,mult,charge,wellorts):
for rotor in range(len(species.dihed)):
for ai in range(par.nrotation):
if species.hir_status[rotor][ai] == -1:
if wellorts:
job = 'hir/' + species.name + '_hir_' + str(rotor) + '_' + str(ai).zfill(2)
else:
job = 'hir/' + str(species.chemid) + '_hir_' + str(rotor) + '_' + str(ai).zfill(2)
err, geom = get_qc_geom(job, natom)
if err == 1: #still running
continue
elif err == -1: #failed
species.hir_status[rotor][ai] = 1
species.hir_energies[rotor][ai] = -1
species.hir_geoms[rotor][ai] = geom
else:
#check if all the bond lengths are within 15% of the original bond lengths
if equal_geom(species.bond,species.geom,geom,0.15):
err, energy = get_qc_energy(job)
species.hir_status[rotor][ai] = 0
species.hir_energies[rotor][ai] = energy
species.hir_geoms[rotor][ai] = geom
else:
species.hir_status[rotor][ai] = 1
species.hir_energies[rotor][ai] = -1
species.hir_geoms[rotor][ai] = geom
return 0
def check_hir(species, natom, atom, mult, charge, wellorts, wait = 0):
"""
Check for hir calculations and optionally wait for them to finish
"""
while 1:
#check if all the calculations are finished
test_hir(species,natom,atom,mult,charge,wellorts)
if all([all([test >= 0 for test in status]) for status in species.hir_status]):
for rotor in range(len(species.dihed)):
if wellorts:
job = species.name + '_hir_' + str(rotor)
else:
job = str(species.chemid) + '_hir_' + str(rotor)
angles = [i * 2 * np.pi / float(par.nrotation) for i in range(par.nrotation)]
#write profile to file
write_profile(species,rotor,job,atom,natom)
species.hir_fourier.append(fourier_fit(job,angles,species.hir_energies[rotor],species.hir_status[rotor],plot_fit = 0))
return 1
else:
if wait:
time.sleep(1)
else:
return 0
def write_profile(species,rotor,job,atom,natom):
"""
Write a molden-readable file with the HIR scan (geometries and energies)
"""
file = open('hir/' + job + '.xyz','w')
for i in range(par.nrotation):
s = str(natom) + '\n'
s += 'energy = ' + str(species.hir_energies[rotor][i]) + '\n'
for j,at in enumerate(atom):
x,y,z = species.hir_geoms[rotor][i][j]
s += '{} {:.8f} {:.8f} {:.8f}\n'.format(at,x,y,z)
file.write(s)
file.close()
def fourier_fit(job,angles,energies,status,plot_fit = 0):
"""
Create an alternative Fourier formulation of a hindered rotor
profile (Vanspeybroeck et al.); the angles are in radians and
the energies in kcal per mol
plot_fit: plot the profile and the fit to a png
"""
n_terms = 6 #the number of sine and cosine terms
ang = [angles[i] for i in range(len(status)) if status[i] == 0]
ens = [(energies[i] - energies[0])*AUtoKCAL for i in range(len(status)) if status[i] == 0]
if len(ens) < par.nrotation - 2:
#more than two points are off
logging.warning("Hindered rotor potential has more than 2 failures for " + job)
X = np.zeros((len(ang), 2 * n_terms))
for i,ai in enumerate(ang):
for j in range(n_terms):
X[i][j] = (1 - np.cos((j+1) * ai))
X[i][j+n_terms] = np.sin((j+1) * ai)
A = np.linalg.lstsq(X,np.array(ens))[0]
for i,si in enumerate(status):
if si == 1:
energies[i] = energies[0] + get_fit_value(A,angles[i])/AUtoKCAL
if plot_fit:
#fit the plot to a png file
plt.plot(ang,ens,'ro')
fit_angles = [i * 2. * np.pi / 360 for i in range(360)]
fit_energies = [get_fit_value(A,ai) for ai in fit_angles]
plt.plot(fit_angles,fit_energies)
plt.xlabel('Dihedral angle [radians]')
plt.ylabel('Energy [kcal/mol]')
plt.savefig('hir_profiles/{}.png'.format(job))
plt.clf()
return A
def get_fit_value(A,ai):
"""
Get the fitted energy
"""
e = 0.
n_terms = len(A) // 2  # integer division: A holds the cosine then the sine coefficients
for j in range(n_terms):
e += A[j] * ( 1 - np.cos((j+1) * ai))
e += A[j+n_terms] * np.sin((j+1) * ai)
return e
def main():
"""
Calculate the 1D hindered rotor profiles
Create a fourier fit representation of the profile
"""
if __name__ == "__main__":
main()
|
[
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"logging.warning",
"numpy.asarray",
"matplotlib.pyplot.clf",
"time.sleep",
"numpy.array",
"numpy.cos",
"numpy.sin"
] |
[((1651, 1667), 'numpy.asarray', 'np.asarray', (['cart'], {}), '(cart)\n', (1661, 1667), True, 'import numpy as np\n'), ((6232, 6311), 'logging.warning', 'logging.warning', (["('Hindered rotor potential has more than 2 failures for ' + job)"], {}), "('Hindered rotor potential has more than 2 failures for ' + job)\n", (6247, 6311), False, 'import logging\n'), ((6767, 6791), 'matplotlib.pyplot.plot', 'plt.plot', (['ang', 'ens', '"""ro"""'], {}), "(ang, ens, 'ro')\n", (6775, 6791), True, 'import matplotlib.pyplot as plt\n'), ((6928, 6962), 'matplotlib.pyplot.plot', 'plt.plot', (['fit_angles', 'fit_energies'], {}), '(fit_angles, fit_energies)\n', (6936, 6962), True, 'import matplotlib.pyplot as plt\n'), ((6970, 7008), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dihedral angle [radians]"""'], {}), "('Dihedral angle [radians]')\n", (6980, 7008), True, 'import matplotlib.pyplot as plt\n'), ((7017, 7048), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Energy [kcal/mol]"""'], {}), "('Energy [kcal/mol]')\n", (7027, 7048), True, 'import matplotlib.pyplot as plt\n'), ((7112, 7121), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7119, 7121), True, 'import matplotlib.pyplot as plt\n'), ((6501, 6521), 'numpy.sin', 'np.sin', (['((j + 1) * ai)'], {}), '((j + 1) * ai)\n', (6507, 6521), True, 'import numpy as np\n'), ((6547, 6560), 'numpy.array', 'np.array', (['ens'], {}), '(ens)\n', (6555, 6560), True, 'import numpy as np\n'), ((7344, 7364), 'numpy.sin', 'np.sin', (['((j + 1) * ai)'], {}), '((j + 1) * ai)\n', (7350, 7364), True, 'import numpy as np\n'), ((5048, 5061), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5058, 5061), False, 'import time\n'), ((6451, 6471), 'numpy.cos', 'np.cos', (['((j + 1) * ai)'], {}), '((j + 1) * ai)\n', (6457, 6471), True, 'import numpy as np\n'), ((7296, 7316), 'numpy.cos', 'np.cos', (['((j + 1) * ai)'], {}), '((j + 1) * ai)\n', (7302, 7316), True, 'import numpy as np\n')]
|
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
from torch import nn
from tqdm import tqdm
import numpy as np
from datasets.preprocess import DatasetWrapper
from utils import AverageMeter
class IOC_MLP(torch.nn.Module):
def __init__(self, input_features, out_classes):
super().__init__()
self.model = nn.Sequential(
nn.Flatten(),
nn.Linear(input_features, 800),
nn.BatchNorm1d(800),
nn.ELU(),
nn.Linear(800, 800),
nn.BatchNorm1d(800),
nn.ELU(),
nn.Linear(800, 800),
nn.BatchNorm1d(800),
nn.ELU(),
nn.Linear(800, out_classes),
)
def forward(self, x):
output = self.model(x)
return output
def train_epoch(model: nn.Module, optimizer, loss_func, dataset, train_loader,
epoch,
n_epochs):
model.train()
losses = AverageMeter()
errors = AverageMeter()
with tqdm(total=len(dataset.train_set),
desc=f"Epoch {epoch + 1} / {n_epochs}") as pbar:
for data, targets in train_loader:
if torch.cuda.is_available():
data = data.cuda()
targets = targets.cuda()
optimizer.zero_grad()
outputs = model(data)
loss = loss_func(outputs, targets)
loss.backward()
optimizer.step()
# convex ensuring step:
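# The projection below keeps the weights of all Sequential layers with index >= 2
# non-negative: negative entries x are mapped to the small positive value exp(x - 5)
# after every optimizer step, which is the constraint the "convex ensuring" step
# relies on for this model.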
for name, param in model.named_parameters():
split = name.split('.')
if int(split[1]) >= 2 and split[2] == 'weight':
param_data = param.data.cpu().numpy()
param_data[param_data < 0] = np.exp(
param_data[param_data < 0] - 5)
#
param.data.copy_(torch.tensor(param_data))
batch_size = targets.size(0)
_, pred = outputs.data.cpu().topk(1, dim=1)
error = torch.ne(pred.squeeze(),
targets.cpu()).float().sum().item() / batch_size
errors.update(error, batch_size)
losses.update(loss.item())
pbar.update(data.size(0))
pbar.set_postfix(**{
'[Train/Loss]': losses.avg,
'[Train/Error]': errors.avg
})
return losses.avg, errors.avg
#
#
def test_epoch(model: nn.Module, dataset: DatasetWrapper,
test_loader: torch.utils.data.DataLoader):
model.eval()
# losses = AverageMeter()
errors = AverageMeter()
with tqdm(total=len(dataset.test_set),
desc=f"Valid") as pbar:
with torch.no_grad():
for data, targets in test_loader:
if torch.cuda.is_available():
data = data.cuda()
targets = targets.cuda()
outputs = model(data)
# loss = loss_func(outputs, targets)
batch_size = targets.size(0)
_, pred = outputs.data.cpu().topk(1, dim=1)
error = torch.ne(pred.squeeze(),
targets.cpu()).float().sum().item() / batch_size
errors.update(error, batch_size)
# losses.update(loss.item())
pbar.update(data.shape[0])
pbar.set_postfix(**{
'[Valid/Error]': errors.avg
})
return errors.avg
def fit(model: IOC_MLP, dataset: DatasetWrapper, lr=0.0001, batch_size=64,
n_epochs=10, path=None):
if path is None:
path = f'trained_models/ioc_mlp.{dataset.name}'
writer = SummaryWriter(f'runs/ioc_mlp.{dataset.name}')
if torch.cuda.is_available():
model.cuda()
model.train()
train_loader = torch.utils.data.DataLoader(dataset.train_set,
batch_size=batch_size,
shuffle=True,
)
test_loader = torch.utils.data.DataLoader(dataset.test_set,
batch_size=batch_size,
)
valid_loader = torch.utils.data.DataLoader(dataset.valid_set,
batch_size=batch_size,
)
loss_func = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
best_error = np.inf
counter = 0
for epoch in range(n_epochs):
train_loss, train_error = train_epoch(model=model, optimizer=optimizer,
loss_func=loss_func,
dataset=dataset,
train_loader=train_loader,
epoch=epoch,
n_epochs=n_epochs)
valid_error = test_epoch(model, dataset, valid_loader)
writer.add_scalars('loss', {'train': train_loss}, epoch)
writer.add_scalars('accuracy', {'train': (1 - train_error) * 100,
'valid': (1 - valid_error) * 100},
epoch)
print(valid_error)
if valid_error < best_error:
print('Saving!')
torch.save(model.state_dict(), path)
best_error = valid_error
counter = 0
else:
counter += 1
if counter > 7:
print("Patience came ending now")
break
writer.close()
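# Hypothetical usage sketch (names and dataset construction are illustrative only;
# DatasetWrapper's real constructor may differ):
# dataset = DatasetWrapper(...)                      # wraps train/valid/test splits
# model = IOC_MLP(input_features=28 * 28, out_classes=10)
# fit(model, dataset, lr=1e-4, batch_size=64, n_epochs=10)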
|
[
"torch.nn.ELU",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.CrossEntropyLoss",
"torch.nn.Flatten",
"numpy.exp",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"utils.AverageMeter"
] |
[((960, 974), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (972, 974), False, 'from utils import AverageMeter\n'), ((988, 1002), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (1000, 1002), False, 'from utils import AverageMeter\n'), ((2598, 2612), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2610, 2612), False, 'from utils import AverageMeter\n'), ((3693, 3738), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['f"""runs/ioc_mlp.{dataset.name}"""'], {}), "(f'runs/ioc_mlp.{dataset.name}')\n", (3706, 3738), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((4440, 4461), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4459, 4461), False, 'from torch import nn\n'), ((378, 390), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (388, 390), False, 'from torch import nn\n'), ((404, 434), 'torch.nn.Linear', 'nn.Linear', (['input_features', '(800)'], {}), '(input_features, 800)\n', (413, 434), False, 'from torch import nn\n'), ((448, 467), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(800)'], {}), '(800)\n', (462, 467), False, 'from torch import nn\n'), ((481, 489), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (487, 489), False, 'from torch import nn\n'), ((503, 522), 'torch.nn.Linear', 'nn.Linear', (['(800)', '(800)'], {}), '(800, 800)\n', (512, 522), False, 'from torch import nn\n'), ((536, 555), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(800)'], {}), '(800)\n', (550, 555), False, 'from torch import nn\n'), ((569, 577), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (575, 577), False, 'from torch import nn\n'), ((591, 610), 'torch.nn.Linear', 'nn.Linear', (['(800)', '(800)'], {}), '(800, 800)\n', (600, 610), False, 'from torch import nn\n'), ((624, 643), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(800)'], {}), '(800)\n', (638, 643), False, 'from torch import nn\n'), ((657, 665), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (663, 665), False, 'from torch import nn\n'), ((679, 706), 'torch.nn.Linear', 'nn.Linear', (['(800)', 'out_classes'], {}), '(800, out_classes)\n', (688, 706), False, 'from torch import nn\n'), ((1752, 1790), 'numpy.exp', 'np.exp', (['(param_data[param_data < 0] - 5)'], {}), '(param_data[param_data < 0] - 5)\n', (1758, 1790), True, 'import numpy as np\n')]
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid.framework import program_guard, Program
from paddle.fluid.executor import Executor
from paddle.fluid import framework
from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell
from paddle.fluid.layers import rnn as dynamic_rnn
from paddle.fluid import contrib
from paddle.fluid.contrib.layers import basic_lstm
import paddle.fluid.layers.utils as utils
import numpy as np
class TestLSTMCellError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
batch_size, input_size, hidden_size = 4, 16, 16
inputs = fluid.data(name='inputs',
shape=[None, input_size],
dtype='float32')
pre_hidden = fluid.data(name='pre_hidden',
shape=[None, hidden_size],
dtype='float32')
pre_cell = fluid.data(name='pre_cell',
shape=[None, hidden_size],
dtype='float32')
cell = LSTMCell(hidden_size)
def test_input_Variable():
np_input = np.random.random(
(batch_size, input_size)).astype("float32")
cell(np_input, [pre_hidden, pre_cell])
self.assertRaises(TypeError, test_input_Variable)
def test_pre_hidden_Variable():
np_pre_hidden = np.random.random(
(batch_size, hidden_size)).astype("float32")
cell(inputs, [np_pre_hidden, pre_cell])
self.assertRaises(TypeError, test_pre_hidden_Variable)
def test_pre_cell_Variable():
np_pre_cell = np.random.random(
(batch_size, input_size)).astype("float32")
cell(inputs, [pre_hidden, np_pre_cell])
self.assertRaises(TypeError, test_pre_cell_Variable)
def test_input_type():
error_inputs = fluid.data(name='error_inputs',
shape=[None, input_size],
dtype='int32')
cell(error_inputs, [pre_hidden, pre_cell])
self.assertRaises(TypeError, test_input_type)
def test_pre_hidden_type():
error_pre_hidden = fluid.data(name='error_pre_hidden',
shape=[None, hidden_size],
dtype='int32')
cell(inputs, [error_pre_hidden, pre_cell])
self.assertRaises(TypeError, test_pre_hidden_type)
def test_pre_cell_type():
error_pre_cell = fluid.data(name='error_pre_cell',
shape=[None, hidden_size],
dtype='int32')
cell(inputs, [pre_hidden, error_pre_cell])
self.assertRaises(TypeError, test_pre_cell_type)
def test_dtype():
# the input type must be Variable
LSTMCell(hidden_size, dtype="int32")
self.assertRaises(TypeError, test_dtype)
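# TestLSTMCell.test_run below checks numerical equivalence: an LSTMCell and a
# BasicLSTMUnit are given identical randomly drawn parameters, fed the same
# inputs, and their hidden outputs are compared with np.allclose.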
class TestLSTMCell(unittest.TestCase):
def setUp(self):
self.batch_size = 4
self.input_size = 16
self.hidden_size = 16
def test_run(self):
inputs = fluid.data(name='inputs',
shape=[None, self.input_size],
dtype='float32')
pre_hidden = fluid.data(name='pre_hidden',
shape=[None, self.hidden_size],
dtype='float32')
pre_cell = fluid.data(name='pre_cell',
shape=[None, self.hidden_size],
dtype='float32')
cell = LSTMCell(self.hidden_size)
lstm_hidden_new, lstm_states_new = cell(inputs, [pre_hidden, pre_cell])
lstm_unit = contrib.layers.rnn_impl.BasicLSTMUnit(
"basicLSTM", self.hidden_size, None, None, None, None, 1.0,
"float32")
lstm_hidden, lstm_cell = lstm_unit(inputs, pre_hidden, pre_cell)
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = Executor(place)
exe.run(framework.default_startup_program())
inputs_np = np.random.uniform(
-0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')
pre_hidden_np = np.random.uniform(
-0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
pre_cell_np = np.random.uniform(
-0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
param_names = [[
"LSTMCell/BasicLSTMUnit_0.w_0", "basicLSTM/BasicLSTMUnit_0.w_0"
], ["LSTMCell/BasicLSTMUnit_0.b_0", "basicLSTM/BasicLSTMUnit_0.b_0"]]
for names in param_names:
param = np.array(fluid.global_scope().find_var(
names[0]).get_tensor())
param = np.random.uniform(-0.1, 0.1,
size=param.shape).astype('float32')
fluid.global_scope().find_var(names[0]).get_tensor().set(
param, place)
fluid.global_scope().find_var(names[1]).get_tensor().set(
param, place)
out = exe.run(feed={
'inputs': inputs_np,
'pre_hidden': pre_hidden_np,
'pre_cell': pre_cell_np
},
fetch_list=[lstm_hidden_new, lstm_hidden])
self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4, atol=0))
class TestGRUCellError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
batch_size, input_size, hidden_size = 4, 16, 16
inputs = fluid.data(name='inputs',
shape=[None, input_size],
dtype='float32')
pre_hidden = layers.data(name='pre_hidden',
shape=[None, hidden_size],
append_batch_size=False,
dtype='float32')
cell = GRUCell(hidden_size)
def test_input_Variable():
np_input = np.random.random(
(batch_size, input_size)).astype("float32")
cell(np_input, pre_hidden)
self.assertRaises(TypeError, test_input_Variable)
def test_pre_hidden_Variable():
np_pre_hidden = np.random.random(
(batch_size, hidden_size)).astype("float32")
cell(inputs, np_pre_hidden)
self.assertRaises(TypeError, test_pre_hidden_Variable)
def test_input_type():
error_inputs = fluid.data(name='error_inputs',
shape=[None, input_size],
dtype='int32')
cell(error_inputs, pre_hidden)
self.assertRaises(TypeError, test_input_type)
def test_pre_hidden_type():
error_pre_hidden = fluid.data(name='error_pre_hidden',
shape=[None, hidden_size],
dtype='int32')
cell(inputs, error_pre_hidden)
self.assertRaises(TypeError, test_pre_hidden_type)
def test_dtype():
# the input type must be Variable
GRUCell(hidden_size, dtype="int32")
self.assertRaises(TypeError, test_dtype)
class TestGRUCell(unittest.TestCase):
def setUp(self):
self.batch_size = 4
self.input_size = 16
self.hidden_size = 16
def test_run(self):
inputs = fluid.data(name='inputs',
shape=[None, self.input_size],
dtype='float32')
pre_hidden = layers.data(name='pre_hidden',
shape=[None, self.hidden_size],
append_batch_size=False,
dtype='float32')
cell = GRUCell(self.hidden_size)
gru_hidden_new, _ = cell(inputs, pre_hidden)
gru_unit = contrib.layers.rnn_impl.BasicGRUUnit("basicGRU",
self.hidden_size, None,
None, None, None,
"float32")
gru_hidden = gru_unit(inputs, pre_hidden)
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = Executor(place)
exe.run(framework.default_startup_program())
inputs_np = np.random.uniform(
-0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')
pre_hidden_np = np.random.uniform(
-0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
param_names = [
["GRUCell/BasicGRUUnit_0.w_0", "basicGRU/BasicGRUUnit_0.w_0"],
["GRUCell/BasicGRUUnit_0.w_1", "basicGRU/BasicGRUUnit_0.w_1"],
["GRUCell/BasicGRUUnit_0.b_0", "basicGRU/BasicGRUUnit_0.b_0"],
["GRUCell/BasicGRUUnit_0.b_1", "basicGRU/BasicGRUUnit_0.b_1"]
]
for names in param_names:
param = np.array(fluid.global_scope().find_var(
names[0]).get_tensor())
param = np.random.uniform(-0.1, 0.1,
size=param.shape).astype('float32')
fluid.global_scope().find_var(names[0]).get_tensor().set(
param, place)
fluid.global_scope().find_var(names[1]).get_tensor().set(
param, place)
out = exe.run(feed={
'inputs': inputs_np,
'pre_hidden': pre_hidden_np
},
fetch_list=[gru_hidden_new, gru_hidden])
self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4, atol=0))
class TestRnnError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
batch_size = 4
input_size = 16
hidden_size = 16
seq_len = 4
inputs = fluid.data(name='inputs',
shape=[None, input_size],
dtype='float32')
pre_hidden = layers.data(name='pre_hidden',
shape=[None, hidden_size],
append_batch_size=False,
dtype='float32')
inputs_basic_lstm = fluid.data(name='inputs_basic_lstm',
shape=[None, None, input_size],
dtype='float32')
sequence_length = fluid.data(name="sequence_length",
shape=[None],
dtype='int64')
inputs_dynamic_rnn = layers.transpose(inputs_basic_lstm,
perm=[1, 0, 2])
cell = LSTMCell(hidden_size, name="LSTMCell_for_rnn")
np_inputs_dynamic_rnn = np.random.random(
(seq_len, batch_size, input_size)).astype("float32")
def test_input_Variable():
dynamic_rnn(cell=cell,
inputs=np_inputs_dynamic_rnn,
sequence_length=sequence_length,
is_reverse=False)
self.assertRaises(TypeError, test_input_Variable)
def test_input_list():
dynamic_rnn(cell=cell,
inputs=[np_inputs_dynamic_rnn],
sequence_length=sequence_length,
is_reverse=False)
self.assertRaises(TypeError, test_input_list)
def test_initial_states_type():
cell = GRUCell(hidden_size, name="GRUCell_for_rnn")
error_initial_states = np.random.random(
(batch_size, hidden_size)).astype("float32")
dynamic_rnn(cell=cell,
inputs=inputs_dynamic_rnn,
initial_states=error_initial_states,
sequence_length=sequence_length,
is_reverse=False)
self.assertRaises(TypeError, test_initial_states_type)
def test_initial_states_list():
error_initial_states = [
np.random.random(
(batch_size, hidden_size)).astype("float32"),
np.random.random(
(batch_size, hidden_size)).astype("float32")
]
dynamic_rnn(cell=cell,
inputs=inputs_dynamic_rnn,
initial_states=error_initial_states,
sequence_length=sequence_length,
is_reverse=False)
self.assertRaises(TypeError, test_initial_states_list)
def test_sequence_length_type():
np_sequence_length = np.random.random(
(batch_size)).astype("float32")
dynamic_rnn(cell=cell,
inputs=inputs_dynamic_rnn,
sequence_length=np_sequence_length,
is_reverse=False)
self.assertRaises(TypeError, test_sequence_length_type)
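# TestRnn.test_run below compares the dynamic_rnn + LSTMCell composition against
# the fused basic_lstm layer after copying the same parameters into both, again
# asserting np.allclose on the outputs.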
class TestRnn(unittest.TestCase):
def setUp(self):
self.batch_size = 4
self.input_size = 16
self.hidden_size = 16
self.seq_len = 4
def test_run(self):
inputs_basic_lstm = fluid.data(name='inputs_basic_lstm',
shape=[None, None, self.input_size],
dtype='float32')
sequence_length = fluid.data(name="sequence_length",
shape=[None],
dtype='int64')
inputs_dynamic_rnn = layers.transpose(inputs_basic_lstm, perm=[1, 0, 2])
cell = LSTMCell(self.hidden_size, name="LSTMCell_for_rnn")
output, final_state = dynamic_rnn(cell=cell,
inputs=inputs_dynamic_rnn,
sequence_length=sequence_length,
is_reverse=False)
output_new = layers.transpose(output, perm=[1, 0, 2])
rnn_out, last_hidden, last_cell = basic_lstm(inputs_basic_lstm, None, None, self.hidden_size, num_layers=1, \
batch_first = False, bidirectional=False, sequence_length=sequence_length, forget_bias = 1.0)
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = Executor(place)
exe.run(framework.default_startup_program())
inputs_basic_lstm_np = np.random.uniform(
-0.1, 0.1,
(self.seq_len, self.batch_size, self.input_size)).astype('float32')
sequence_length_np = np.ones(self.batch_size,
dtype='int64') * self.seq_len
inputs_np = np.random.uniform(
-0.1, 0.1, (self.batch_size, self.input_size)).astype('float32')
pre_hidden_np = np.random.uniform(
-0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
pre_cell_np = np.random.uniform(
-0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
param_names = [[
"LSTMCell_for_rnn/BasicLSTMUnit_0.w_0",
"basic_lstm_layers_0/BasicLSTMUnit_0.w_0"
],
[
"LSTMCell_for_rnn/BasicLSTMUnit_0.b_0",
"basic_lstm_layers_0/BasicLSTMUnit_0.b_0"
]]
for names in param_names:
param = np.array(fluid.global_scope().find_var(
names[0]).get_tensor())
param = np.random.uniform(-0.1, 0.1,
size=param.shape).astype('float32')
fluid.global_scope().find_var(names[0]).get_tensor().set(
param, place)
fluid.global_scope().find_var(names[1]).get_tensor().set(
param, place)
out = exe.run(feed={
'inputs_basic_lstm': inputs_basic_lstm_np,
'sequence_length': sequence_length_np,
'inputs': inputs_np,
'pre_hidden': pre_hidden_np,
'pre_cell': pre_cell_np
},
fetch_list=[output_new, rnn_out])
self.assertTrue(np.allclose(out[0], out[1], rtol=1e-4))
class TestRnnUtil(unittest.TestCase):
"""
Test cases for rnn apis' utility methods for coverage.
"""
def test_case(self):
inputs = {"key1": 1, "key2": 2}
func = lambda x: x + 1
outputs = utils.map_structure(func, inputs)
utils.assert_same_structure(inputs, outputs)
try:
inputs["key3"] = 3
utils.assert_same_structure(inputs, outputs)
except ValueError as identifier:
pass
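# EncoderCell / DecoderCell below stack num_layers LSTMCells; each step feeds the
# previous layer's output (optionally with dropout) into the next cell and returns
# the per-layer states, which is the contract RNNCell subclasses expose via call().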
class EncoderCell(RNNCell):
"""Encoder Cell"""
def __init__(
self,
num_layers,
hidden_size,
dropout_prob=0.,
init_scale=0.1,
):
self.num_layers = num_layers
self.hidden_size = hidden_size
self.dropout_prob = dropout_prob
self.lstm_cells = []
for i in range(num_layers):
self.lstm_cells.append(LSTMCell(hidden_size))
def call(self, step_input, states):
new_states = []
for i in range(self.num_layers):
out, new_state = self.lstm_cells[i](step_input, states[i])
step_input = layers.dropout(
out,
self.dropout_prob,
) if self.dropout_prob else out
new_states.append(new_state)
return step_input, new_states
@property
def state_shape(self):
return [cell.state_shape for cell in self.lstm_cells]
class DecoderCell(RNNCell):
"""Decoder Cell"""
def __init__(self, num_layers, hidden_size, dropout_prob=0.):
self.num_layers = num_layers
self.hidden_size = hidden_size
self.dropout_prob = dropout_prob
self.lstm_cells = []
for i in range(num_layers):
self.lstm_cells.append(LSTMCell(hidden_size))
def call(self, step_input, states):
new_lstm_states = []
for i in range(self.num_layers):
out, new_lstm_state = self.lstm_cells[i](step_input, states[i])
step_input = layers.dropout(
out,
self.dropout_prob,
) if self.dropout_prob else out
new_lstm_states.append(new_lstm_state)
return step_input, new_lstm_states
def def_seq2seq_model(num_layers, hidden_size, dropout_prob, src_vocab_size,
trg_vocab_size):
"vanilla seq2seq model"
# data
source = fluid.data(name="src", shape=[None, None], dtype="int64")
source_length = fluid.data(name="src_sequence_length",
shape=[None],
dtype="int64")
target = fluid.data(name="trg", shape=[None, None], dtype="int64")
target_length = fluid.data(name="trg_sequence_length",
shape=[None],
dtype="int64")
label = fluid.data(name="label", shape=[None, None, 1], dtype="int64")
# embedding
src_emb = fluid.embedding(source, (src_vocab_size, hidden_size))
tar_emb = fluid.embedding(target, (src_vocab_size, hidden_size))
# encoder
enc_cell = EncoderCell(num_layers, hidden_size, dropout_prob)
enc_output, enc_final_state = dynamic_rnn(cell=enc_cell,
inputs=src_emb,
sequence_length=source_length)
# decoder
dec_cell = DecoderCell(num_layers, hidden_size, dropout_prob)
dec_output, dec_final_state = dynamic_rnn(cell=dec_cell,
inputs=tar_emb,
initial_states=enc_final_state)
logits = layers.fc(dec_output,
size=trg_vocab_size,
num_flatten_dims=len(dec_output.shape) - 1,
bias_attr=False)
# loss
loss = layers.softmax_with_cross_entropy(logits=logits,
label=label,
soft_label=False)
loss = layers.unsqueeze(loss, axes=[2])
max_tar_seq_len = layers.shape(target)[1]
tar_mask = layers.sequence_mask(target_length,
maxlen=max_tar_seq_len,
dtype="float32")
loss = loss * tar_mask
loss = layers.reduce_mean(loss, dim=[0])
loss = layers.reduce_sum(loss)
# optimizer
optimizer = fluid.optimizer.Adam(0.001)
optimizer.minimize(loss)
return loss
class TestSeq2SeqModel(unittest.TestCase):
"""
Test cases to confirm seq2seq api training correctly.
"""
def setUp(self):
np.random.seed(123)
self.model_hparams = {
"num_layers": 2,
"hidden_size": 128,
"dropout_prob": 0.1,
"src_vocab_size": 100,
"trg_vocab_size": 100
}
self.iter_num = iter_num = 2
self.batch_size = batch_size = 4
src_seq_len = 10
trg_seq_len = 12
self.data = {
"src":
np.random.randint(
2, self.model_hparams["src_vocab_size"],
(iter_num * batch_size, src_seq_len)).astype("int64"),
"src_sequence_length":
np.random.randint(1, src_seq_len,
(iter_num * batch_size, )).astype("int64"),
"trg":
np.random.randint(
2, self.model_hparams["src_vocab_size"],
(iter_num * batch_size, trg_seq_len)).astype("int64"),
"trg_sequence_length":
np.random.randint(1, trg_seq_len,
(iter_num * batch_size, )).astype("int64"),
"label":
np.random.randint(
2, self.model_hparams["src_vocab_size"],
(iter_num * batch_size, trg_seq_len, 1)).astype("int64"),
}
place = core.CUDAPlace(
0) if core.is_compiled_with_cuda() else core.CPUPlace()
self.exe = Executor(place)
def test_seq2seq_model(self):
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
cost = def_seq2seq_model(**self.model_hparams)
self.exe.run(startup_program)
for iter_idx in range(self.iter_num):
cost_val = self.exe.run(feed={
"src":
self.data["src"][iter_idx * self.batch_size:(iter_idx + 1) *
self.batch_size, :],
"src_sequence_length":
self.data["src_sequence_length"][iter_idx *
self.batch_size:(iter_idx +
1) *
self.batch_size],
"trg":
self.data["trg"][iter_idx * self.batch_size:(iter_idx + 1) *
self.batch_size, :],
"trg_sequence_length":
self.data["trg_sequence_length"][iter_idx *
self.batch_size:(iter_idx +
1) *
self.batch_size],
"label":
self.data["label"][iter_idx *
self.batch_size:(iter_idx + 1) *
self.batch_size]
},
fetch_list=[cost])[0]
print("iter_idx: %d, cost: %f" % (iter_idx, cost_val))
if __name__ == '__main__':
unittest.main()
|
[
"paddle.fluid.embedding",
"paddle.fluid.layers.data",
"paddle.fluid.layers.shape",
"paddle.fluid.layers.sequence_mask",
"paddle.fluid.contrib.layers.rnn_impl.BasicLSTMUnit",
"unittest.main",
"paddle.fluid.optimizer.Adam",
"paddle.fluid.layers.transpose",
"paddle.fluid.executor.Executor",
"paddle.fluid.contrib.layers.basic_lstm",
"paddle.fluid.layers.reduce_mean",
"numpy.random.random",
"paddle.fluid.global_scope",
"paddle.fluid.layers.reduce_sum",
"numpy.random.seed",
"paddle.fluid.core.is_compiled_with_cuda",
"paddle.fluid.layers.rnn.LSTMCell",
"numpy.allclose",
"paddle.fluid.data",
"paddle.fluid.layers.utils.assert_same_structure",
"paddle.fluid.contrib.layers.rnn_impl.BasicGRUUnit",
"paddle.fluid.Program",
"numpy.ones",
"paddle.fluid.layers.dropout",
"paddle.fluid.layers.softmax_with_cross_entropy",
"paddle.fluid.layers.rnn",
"paddle.fluid.layers.utils.map_structure",
"paddle.fluid.layers.unsqueeze",
"paddle.fluid.framework.Program",
"numpy.random.randint",
"paddle.fluid.framework.default_startup_program",
"numpy.random.uniform",
"paddle.fluid.core.CUDAPlace",
"paddle.fluid.layers.rnn.GRUCell",
"paddle.fluid.program_guard",
"paddle.fluid.core.CPUPlace"
] |
[((20147, 20204), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""src"""', 'shape': '[None, None]', 'dtype': '"""int64"""'}), "(name='src', shape=[None, None], dtype='int64')\n", (20157, 20204), True, 'import paddle.fluid as fluid\n'), ((20225, 20292), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""src_sequence_length"""', 'shape': '[None]', 'dtype': '"""int64"""'}), "(name='src_sequence_length', shape=[None], dtype='int64')\n", (20235, 20292), True, 'import paddle.fluid as fluid\n'), ((20368, 20425), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""trg"""', 'shape': '[None, None]', 'dtype': '"""int64"""'}), "(name='trg', shape=[None, None], dtype='int64')\n", (20378, 20425), True, 'import paddle.fluid as fluid\n'), ((20446, 20513), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""trg_sequence_length"""', 'shape': '[None]', 'dtype': '"""int64"""'}), "(name='trg_sequence_length', shape=[None], dtype='int64')\n", (20456, 20513), True, 'import paddle.fluid as fluid\n'), ((20588, 20650), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""label"""', 'shape': '[None, None, 1]', 'dtype': '"""int64"""'}), "(name='label', shape=[None, None, 1], dtype='int64')\n", (20598, 20650), True, 'import paddle.fluid as fluid\n'), ((20682, 20736), 'paddle.fluid.embedding', 'fluid.embedding', (['source', '(src_vocab_size, hidden_size)'], {}), '(source, (src_vocab_size, hidden_size))\n', (20697, 20736), True, 'import paddle.fluid as fluid\n'), ((20751, 20805), 'paddle.fluid.embedding', 'fluid.embedding', (['target', '(src_vocab_size, hidden_size)'], {}), '(target, (src_vocab_size, hidden_size))\n', (20766, 20805), True, 'import paddle.fluid as fluid\n'), ((20921, 20994), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'enc_cell', 'inputs': 'src_emb', 'sequence_length': 'source_length'}), '(cell=enc_cell, inputs=src_emb, sequence_length=source_length)\n', (20932, 20994), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((21202, 21276), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'dec_cell', 'inputs': 'tar_emb', 'initial_states': 'enc_final_state'}), '(cell=dec_cell, inputs=tar_emb, initial_states=enc_final_state)\n', (21213, 21276), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((21578, 21657), 'paddle.fluid.layers.softmax_with_cross_entropy', 'layers.softmax_with_cross_entropy', ([], {'logits': 'logits', 'label': 'label', 'soft_label': '(False)'}), '(logits=logits, label=label, soft_label=False)\n', (21611, 21657), True, 'import paddle.fluid.layers as layers\n'), ((21759, 21791), 'paddle.fluid.layers.unsqueeze', 'layers.unsqueeze', (['loss'], {'axes': '[2]'}), '(loss, axes=[2])\n', (21775, 21791), True, 'import paddle.fluid.layers as layers\n'), ((21853, 21929), 'paddle.fluid.layers.sequence_mask', 'layers.sequence_mask', (['target_length'], {'maxlen': 'max_tar_seq_len', 'dtype': '"""float32"""'}), "(target_length, maxlen=max_tar_seq_len, dtype='float32')\n", (21873, 21929), True, 'import paddle.fluid.layers as layers\n'), ((22040, 22073), 'paddle.fluid.layers.reduce_mean', 'layers.reduce_mean', (['loss'], {'dim': '[0]'}), '(loss, dim=[0])\n', (22058, 22073), True, 'import paddle.fluid.layers as layers\n'), ((22085, 22108), 'paddle.fluid.layers.reduce_sum', 'layers.reduce_sum', (['loss'], {}), '(loss)\n', (22102, 22108), True, 'import paddle.fluid.layers as layers\n'), ((22142, 22169), 'paddle.fluid.optimizer.Adam', 'fluid.optimizer.Adam', (['(0.001)'], {}), '(0.001)\n', (22162, 22169), True, 'import paddle.fluid as fluid\n'), ((25505, 
25520), 'unittest.main', 'unittest.main', ([], {}), '()\n', (25518, 25520), False, 'import unittest\n'), ((4166, 4239), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs"""', 'shape': '[None, self.input_size]', 'dtype': '"""float32"""'}), "(name='inputs', shape=[None, self.input_size], dtype='float32')\n", (4176, 4239), True, 'import paddle.fluid as fluid\n'), ((4317, 4395), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""pre_hidden"""', 'shape': '[None, self.hidden_size]', 'dtype': '"""float32"""'}), "(name='pre_hidden', shape=[None, self.hidden_size], dtype='float32')\n", (4327, 4395), True, 'import paddle.fluid as fluid\n'), ((4479, 4555), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""pre_cell"""', 'shape': '[None, self.hidden_size]', 'dtype': '"""float32"""'}), "(name='pre_cell', shape=[None, self.hidden_size], dtype='float32')\n", (4489, 4555), True, 'import paddle.fluid as fluid\n'), ((4632, 4658), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['self.hidden_size'], {}), '(self.hidden_size)\n', (4640, 4658), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((4760, 4872), 'paddle.fluid.contrib.layers.rnn_impl.BasicLSTMUnit', 'contrib.layers.rnn_impl.BasicLSTMUnit', (['"""basicLSTM"""', 'self.hidden_size', 'None', 'None', 'None', 'None', '(1.0)', '"""float32"""'], {}), "('basicLSTM', self.hidden_size, None,\n None, None, None, 1.0, 'float32')\n", (4797, 4872), False, 'from paddle.fluid import contrib\n'), ((4979, 5007), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (5005, 5007), True, 'import paddle.fluid.core as core\n'), ((5111, 5126), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (5119, 5126), False, 'from paddle.fluid.executor import Executor\n'), ((8669, 8742), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs"""', 'shape': '[None, self.input_size]', 'dtype': '"""float32"""'}), "(name='inputs', shape=[None, self.input_size], dtype='float32')\n", (8679, 8742), True, 'import paddle.fluid as fluid\n'), ((8820, 8928), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""pre_hidden"""', 'shape': '[None, self.hidden_size]', 'append_batch_size': '(False)', 'dtype': '"""float32"""'}), "(name='pre_hidden', shape=[None, self.hidden_size],\n append_batch_size=False, dtype='float32')\n", (8831, 8928), True, 'import paddle.fluid.layers as layers\n'), ((9040, 9065), 'paddle.fluid.layers.rnn.GRUCell', 'GRUCell', (['self.hidden_size'], {}), '(self.hidden_size)\n', (9047, 9065), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((9139, 9244), 'paddle.fluid.contrib.layers.rnn_impl.BasicGRUUnit', 'contrib.layers.rnn_impl.BasicGRUUnit', (['"""basicGRU"""', 'self.hidden_size', 'None', 'None', 'None', 'None', '"""float32"""'], {}), "('basicGRU', self.hidden_size, None,\n None, None, None, 'float32')\n", (9175, 9244), False, 'from paddle.fluid import contrib\n'), ((9471, 9499), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (9497, 9499), True, 'import paddle.fluid.core as core\n'), ((9603, 9618), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (9611, 9618), False, 'from paddle.fluid.executor import Executor\n'), ((14753, 14847), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs_basic_lstm"""', 'shape': '[None, None, self.input_size]', 'dtype': '"""float32"""'}), "(name='inputs_basic_lstm', shape=[None, None, self.input_size],\n dtype='float32')\n", (14763, 
14847), True, 'import paddle.fluid as fluid\n'), ((14948, 15011), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""sequence_length"""', 'shape': '[None]', 'dtype': '"""int64"""'}), "(name='sequence_length', shape=[None], dtype='int64')\n", (14958, 15011), True, 'import paddle.fluid as fluid\n'), ((15116, 15167), 'paddle.fluid.layers.transpose', 'layers.transpose', (['inputs_basic_lstm'], {'perm': '[1, 0, 2]'}), '(inputs_basic_lstm, perm=[1, 0, 2])\n', (15132, 15167), True, 'import paddle.fluid.layers as layers\n'), ((15183, 15234), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['self.hidden_size'], {'name': '"""LSTMCell_for_rnn"""'}), "(self.hidden_size, name='LSTMCell_for_rnn')\n", (15191, 15234), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((15265, 15370), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'cell', 'inputs': 'inputs_dynamic_rnn', 'sequence_length': 'sequence_length', 'is_reverse': '(False)'}), '(cell=cell, inputs=inputs_dynamic_rnn, sequence_length=\n sequence_length, is_reverse=False)\n', (15276, 15370), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((15513, 15553), 'paddle.fluid.layers.transpose', 'layers.transpose', (['output'], {'perm': '[1, 0, 2]'}), '(output, perm=[1, 0, 2])\n', (15529, 15553), True, 'import paddle.fluid.layers as layers\n'), ((15597, 15768), 'paddle.fluid.contrib.layers.basic_lstm', 'basic_lstm', (['inputs_basic_lstm', 'None', 'None', 'self.hidden_size'], {'num_layers': '(1)', 'batch_first': '(False)', 'bidirectional': '(False)', 'sequence_length': 'sequence_length', 'forget_bias': '(1.0)'}), '(inputs_basic_lstm, None, None, self.hidden_size, num_layers=1,\n batch_first=False, bidirectional=False, sequence_length=sequence_length,\n forget_bias=1.0)\n', (15607, 15768), False, 'from paddle.fluid.contrib.layers import basic_lstm\n'), ((15795, 15823), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (15821, 15823), True, 'import paddle.fluid.core as core\n'), ((15927, 15942), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (15935, 15942), False, 'from paddle.fluid.executor import Executor\n'), ((18024, 18057), 'paddle.fluid.layers.utils.map_structure', 'utils.map_structure', (['func', 'inputs'], {}), '(func, inputs)\n', (18043, 18057), True, 'import paddle.fluid.layers.utils as utils\n'), ((18066, 18110), 'paddle.fluid.layers.utils.assert_same_structure', 'utils.assert_same_structure', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (18093, 18110), True, 'import paddle.fluid.layers.utils as utils\n'), ((21814, 21834), 'paddle.fluid.layers.shape', 'layers.shape', (['target'], {}), '(target)\n', (21826, 21834), True, 'import paddle.fluid.layers as layers\n'), ((22364, 22383), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (22378, 22383), True, 'import numpy as np\n'), ((23718, 23733), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (23726, 23733), False, 'from paddle.fluid.executor import Executor\n'), ((23792, 23807), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (23805, 23807), True, 'import paddle.fluid as fluid\n'), ((23834, 23849), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (23847, 23849), True, 'import paddle.fluid as fluid\n'), ((1382, 1450), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs"""', 'shape': '[None, input_size]', 'dtype': '"""float32"""'}), "(name='inputs', shape=[None, input_size], dtype='float32')\n", (1392, 
1450), True, 'import paddle.fluid as fluid\n'), ((1540, 1613), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""pre_hidden"""', 'shape': '[None, hidden_size]', 'dtype': '"""float32"""'}), "(name='pre_hidden', shape=[None, hidden_size], dtype='float32')\n", (1550, 1613), True, 'import paddle.fluid as fluid\n'), ((1709, 1780), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""pre_cell"""', 'shape': '[None, hidden_size]', 'dtype': '"""float32"""'}), "(name='pre_cell', shape=[None, hidden_size], dtype='float32')\n", (1719, 1780), True, 'import paddle.fluid as fluid\n'), ((1868, 1889), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['hidden_size'], {}), '(hidden_size)\n', (1876, 1889), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((5029, 5046), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (5043, 5046), True, 'import paddle.fluid.core as core\n'), ((5081, 5096), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (5094, 5096), True, 'import paddle.fluid.core as core\n'), ((5143, 5178), 'paddle.fluid.framework.default_startup_program', 'framework.default_startup_program', ([], {}), '()\n', (5176, 5178), False, 'from paddle.fluid import framework\n'), ((6416, 6464), 'numpy.allclose', 'np.allclose', (['out[0]', 'out[1]'], {'rtol': '(0.0001)', 'atol': '(0)'}), '(out[0], out[1], rtol=0.0001, atol=0)\n', (6427, 6464), True, 'import numpy as np\n'), ((6668, 6736), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs"""', 'shape': '[None, input_size]', 'dtype': '"""float32"""'}), "(name='inputs', shape=[None, input_size], dtype='float32')\n", (6678, 6736), True, 'import paddle.fluid as fluid\n'), ((6826, 6930), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""pre_hidden"""', 'shape': '[None, hidden_size]', 'append_batch_size': '(False)', 'dtype': '"""float32"""'}), "(name='pre_hidden', shape=[None, hidden_size], append_batch_size\n =False, dtype='float32')\n", (6837, 6930), True, 'import paddle.fluid.layers as layers\n'), ((7056, 7076), 'paddle.fluid.layers.rnn.GRUCell', 'GRUCell', (['hidden_size'], {}), '(hidden_size)\n', (7063, 7076), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((9521, 9538), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (9535, 9538), True, 'import paddle.fluid.core as core\n'), ((9573, 9588), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (9586, 9588), True, 'import paddle.fluid.core as core\n'), ((9635, 9670), 'paddle.fluid.framework.default_startup_program', 'framework.default_startup_program', ([], {}), '()\n', (9668, 9670), False, 'from paddle.fluid import framework\n'), ((10904, 10952), 'numpy.allclose', 'np.allclose', (['out[0]', 'out[1]'], {'rtol': '(0.0001)', 'atol': '(0)'}), '(out[0], out[1], rtol=0.0001, atol=0)\n', (10915, 10952), True, 'import numpy as np\n'), ((11200, 11268), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""inputs"""', 'shape': '[None, input_size]', 'dtype': '"""float32"""'}), "(name='inputs', shape=[None, input_size], dtype='float32')\n", (11210, 11268), True, 'import paddle.fluid as fluid\n'), ((11358, 11462), 'paddle.fluid.layers.data', 'layers.data', ([], {'name': '"""pre_hidden"""', 'shape': '[None, hidden_size]', 'append_batch_size': '(False)', 'dtype': '"""float32"""'}), "(name='pre_hidden', shape=[None, hidden_size], append_batch_size\n =False, dtype='float32')\n", (11369, 11462), True, 'import paddle.fluid.layers as layers\n'), ((11601, 11691), 'paddle.fluid.data', 
'fluid.data', ([], {'name': '"""inputs_basic_lstm"""', 'shape': '[None, None, input_size]', 'dtype': '"""float32"""'}), "(name='inputs_basic_lstm', shape=[None, None, input_size], dtype=\n 'float32')\n", (11611, 11691), True, 'import paddle.fluid as fluid\n'), ((11803, 11866), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""sequence_length"""', 'shape': '[None]', 'dtype': '"""int64"""'}), "(name='sequence_length', shape=[None], dtype='int64')\n", (11813, 11866), True, 'import paddle.fluid as fluid\n'), ((11983, 12034), 'paddle.fluid.layers.transpose', 'layers.transpose', (['inputs_basic_lstm'], {'perm': '[1, 0, 2]'}), '(inputs_basic_lstm, perm=[1, 0, 2])\n', (11999, 12034), True, 'import paddle.fluid.layers as layers\n'), ((12104, 12150), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['hidden_size'], {'name': '"""LSTMCell_for_rnn"""'}), "(hidden_size, name='LSTMCell_for_rnn')\n", (12112, 12150), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((15845, 15862), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (15859, 15862), True, 'import paddle.fluid.core as core\n'), ((15897, 15912), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (15910, 15912), True, 'import paddle.fluid.core as core\n'), ((15959, 15994), 'paddle.fluid.framework.default_startup_program', 'framework.default_startup_program', ([], {}), '()\n', (15992, 15994), False, 'from paddle.fluid import framework\n'), ((16179, 16218), 'numpy.ones', 'np.ones', (['self.batch_size'], {'dtype': '"""int64"""'}), "(self.batch_size, dtype='int64')\n", (16186, 16218), True, 'import numpy as np\n'), ((17754, 17794), 'numpy.allclose', 'np.allclose', (['out[0]', 'out[1]'], {'rtol': '(0.0001)'}), '(out[0], out[1], rtol=0.0001)\n', (17765, 17794), True, 'import numpy as np\n'), ((18167, 18211), 'paddle.fluid.layers.utils.assert_same_structure', 'utils.assert_same_structure', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (18194, 18211), True, 'import paddle.fluid.layers.utils as utils\n'), ((23649, 23677), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (23675, 23677), True, 'import paddle.fluid.core as core\n'), ((23615, 23632), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (23629, 23632), True, 'import paddle.fluid.core as core\n'), ((23683, 23698), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (23696, 23698), True, 'import paddle.fluid.core as core\n'), ((23863, 23913), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main_program', 'startup_program'], {}), '(main_program, startup_program)\n', (23882, 23913), True, 'import paddle.fluid as fluid\n'), ((1278, 1287), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (1285, 1287), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((1289, 1298), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (1296, 1298), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((2785, 2857), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_inputs"""', 'shape': '[None, input_size]', 'dtype': '"""int32"""'}), "(name='error_inputs', shape=[None, input_size], dtype='int32')\n", (2795, 2857), True, 'import paddle.fluid as fluid\n'), ((3136, 3213), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_pre_hidden"""', 'shape': '[None, hidden_size]', 'dtype': '"""int32"""'}), "(name='error_pre_hidden', shape=[None, hidden_size], dtype='int32')\n", (3146, 3213), True, 
'import paddle.fluid as fluid\n'), ((3501, 3576), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_pre_cell"""', 'shape': '[None, hidden_size]', 'dtype': '"""int32"""'}), "(name='error_pre_cell', shape=[None, hidden_size], dtype='int32')\n", (3511, 3576), True, 'import paddle.fluid as fluid\n'), ((3883, 3919), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['hidden_size'], {'dtype': '"""int32"""'}), "(hidden_size, dtype='int32')\n", (3891, 3919), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((5201, 5265), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.input_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.input_size))\n', (5218, 5265), True, 'import numpy as np\n'), ((5321, 5386), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.hidden_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.hidden_size))\n', (5338, 5386), True, 'import numpy as np\n'), ((5440, 5505), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.hidden_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.hidden_size))\n', (5457, 5505), True, 'import numpy as np\n'), ((6564, 6573), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (6571, 6573), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((6575, 6584), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (6582, 6584), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((7671, 7743), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_inputs"""', 'shape': '[None, input_size]', 'dtype': '"""int32"""'}), "(name='error_inputs', shape=[None, input_size], dtype='int32')\n", (7681, 7743), True, 'import paddle.fluid as fluid\n'), ((8010, 8087), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""error_pre_hidden"""', 'shape': '[None, hidden_size]', 'dtype': '"""int32"""'}), "(name='error_pre_hidden', shape=[None, hidden_size], dtype='int32')\n", (8020, 8087), True, 'import paddle.fluid as fluid\n'), ((8388, 8423), 'paddle.fluid.layers.rnn.GRUCell', 'GRUCell', (['hidden_size'], {'dtype': '"""int32"""'}), "(hidden_size, dtype='int32')\n", (8395, 8423), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((9693, 9757), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.input_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.input_size))\n', (9710, 9757), True, 'import numpy as np\n'), ((9813, 9878), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.hidden_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.hidden_size))\n', (9830, 9878), True, 'import numpy as np\n'), ((11048, 11057), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (11055, 11057), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((11059, 11068), 'paddle.fluid.framework.Program', 'Program', ([], {}), '()\n', (11066, 11068), False, 'from paddle.fluid.framework import program_guard, Program\n'), ((12330, 12438), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'cell', 'inputs': 'np_inputs_dynamic_rnn', 'sequence_length': 'sequence_length', 'is_reverse': '(False)'}), '(cell=cell, inputs=np_inputs_dynamic_rnn, sequence_length=\n sequence_length, is_reverse=False)\n', (12341, 12438), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((12633, 12743), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'cell', 'inputs': 
'[np_inputs_dynamic_rnn]', 'sequence_length': 'sequence_length', 'is_reverse': '(False)'}), '(cell=cell, inputs=[np_inputs_dynamic_rnn], sequence_length=\n sequence_length, is_reverse=False)\n', (12644, 12743), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((12950, 12994), 'paddle.fluid.layers.rnn.GRUCell', 'GRUCell', (['hidden_size'], {'name': '"""GRUCell_for_rnn"""'}), "(hidden_size, name='GRUCell_for_rnn')\n", (12957, 12994), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((13133, 13275), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'cell', 'inputs': 'inputs_dynamic_rnn', 'initial_states': 'error_initial_states', 'sequence_length': 'sequence_length', 'is_reverse': '(False)'}), '(cell=cell, inputs=inputs_dynamic_rnn, initial_states=\n error_initial_states, sequence_length=sequence_length, is_reverse=False)\n', (13144, 13275), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((13786, 13928), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'cell', 'inputs': 'inputs_dynamic_rnn', 'initial_states': 'error_initial_states', 'sequence_length': 'sequence_length', 'is_reverse': '(False)'}), '(cell=cell, inputs=inputs_dynamic_rnn, initial_states=\n error_initial_states, sequence_length=sequence_length, is_reverse=False)\n', (13797, 13928), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((14273, 14381), 'paddle.fluid.layers.rnn', 'dynamic_rnn', ([], {'cell': 'cell', 'inputs': 'inputs_dynamic_rnn', 'sequence_length': 'np_sequence_length', 'is_reverse': '(False)'}), '(cell=cell, inputs=inputs_dynamic_rnn, sequence_length=\n np_sequence_length, is_reverse=False)\n', (14284, 14381), True, 'from paddle.fluid.layers import rnn as dynamic_rnn\n'), ((16028, 16106), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.seq_len, self.batch_size, self.input_size)'], {}), '(-0.1, 0.1, (self.seq_len, self.batch_size, self.input_size))\n', (16045, 16106), True, 'import numpy as np\n'), ((16292, 16356), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.input_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.input_size))\n', (16309, 16356), True, 'import numpy as np\n'), ((16412, 16477), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.hidden_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.hidden_size))\n', (16429, 16477), True, 'import numpy as np\n'), ((16531, 16596), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', '(self.batch_size, self.hidden_size)'], {}), '(-0.1, 0.1, (self.batch_size, self.hidden_size))\n', (16548, 16596), True, 'import numpy as np\n'), ((18671, 18692), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['hidden_size'], {}), '(hidden_size)\n', (18679, 18692), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((18896, 18934), 'paddle.fluid.layers.dropout', 'layers.dropout', (['out', 'self.dropout_prob'], {}), '(out, self.dropout_prob)\n', (18910, 18934), True, 'import paddle.fluid.layers as layers\n'), ((19532, 19553), 'paddle.fluid.layers.rnn.LSTMCell', 'LSTMCell', (['hidden_size'], {}), '(hidden_size)\n', (19540, 19553), False, 'from paddle.fluid.layers.rnn import LSTMCell, GRUCell, RNNCell\n'), ((19767, 19805), 'paddle.fluid.layers.dropout', 'layers.dropout', (['out', 'self.dropout_prob'], {}), '(out, self.dropout_prob)\n', (19781, 19805), True, 'import paddle.fluid.layers as layers\n'), ((5872, 5918), 'numpy.random.uniform', 'np.random.uniform', 
(['(-0.1)', '(0.1)'], {'size': 'param.shape'}), '(-0.1, 0.1, size=param.shape)\n', (5889, 5918), True, 'import numpy as np\n'), ((10399, 10445), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {'size': 'param.shape'}), '(-0.1, 0.1, size=param.shape)\n', (10416, 10445), True, 'import numpy as np\n'), ((12187, 12238), 'numpy.random.random', 'np.random.random', (['(seq_len, batch_size, input_size)'], {}), '((seq_len, batch_size, input_size))\n', (12203, 12238), True, 'import numpy as np\n'), ((17113, 17159), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {'size': 'param.shape'}), '(-0.1, 0.1, size=param.shape)\n', (17130, 17159), True, 'import numpy as np\n'), ((22770, 22870), 'numpy.random.randint', 'np.random.randint', (['(2)', "self.model_hparams['src_vocab_size']", '(iter_num * batch_size, src_seq_len)'], {}), "(2, self.model_hparams['src_vocab_size'], (iter_num *\n batch_size, src_seq_len))\n", (22787, 22870), True, 'import numpy as np\n'), ((22964, 23023), 'numpy.random.randint', 'np.random.randint', (['(1)', 'src_seq_len', '(iter_num * batch_size,)'], {}), '(1, src_seq_len, (iter_num * batch_size,))\n', (22981, 23023), True, 'import numpy as np\n'), ((23103, 23203), 'numpy.random.randint', 'np.random.randint', (['(2)', "self.model_hparams['src_vocab_size']", '(iter_num * batch_size, trg_seq_len)'], {}), "(2, self.model_hparams['src_vocab_size'], (iter_num *\n batch_size, trg_seq_len))\n", (23120, 23203), True, 'import numpy as np\n'), ((23297, 23356), 'numpy.random.randint', 'np.random.randint', (['(1)', 'trg_seq_len', '(iter_num * batch_size,)'], {}), '(1, trg_seq_len, (iter_num * batch_size,))\n', (23314, 23356), True, 'import numpy as np\n'), ((23438, 23541), 'numpy.random.randint', 'np.random.randint', (['(2)', "self.model_hparams['src_vocab_size']", '(iter_num * batch_size, trg_seq_len, 1)'], {}), "(2, self.model_hparams['src_vocab_size'], (iter_num *\n batch_size, trg_seq_len, 1))\n", (23455, 23541), True, 'import numpy as np\n'), ((1957, 1999), 'numpy.random.random', 'np.random.random', (['(batch_size, input_size)'], {}), '((batch_size, input_size))\n', (1973, 1999), True, 'import numpy as np\n'), ((2234, 2277), 'numpy.random.random', 'np.random.random', (['(batch_size, hidden_size)'], {}), '((batch_size, hidden_size))\n', (2250, 2277), True, 'import numpy as np\n'), ((2514, 2556), 'numpy.random.random', 'np.random.random', (['(batch_size, input_size)'], {}), '((batch_size, input_size))\n', (2530, 2556), True, 'import numpy as np\n'), ((7144, 7186), 'numpy.random.random', 'np.random.random', (['(batch_size, input_size)'], {}), '((batch_size, input_size))\n', (7160, 7186), True, 'import numpy as np\n'), ((7409, 7452), 'numpy.random.random', 'np.random.random', (['(batch_size, hidden_size)'], {}), '((batch_size, hidden_size))\n', (7425, 7452), True, 'import numpy as np\n'), ((13034, 13077), 'numpy.random.random', 'np.random.random', (['(batch_size, hidden_size)'], {}), '((batch_size, hidden_size))\n', (13050, 13077), True, 'import numpy as np\n'), ((14187, 14215), 'numpy.random.random', 'np.random.random', (['batch_size'], {}), '(batch_size)\n', (14203, 14215), True, 'import numpy as np\n'), ((13557, 13600), 'numpy.random.random', 'np.random.random', (['(batch_size, hidden_size)'], {}), '((batch_size, hidden_size))\n', (13573, 13600), True, 'import numpy as np\n'), ((13665, 13708), 'numpy.random.random', 'np.random.random', (['(batch_size, hidden_size)'], {}), '((batch_size, hidden_size))\n', (13681, 13708), True, 'import numpy as np\n'), 
((5781, 5801), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (5799, 5801), True, 'import paddle.fluid as fluid\n'), ((10308, 10328), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (10326, 10328), True, 'import paddle.fluid as fluid\n'), ((17022, 17042), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (17040, 17042), True, 'import paddle.fluid as fluid\n'), ((5987, 6007), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (6005, 6007), True, 'import paddle.fluid as fluid\n'), ((6087, 6107), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (6105, 6107), True, 'import paddle.fluid as fluid\n'), ((10514, 10534), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (10532, 10534), True, 'import paddle.fluid as fluid\n'), ((10614, 10634), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (10632, 10634), True, 'import paddle.fluid as fluid\n'), ((17228, 17248), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (17246, 17248), True, 'import paddle.fluid as fluid\n'), ((17328, 17348), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (17346, 17348), True, 'import paddle.fluid as fluid\n')]
|
#!/usr/bin/python3
import sys
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, FileType
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pyfsdb
def parse_args():
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,
description=__doc__,
epilog="Exmaple Usage: ")
parser.add_argument("-c", "--columns", type=str, nargs=2,
help="Columns to use")
parser.add_argument("-v", "--value-column", default="count", type=str,
help="The value column to plot as the heat map")
parser.add_argument("-i", "--invert", action="store_true",
help="Invert the foreground/background colors")
parser.add_argument("-F", "--add-fractions", action="store_true",
help="Add text fraction labels to the grid")
parser.add_argument("-R", "--add-raw", action="store_true",
help="Add text raw-value labels to the grid")
parser.add_argument("-L", "--add-labels", action="store_true",
help="Add x/y axis labels")
parser.add_argument("-fs", "--font-size", default=None, type=int,
help="Set the fontsize for labels")
parser.add_argument("input_file", type=FileType('r'),
nargs='?', default=sys.stdin,
help="Input fsdb file to read")
parser.add_argument("output_file", type=str,
nargs='?', default="out.png",
help="Where to write the png file to")
args = parser.parse_args()
if not args.columns or len(args.columns) != 2:
raise ValueError("exactly 2 columns must be passed to -c")
return args
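# Example invocation (file and column names below are hypothetical, and the
# script name is not given here; the tool expects an FSDB file with two key
# columns plus a numeric value column and writes a heatmap PNG):
#   python this_script.py -c src dst -v count input.fsdb heatmap.png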
def main():
args = parse_args()
# read in the input data
f = pyfsdb.Fsdb(file_handle=args.input_file,
return_type=pyfsdb.RETURN_AS_DICTIONARY)
max_value = None
dataset = {} # nested tree structure
ycols = {} # stores each unique second value
for row in f:
        if max_value is None:
max_value = float(row[args.value_column])
else:
max_value = max(max_value, float(row[args.value_column]))
if row[args.columns[0]] not in dataset:
dataset[row[args.columns[0]]] = \
{ row[args.columns[1]]: float(row[args.value_column]) }
else:
dataset[row[args.columns[0]]][row[args.columns[1]]] = \
float(row[args.value_column])
ycols[row[args.columns[1]]] = 1
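    # At this point the nested structure looks like (illustrative values only):
    #   dataset = {'keyA': {'key1': 12.0, 'key2': 3.0}, 'keyB': {'key1': 7.0}}
    #   ycols   = {'key1': 1, 'key2': 1}
    # and max_value holds the largest value seen, used below to normalise cells.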
# merge the data into a two dimensional array
data = []
xcols = sorted(dataset.keys())
ycols = sorted(ycols.keys())
for first_column in xcols:
newrow = []
for second_column in ycols:
if second_column in dataset[first_column]:
newrow.append(dataset[first_column][second_column] / max_value)
else:
newrow.append(0.0)
data.append(newrow)
grapharray = np.array(data)
if not args.invert:
grapharray = 1 - grapharray
# generate the graph
fig, ax = plt.subplots()
ax.imshow(grapharray, vmin=0.0, vmax=1.0, cmap='gray')
ax.set_xlabel(args.columns[1])
ax.set_ylabel(args.columns[0])
if args.add_labels:
ax.set_yticks(np.arange(len(dataset)))
ax.set_yticklabels(xcols)
ax.set_xticks(np.arange(len(ycols)))
ax.set_xticklabels(ycols)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
if args.add_fractions:
for i in range(len(grapharray)):
for j in range(len(grapharray[i])):
text = ax.text(j, i, "{:1.1f}".format(grapharray[i][j]),
ha="center", va="center", color="r",
fontsize=args.font_size)
elif args.add_raw:
for i, first_column in enumerate(xcols):
for j, second_column in enumerate(ycols):
try:
value = dataset[first_column][second_column]
ax.text(j, i, "{}".format(int(value)),
ha="center", va="center", color="r",
fontsize=args.font_size)
except Exception:
pass
fig.tight_layout()
plt.savefig(args.output_file,
bbox_inches="tight", pad_inches=0)
# import pprint
# pprint.pprint(dataset)
if __name__ == "__main__":
main()
|
[
"argparse.FileType",
"pyfsdb.Fsdb",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"numpy.array",
"matplotlib.pyplot.subplots"
] |
[((226, 339), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'formatter_class': 'ArgumentDefaultsHelpFormatter', 'description': '__doc__', 'epilog': '"""Exmaple Usage: """'}), "(formatter_class=ArgumentDefaultsHelpFormatter, description=\n __doc__, epilog='Exmaple Usage: ')\n", (240, 339), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, FileType\n'), ((1892, 1978), 'pyfsdb.Fsdb', 'pyfsdb.Fsdb', ([], {'file_handle': 'args.input_file', 'return_type': 'pyfsdb.RETURN_AS_DICTIONARY'}), '(file_handle=args.input_file, return_type=pyfsdb.\n RETURN_AS_DICTIONARY)\n', (1903, 1978), False, 'import pyfsdb\n'), ((3079, 3093), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3087, 3093), True, 'import numpy as np\n'), ((3194, 3208), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3206, 3208), True, 'import matplotlib.pyplot as plt\n'), ((4420, 4484), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.output_file'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)'}), "(args.output_file, bbox_inches='tight', pad_inches=0)\n", (4431, 4484), True, 'import matplotlib.pyplot as plt\n'), ((1356, 1369), 'argparse.FileType', 'FileType', (['"""r"""'], {}), "('r')\n", (1364, 1369), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, FileType\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 13:59:43 2018
@author: ofn77899
"""
import numpy
from ccpi.segmentation.SimpleflexSegmentor import SimpleflexSegmentor
from ccpi.viewer.CILViewer import CILViewer
from ccpi.viewer.CILViewer2D import CILViewer2D, Converter
import vtk
#Text-based input system
valid = False
while not valid:
try:
        InitialCameraPositionX = int(input('Enter the initial camera position on the x-axis:'))
        InitialCameraPositionY = int(input('Enter the initial camera position on the y-axis:'))
        InitialCameraPositionZ = int(input('Enter the initial camera position on the z-axis:'))
FrameCount = int(input('Enter number of frames for the animation:'))
ViewUp1 = int(input('Enter the first viewup value:'))
ViewUp2 = int(input('Enter the second viewup value:'))
ViewUp3 = int(input('Enter the third viewup value:'))
FocalPointX = int(input('Enter the x-coordinate for the camera focal point:'))
FocalPointY = int(input('Enter the y-coordinate for the camera focal point:'))
FocalPointZ = int(input('Enter the z-coordinate for the camera focal point:'))
AngleRangeStart = int(input('Enter the first value for the angle range:'))
AngleRangeEnd = int(input('Enter the last value for the angle range:'))
ClippingRangeStart = int(input('Set lowest value for clipping range:'))
ClippingRangeEnd = int(input('Set highest value for clipping range:'))
InitialCameraPosition = (InitialCameraPositionX, InitialCameraPositionY, InitialCameraPositionZ)
FocalPoint = (FocalPointX, FocalPointY, FocalPointZ)
AngleRange = (AngleRangeStart, AngleRangeEnd)
ClippingRange = (ClippingRangeStart, ClippingRangeEnd)
ViewUp = (ViewUp1, ViewUp2, ViewUp3)
except ValueError:
print('One or more of your inputs were not valid! Try again')
else:
valid = True
def surface2vtkPolyData(coord_list, origin = (0,0,0), spacing=(1,1,1)):
########################################################################
# 7. Display
# with the retrieved data we construct polydata actors to be displayed
# with VTK. Notice that this part is VTK specific. However, it shows how to
# process the data returned by the algorithm.
# Create the VTK output
# Points coordinates structure
triangle_vertices = vtk.vtkPoints()
#associate the points to triangles
triangle = vtk.vtkTriangle()
trianglePointIds = triangle.GetPointIds()
# put all triangles in an array
triangles = vtk.vtkCellArray()
isTriangle = 0
nTriangle = 0
surface = 0
# associate each coordinate with a point: 3 coordinates are needed for a point
# in 3D. Additionally we perform a shift from image coordinates (pixel) which
# is the default of the Contour Tree Algorithm to the World Coordinates.
# TODO: add this in the algorithm.
mScaling = numpy.asarray([spacing[0], 0,0,0,
0,spacing[1],0,0,
0,0,spacing[2],0,
0,0,0,1]).reshape((4,4))
mShift = numpy.asarray([1,0,0,origin[0],
0,1,0,origin[1],
0,0,1,origin[2],
0,0,0,1]).reshape((4,4))
mTransform = numpy.dot(mScaling, mShift)
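    # Note on the composition order: since mTransform = mScaling . mShift, the
    # shift is applied before the scaling, i.e. world = spacing * (point + origin);
    # with the default origin (0, 0, 0) this is a pure scaling of pixel indices.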
point_count = 0
for surf in coord_list:
print("Image-to-world coordinate trasformation ... %d" % surface)
for point in surf:
world_coord = numpy.dot(mTransform, point)
xCoord = world_coord[2]
yCoord = world_coord[1]
zCoord = world_coord[0]
# i += 3
            triangle_vertices.InsertNextPoint(xCoord, yCoord, zCoord)
# The id of the vertex of the triangle (0,1,2) is linked to
# the id of the points in the list, so in facts we just link id-to-id
trianglePointIds.SetId(isTriangle, point_count)
isTriangle += 1
point_count += 1
            if isTriangle == 3:
                isTriangle = 0
                # insert the current triangle in the triangles array
                triangles.InsertNextCell(triangle)
surface += 1
# polydata object
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints( triangle_vertices )
trianglePolyData.SetPolys( triangles )
return trianglePolyData
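# Minimal sketch of rendering the returned polydata with plain VTK (the
# CILViewer used below wraps this up; `renderer` here is an assumed vtkRenderer):
#   mapper = vtk.vtkPolyDataMapper()
#   mapper.SetInputData(trianglePolyData)
#   actor = vtk.vtkActor()
#   actor.SetMapper(mapper)
#   renderer.AddActor(actor)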
reader = vtk.vtkMetaImageReader()
reader.SetFileName("../../data/fuel_uc_python.mha")
reader.Update()
seg = SimpleflexSegmentor()
seg.setInputData(Converter.vtk2numpy(reader.GetOutput()))
seg.calculateContourTree()
#seg.setIsoValuePercent(24.)
seg.setLocalIsoValuePercent(0.)
seg.resetCollapsePriority(seg.PRIORITY_VOLUME)
# 5. Construct the iso-surfaces
print ("calling resetCollapsePriority")
#seg.updateTreeFromLogTreeSize(size=0.6, isGlobal=False)
print ("calling setlogtreesize")
seg.ct.SetLogTreeSize(1)
print ("calling UpdateTreeFromLogTreeSize")
seg.ct.UpdateTreeFromLogTreeSize()
print ("calling ConstructLocalIsoSurface")
#seg.constructLocalIsoSurfaces()
seg.ct.ConstructLocalIsoSurface()
print ("called ConstructLocalIsoSurface")
#seg.constructIsoSurfaces()
# 6. Retrieve the isosurfaces and display
coord_list = seg.getSurfaces()
del (seg)
#print ("getSurface " , len(coord_list))
spacing = numpy.asarray(reader.GetOutput().GetSpacing())
s1 = spacing[0]
spacing[0] = spacing[2]
spacing[2] = s1
print (len(coord_list))
v = CILViewer()
v.setInput3DData(reader.GetOutput())
v.displayPolyData(surface2vtkPolyData(coord_list, spacing=spacing))
#v.startRenderLoop()
dimX, dimY, dimZ = reader.GetOutput().GetDimensions()
#Setting locked values for camera position
locX = InitialCameraPosition[0]
locY = InitialCameraPosition[1]
locZ = InitialCameraPosition[2]
#Setting camera position
v.getCamera().SetPosition(InitialCameraPosition)
v.getCamera().SetFocalPoint(FocalPoint)
#Setting camera viewup
v.getCamera().SetViewUp(ViewUp)
#Set camera clipping range
v.getCamera().SetClippingRange(ClippingRange)
#Defining distance from camera to focal point
r = numpy.sqrt(((InitialCameraPosition[2]-FocalPoint[2])**2)
+(InitialCameraPosition[1]-FocalPoint[1])**2)
print('Radius: {}'.format(r))
camera = vtk.vtkCamera()
camera.SetPosition(InitialCameraPosition)
camera.SetFocalPoint(FocalPoint)
camera.SetViewUp(ViewUp)
v.getRenderer().SetActiveCamera(camera)
#Animating the camera
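# The camera is swept along a circle of radius r centred on the focal point:
# x = FocalPoint[0] + r*sin(angle), y = FocalPoint[1] + r*cos(angle), with the
# z-coordinate held at the locked value locZ; the 100 frames cover roughly two
# full revolutions (angle runs from 0 towards 4*pi).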
for x in range(100):
angle = ((numpy.pi)*4/100)*x
NewLocationX = r*(numpy.sin(angle))+FocalPoint[0]
NewLocationY = r*(numpy.cos(angle))+FocalPoint[1]
NewLocationZ = r*(numpy.cos(angle))+FocalPoint[2]
NewLocation = (NewLocationX, NewLocationY, locZ)
v.getCamera().SetPosition(NewLocation)
#Rendering and saving the render
v.getRenderer().Render()
v.saveRender('test_{}.png'.format(x))
v.startRenderLoop()
|
[
"numpy.sqrt",
"vtk.vtkMetaImageReader",
"vtk.vtkTriangle",
"vtk.vtkCamera",
"vtk.vtkCellArray",
"vtk.vtkPolyData",
"numpy.asarray",
"vtk.vtkPoints",
"numpy.dot",
"numpy.cos",
"ccpi.segmentation.SimpleflexSegmentor.SimpleflexSegmentor",
"numpy.sin",
"ccpi.viewer.CILViewer.CILViewer"
] |
[((4710, 4734), 'vtk.vtkMetaImageReader', 'vtk.vtkMetaImageReader', ([], {}), '()\n', (4732, 4734), False, 'import vtk\n'), ((4814, 4835), 'ccpi.segmentation.SimpleflexSegmentor.SimpleflexSegmentor', 'SimpleflexSegmentor', ([], {}), '()\n', (4833, 4835), False, 'from ccpi.segmentation.SimpleflexSegmentor import SimpleflexSegmentor\n'), ((5786, 5797), 'ccpi.viewer.CILViewer.CILViewer', 'CILViewer', ([], {}), '()\n', (5795, 5797), False, 'from ccpi.viewer.CILViewer import CILViewer\n'), ((6439, 6553), 'numpy.sqrt', 'numpy.sqrt', (['((InitialCameraPosition[2] - FocalPoint[2]) ** 2 + (InitialCameraPosition[1\n ] - FocalPoint[1]) ** 2)'], {}), '((InitialCameraPosition[2] - FocalPoint[2]) ** 2 + (\n InitialCameraPosition[1] - FocalPoint[1]) ** 2)\n', (6449, 6553), False, 'import numpy\n'), ((6588, 6603), 'vtk.vtkCamera', 'vtk.vtkCamera', ([], {}), '()\n', (6601, 6603), False, 'import vtk\n'), ((2541, 2556), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (2554, 2556), False, 'import vtk\n'), ((2613, 2630), 'vtk.vtkTriangle', 'vtk.vtkTriangle', ([], {}), '()\n', (2628, 2630), False, 'import vtk\n'), ((2732, 2750), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (2748, 2750), False, 'import vtk\n'), ((3524, 3551), 'numpy.dot', 'numpy.dot', (['mScaling', 'mShift'], {}), '(mScaling, mShift)\n', (3533, 3551), False, 'import numpy\n'), ((4552, 4569), 'vtk.vtkPolyData', 'vtk.vtkPolyData', ([], {}), '()\n', (4567, 4569), False, 'import vtk\n'), ((3120, 3215), 'numpy.asarray', 'numpy.asarray', (['[spacing[0], 0, 0, 0, 0, spacing[1], 0, 0, 0, 0, spacing[2], 0, 0, 0, 0, 1]'], {}), '([spacing[0], 0, 0, 0, 0, spacing[1], 0, 0, 0, 0, spacing[2], \n 0, 0, 0, 0, 1])\n', (3133, 3215), False, 'import numpy\n'), ((3322, 3414), 'numpy.asarray', 'numpy.asarray', (['[1, 0, 0, origin[0], 0, 1, 0, origin[1], 0, 0, 1, origin[2], 0, 0, 0, 1]'], {}), '([1, 0, 0, origin[0], 0, 1, 0, origin[1], 0, 0, 1, origin[2], \n 0, 0, 0, 1])\n', (3335, 3414), False, 'import numpy\n'), ((3742, 3770), 'numpy.dot', 'numpy.dot', (['mTransform', 'point'], {}), '(mTransform, point)\n', (3751, 3770), False, 'import numpy\n'), ((6852, 6868), 'numpy.sin', 'numpy.sin', (['angle'], {}), '(angle)\n', (6861, 6868), False, 'import numpy\n'), ((6907, 6923), 'numpy.cos', 'numpy.cos', (['angle'], {}), '(angle)\n', (6916, 6923), False, 'import numpy\n'), ((6963, 6979), 'numpy.cos', 'numpy.cos', (['angle'], {}), '(angle)\n', (6972, 6979), False, 'import numpy\n')]
|
from easyvec import Mat2, Vec2
import numpy as np
from pytest import approx
def test_constructor1():
m = Mat2(1,2,3,4)
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(2)
assert m.m21 == approx(3)
assert m.m22 == approx(4)
def test_constructor2():
m = Mat2([1,2,3,4])
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(2)
assert m.m21 == approx(3)
assert m.m22 == approx(4)
def test_constructor3():
m = Mat2([[1,2],[3,4]])
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(2)
assert m.m21 == approx(3)
assert m.m22 == approx(4)
def test_constructor4():
m = Mat2([1,2],[3,4])
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(2)
assert m.m21 == approx(3)
assert m.m22 == approx(4)
def test_constructor5():
m = Mat2(Vec2(1,2),Vec2(3,4))
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(2)
assert m.m21 == approx(3)
assert m.m22 == approx(4)
def test_constructor6():
m = Mat2([Vec2(1,2),Vec2(3,4)])
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(2)
assert m.m21 == approx(3)
assert m.m22 == approx(4)
def test_constructor7():
m = Mat2.eye()
assert m is not None
assert m.m11 == approx(1)
assert m.m12 == approx(0)
assert m.m21 == approx(0)
assert m.m22 == approx(1)
def test_constructor8():
from math import sin, cos, pi
for angle in np.random.uniform(-720, 720, 1000):
angle *= pi/180
m = Mat2.from_angle(angle)
assert m is not None
assert m.m11 == approx(cos(angle))
assert m.m12 == approx(sin(angle))
assert m.m21 == approx(-sin(angle))
assert m.m22 == approx(cos(angle))
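# Note: the asserts above imply that Mat2.from_angle(a) builds
# [[cos(a), sin(a)], [-sin(a), cos(a)]], i.e. the transpose of the textbook
# counter-clockwise rotation matrix (each row holds a rotated basis vector,
# consistent with x_axis()/y_axis() returning the matrix rows).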
def test_constructor9():
m = Mat2.from_xaxis((1,1))
assert m is not None
assert m.m11 == approx(1/2**0.5)
assert m.m12 == approx(1/2**0.5)
assert m.m21 == approx(-1/2**0.5)
assert m.m22 == approx(1/2**0.5)
def test_xiyj_axis():
m = Mat2(1,2,3,4)
assert m.x_axis() == (1,2)
assert m.i_axis() == (1,2)
assert m.y_axis() == (3,4)
assert m.j_axis() == (3,4)
def test_cmp():
m = Mat2(-1,2,-3,4)
assert m == [[-1,2],[-3,4]]
assert m != [[-1,-2],[-3,4]]
def test_T():
m = Mat2(-1,2,-3,4)
assert m.T == [[-1,-3], [2,4]]
def test_inverse1():
for angle in np.random.uniform(-720,720,1000):
m = Mat2.from_angle(angle)
assert m._1 == m.T
assert m.det() == approx(1)
def test_inverse2():
for ms in np.random.uniform(-720,720,(1000,4)):
m = Mat2(ms)
if abs(m.det()) < 1e-6:
continue
assert m * m._1 == Mat2.eye()
def test_mul1():
for ms in np.random.uniform(-720,720,(1000,5)):
m = Mat2(ms[:-1])
assert m * ms[-1] == (ms[:-1] * ms[-1]).reshape(2,2)
assert ms[-1] * m == (ms[:-1] * ms[-1]).reshape(2,2)
def test_mul2():
for angle, x, y in np.random.uniform(-180,180,(1000,3)):
m = Mat2.from_angle(angle, 1)
v = Vec2(x, y).norm()
v1 = m * v
assert v.angle_to(v1, 1) == approx(-angle)
v2 = m._1 * v1
assert v2 == v
v3 = m._1 * v
assert v.angle_to(v3, 1) == approx(angle)
def test_imul():
for ms in np.random.uniform(-720,720,(1000,4)):
m = Mat2(ms)
if abs(m.det()) < 1e-6:
continue
assert m * m._1 == Mat2.eye()
m *= m._1
assert m == Mat2.eye()
def test_add1():
for ms in np.random.uniform(-720,720,(1000,5)):
m = Mat2(ms[:-1])
m1 = m + ms[-1]
m1i = (ms[:-1] + ms[-1]).reshape(2,2)
assert m1 == m1i
assert ms[-1] + m == (ms[:-1] + ms[-1]).reshape(2,2)
def test_add2():
for ms in np.random.uniform(-720,720,(1000,8)):
m1 = Mat2(ms[:4])
m2 = Mat2(ms[4:])
assert m1 + m2 == m2 + m1
assert m1 + m2 == (ms[:4] + ms[4:]).reshape(2,2)
def test_iadd():
for ms in np.random.uniform(-720,720,(1000,8)):
m1 = Mat2(ms[:4])
m2 = Mat2(ms[4:])
m12 = m1 + m2
m1 += m2
assert m12 == m1
assert m1 == (ms[:4] + ms[4:]).reshape(2,2)
def test_sub1():
for ms in np.random.uniform(-720,720,(1000,5)):
m = Mat2(ms[:-1])
m1 = m - ms[-1]
m1i = (ms[:-1] - ms[-1]).reshape(2,2)
assert m1 == m1i
assert ms[-1] - m == -(ms[:-1] - ms[-1]).reshape(2,2)
def test_sub2():
for ms in np.random.uniform(-720,720,(1000,8)):
m1 = Mat2(ms[:4])
m2 = Mat2(ms[4:])
assert m1 - m2 == -(m2 - m1)
assert m1 - m2 == (ms[:4] - ms[4:]).reshape(2,2)
def test_isub():
for ms in np.random.uniform(-720,720,(1000,8)):
m1 = Mat2(ms[:4])
m2 = Mat2(ms[4:])
m12 = m1 - m2
m1 -= m2
assert m12 == m1
assert m1 == (ms[:4] - ms[4:]).reshape(2,2)
def test_div1():
for ms in np.random.uniform(-720,720,(1000,5)):
m = Mat2(ms[:-1])
m1 = m / ms[-1]
m1i = (ms[:-1] / ms[-1]).reshape(2,2)
assert m1 == m1i
def test_div2():
for ms in np.random.uniform(-720,720,(1000,8)):
m1 = Mat2(ms[:4])
m2 = Mat2(ms[4:])
assert m1 / m2 == (ms[:4] / ms[4:]).reshape(2,2)
def test_idiv():
for ms in np.random.uniform(-720,720,(1000,8)):
m1 = Mat2(ms[:4])
m2 = Mat2(ms[4:])
m12 = m1 / m2
m1 /= m2
assert m12 == m1
assert m1 == (ms[:4] / ms[4:]).reshape(2,2)
|
[
"pytest.approx",
"easyvec.Vec2",
"easyvec.Mat2",
"easyvec.Mat2.eye",
"math.cos",
"numpy.random.uniform",
"easyvec.Mat2.from_angle",
"easyvec.Mat2.from_xaxis",
"math.sin"
] |
[((110, 126), 'easyvec.Mat2', 'Mat2', (['(1)', '(2)', '(3)', '(4)'], {}), '(1, 2, 3, 4)\n', (114, 126), False, 'from easyvec import Mat2, Vec2\n'), ((304, 322), 'easyvec.Mat2', 'Mat2', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (308, 322), False, 'from easyvec import Mat2, Vec2\n'), ((500, 522), 'easyvec.Mat2', 'Mat2', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (504, 522), False, 'from easyvec import Mat2, Vec2\n'), ((700, 720), 'easyvec.Mat2', 'Mat2', (['[1, 2]', '[3, 4]'], {}), '([1, 2], [3, 4])\n', (704, 720), False, 'from easyvec import Mat2, Vec2\n'), ((1313, 1323), 'easyvec.Mat2.eye', 'Mat2.eye', ([], {}), '()\n', (1321, 1323), False, 'from easyvec import Mat2, Vec2\n'), ((1547, 1581), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000)'], {}), '(-720, 720, 1000)\n', (1564, 1581), True, 'import numpy as np\n'), ((1879, 1902), 'easyvec.Mat2.from_xaxis', 'Mat2.from_xaxis', (['(1, 1)'], {}), '((1, 1))\n', (1894, 1902), False, 'from easyvec import Mat2, Vec2\n'), ((2108, 2124), 'easyvec.Mat2', 'Mat2', (['(1)', '(2)', '(3)', '(4)'], {}), '(1, 2, 3, 4)\n', (2112, 2124), False, 'from easyvec import Mat2, Vec2\n'), ((2271, 2289), 'easyvec.Mat2', 'Mat2', (['(-1)', '(2)', '(-3)', '(4)'], {}), '(-1, 2, -3, 4)\n', (2275, 2289), False, 'from easyvec import Mat2, Vec2\n'), ((2375, 2393), 'easyvec.Mat2', 'Mat2', (['(-1)', '(2)', '(-3)', '(4)'], {}), '(-1, 2, -3, 4)\n', (2379, 2393), False, 'from easyvec import Mat2, Vec2\n'), ((2465, 2499), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000)'], {}), '(-720, 720, 1000)\n', (2482, 2499), True, 'import numpy as np\n'), ((2634, 2673), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 4)'], {}), '(-720, 720, (1000, 4))\n', (2651, 2673), True, 'import numpy as np\n'), ((2817, 2856), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 5)'], {}), '(-720, 720, (1000, 5))\n', (2834, 2856), True, 'import numpy as np\n'), ((3045, 3084), 'numpy.random.uniform', 'np.random.uniform', (['(-180)', '(180)', '(1000, 3)'], {}), '(-180, 180, (1000, 3))\n', (3062, 3084), True, 'import numpy as np\n'), ((3372, 3411), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 4)'], {}), '(-720, 720, (1000, 4))\n', (3389, 3411), True, 'import numpy as np\n'), ((3604, 3643), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 5)'], {}), '(-720, 720, (1000, 5))\n', (3621, 3643), True, 'import numpy as np\n'), ((3857, 3896), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 8)'], {}), '(-720, 720, (1000, 8))\n', (3874, 3896), True, 'import numpy as np\n'), ((4070, 4109), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 8)'], {}), '(-720, 720, (1000, 8))\n', (4087, 4109), True, 'import numpy as np\n'), ((4308, 4347), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 5)'], {}), '(-720, 720, (1000, 5))\n', (4325, 4347), True, 'import numpy as np\n'), ((4562, 4601), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 8)'], {}), '(-720, 720, (1000, 8))\n', (4579, 4601), True, 'import numpy as np\n'), ((4778, 4817), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 8)'], {}), '(-720, 720, (1000, 8))\n', (4795, 4817), True, 'import numpy as np\n'), ((5016, 5055), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 5)'], {}), '(-720, 720, (1000, 5))\n', (5033, 5055), True, 'import numpy as np\n'), ((5207, 
5246), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 8)'], {}), '(-720, 720, (1000, 8))\n', (5224, 5246), True, 'import numpy as np\n'), ((5386, 5425), 'numpy.random.uniform', 'np.random.uniform', (['(-720)', '(720)', '(1000, 8)'], {}), '(-720, 720, (1000, 8))\n', (5403, 5425), True, 'import numpy as np\n'), ((169, 178), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (175, 178), False, 'from pytest import approx\n'), ((199, 208), 'pytest.approx', 'approx', (['(2)'], {}), '(2)\n', (205, 208), False, 'from pytest import approx\n'), ((229, 238), 'pytest.approx', 'approx', (['(3)'], {}), '(3)\n', (235, 238), False, 'from pytest import approx\n'), ((259, 268), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (265, 268), False, 'from pytest import approx\n'), ((365, 374), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (371, 374), False, 'from pytest import approx\n'), ((395, 404), 'pytest.approx', 'approx', (['(2)'], {}), '(2)\n', (401, 404), False, 'from pytest import approx\n'), ((425, 434), 'pytest.approx', 'approx', (['(3)'], {}), '(3)\n', (431, 434), False, 'from pytest import approx\n'), ((455, 464), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (461, 464), False, 'from pytest import approx\n'), ((565, 574), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (571, 574), False, 'from pytest import approx\n'), ((595, 604), 'pytest.approx', 'approx', (['(2)'], {}), '(2)\n', (601, 604), False, 'from pytest import approx\n'), ((625, 634), 'pytest.approx', 'approx', (['(3)'], {}), '(3)\n', (631, 634), False, 'from pytest import approx\n'), ((655, 664), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (661, 664), False, 'from pytest import approx\n'), ((763, 772), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (769, 772), False, 'from pytest import approx\n'), ((793, 802), 'pytest.approx', 'approx', (['(2)'], {}), '(2)\n', (799, 802), False, 'from pytest import approx\n'), ((823, 832), 'pytest.approx', 'approx', (['(3)'], {}), '(3)\n', (829, 832), False, 'from pytest import approx\n'), ((853, 862), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (859, 862), False, 'from pytest import approx\n'), ((903, 913), 'easyvec.Vec2', 'Vec2', (['(1)', '(2)'], {}), '(1, 2)\n', (907, 913), False, 'from easyvec import Mat2, Vec2\n'), ((913, 923), 'easyvec.Vec2', 'Vec2', (['(3)', '(4)'], {}), '(3, 4)\n', (917, 923), False, 'from easyvec import Mat2, Vec2\n'), ((969, 978), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (975, 978), False, 'from pytest import approx\n'), ((999, 1008), 'pytest.approx', 'approx', (['(2)'], {}), '(2)\n', (1005, 1008), False, 'from pytest import approx\n'), ((1029, 1038), 'pytest.approx', 'approx', (['(3)'], {}), '(3)\n', (1035, 1038), False, 'from pytest import approx\n'), ((1059, 1068), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (1065, 1068), False, 'from pytest import approx\n'), ((1177, 1186), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (1183, 1186), False, 'from pytest import approx\n'), ((1207, 1216), 'pytest.approx', 'approx', (['(2)'], {}), '(2)\n', (1213, 1216), False, 'from pytest import approx\n'), ((1237, 1246), 'pytest.approx', 'approx', (['(3)'], {}), '(3)\n', (1243, 1246), False, 'from pytest import approx\n'), ((1267, 1276), 'pytest.approx', 'approx', (['(4)'], {}), '(4)\n', (1273, 1276), False, 'from pytest import approx\n'), ((1369, 1378), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (1375, 1378), False, 'from pytest import approx\n'), ((1399, 1408), 'pytest.approx', 'approx', (['(0)'], {}), 
'(0)\n', (1405, 1408), False, 'from pytest import approx\n'), ((1429, 1438), 'pytest.approx', 'approx', (['(0)'], {}), '(0)\n', (1435, 1438), False, 'from pytest import approx\n'), ((1459, 1468), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (1465, 1468), False, 'from pytest import approx\n'), ((1619, 1641), 'easyvec.Mat2.from_angle', 'Mat2.from_angle', (['angle'], {}), '(angle)\n', (1634, 1641), False, 'from easyvec import Mat2, Vec2\n'), ((1947, 1967), 'pytest.approx', 'approx', (['(1 / 2 ** 0.5)'], {}), '(1 / 2 ** 0.5)\n', (1953, 1967), False, 'from pytest import approx\n'), ((1984, 2004), 'pytest.approx', 'approx', (['(1 / 2 ** 0.5)'], {}), '(1 / 2 ** 0.5)\n', (1990, 2004), False, 'from pytest import approx\n'), ((2021, 2042), 'pytest.approx', 'approx', (['(-1 / 2 ** 0.5)'], {}), '(-1 / 2 ** 0.5)\n', (2027, 2042), False, 'from pytest import approx\n'), ((2059, 2079), 'pytest.approx', 'approx', (['(1 / 2 ** 0.5)'], {}), '(1 / 2 ** 0.5)\n', (2065, 2079), False, 'from pytest import approx\n'), ((2511, 2533), 'easyvec.Mat2.from_angle', 'Mat2.from_angle', (['angle'], {}), '(angle)\n', (2526, 2533), False, 'from easyvec import Mat2, Vec2\n'), ((2684, 2692), 'easyvec.Mat2', 'Mat2', (['ms'], {}), '(ms)\n', (2688, 2692), False, 'from easyvec import Mat2, Vec2\n'), ((2867, 2880), 'easyvec.Mat2', 'Mat2', (['ms[:-1]'], {}), '(ms[:-1])\n', (2871, 2880), False, 'from easyvec import Mat2, Vec2\n'), ((3095, 3120), 'easyvec.Mat2.from_angle', 'Mat2.from_angle', (['angle', '(1)'], {}), '(angle, 1)\n', (3110, 3120), False, 'from easyvec import Mat2, Vec2\n'), ((3422, 3430), 'easyvec.Mat2', 'Mat2', (['ms'], {}), '(ms)\n', (3426, 3430), False, 'from easyvec import Mat2, Vec2\n'), ((3654, 3667), 'easyvec.Mat2', 'Mat2', (['ms[:-1]'], {}), '(ms[:-1])\n', (3658, 3667), False, 'from easyvec import Mat2, Vec2\n'), ((3908, 3920), 'easyvec.Mat2', 'Mat2', (['ms[:4]'], {}), '(ms[:4])\n', (3912, 3920), False, 'from easyvec import Mat2, Vec2\n'), ((3934, 3946), 'easyvec.Mat2', 'Mat2', (['ms[4:]'], {}), '(ms[4:])\n', (3938, 3946), False, 'from easyvec import Mat2, Vec2\n'), ((4121, 4133), 'easyvec.Mat2', 'Mat2', (['ms[:4]'], {}), '(ms[:4])\n', (4125, 4133), False, 'from easyvec import Mat2, Vec2\n'), ((4147, 4159), 'easyvec.Mat2', 'Mat2', (['ms[4:]'], {}), '(ms[4:])\n', (4151, 4159), False, 'from easyvec import Mat2, Vec2\n'), ((4358, 4371), 'easyvec.Mat2', 'Mat2', (['ms[:-1]'], {}), '(ms[:-1])\n', (4362, 4371), False, 'from easyvec import Mat2, Vec2\n'), ((4613, 4625), 'easyvec.Mat2', 'Mat2', (['ms[:4]'], {}), '(ms[:4])\n', (4617, 4625), False, 'from easyvec import Mat2, Vec2\n'), ((4639, 4651), 'easyvec.Mat2', 'Mat2', (['ms[4:]'], {}), '(ms[4:])\n', (4643, 4651), False, 'from easyvec import Mat2, Vec2\n'), ((4829, 4841), 'easyvec.Mat2', 'Mat2', (['ms[:4]'], {}), '(ms[:4])\n', (4833, 4841), False, 'from easyvec import Mat2, Vec2\n'), ((4855, 4867), 'easyvec.Mat2', 'Mat2', (['ms[4:]'], {}), '(ms[4:])\n', (4859, 4867), False, 'from easyvec import Mat2, Vec2\n'), ((5066, 5079), 'easyvec.Mat2', 'Mat2', (['ms[:-1]'], {}), '(ms[:-1])\n', (5070, 5079), False, 'from easyvec import Mat2, Vec2\n'), ((5258, 5270), 'easyvec.Mat2', 'Mat2', (['ms[:4]'], {}), '(ms[:4])\n', (5262, 5270), False, 'from easyvec import Mat2, Vec2\n'), ((5284, 5296), 'easyvec.Mat2', 'Mat2', (['ms[4:]'], {}), '(ms[4:])\n', (5288, 5296), False, 'from easyvec import Mat2, Vec2\n'), ((5437, 5449), 'easyvec.Mat2', 'Mat2', (['ms[:4]'], {}), '(ms[:4])\n', (5441, 5449), False, 'from easyvec import Mat2, Vec2\n'), ((5463, 5475), 'easyvec.Mat2', 'Mat2', 
(['ms[4:]'], {}), '(ms[4:])\n', (5467, 5475), False, 'from easyvec import Mat2, Vec2\n'), ((1110, 1120), 'easyvec.Vec2', 'Vec2', (['(1)', '(2)'], {}), '(1, 2)\n', (1114, 1120), False, 'from easyvec import Mat2, Vec2\n'), ((1120, 1130), 'easyvec.Vec2', 'Vec2', (['(3)', '(4)'], {}), '(3, 4)\n', (1124, 1130), False, 'from easyvec import Mat2, Vec2\n'), ((2587, 2596), 'pytest.approx', 'approx', (['(1)'], {}), '(1)\n', (2593, 2596), False, 'from pytest import approx\n'), ((2774, 2784), 'easyvec.Mat2.eye', 'Mat2.eye', ([], {}), '()\n', (2782, 2784), False, 'from easyvec import Mat2, Vec2\n'), ((3206, 3220), 'pytest.approx', 'approx', (['(-angle)'], {}), '(-angle)\n', (3212, 3220), False, 'from pytest import approx\n'), ((3325, 3338), 'pytest.approx', 'approx', (['angle'], {}), '(angle)\n', (3331, 3338), False, 'from pytest import approx\n'), ((3512, 3522), 'easyvec.Mat2.eye', 'Mat2.eye', ([], {}), '()\n', (3520, 3522), False, 'from easyvec import Mat2, Vec2\n'), ((3561, 3571), 'easyvec.Mat2.eye', 'Mat2.eye', ([], {}), '()\n', (3569, 3571), False, 'from easyvec import Mat2, Vec2\n'), ((1702, 1712), 'math.cos', 'cos', (['angle'], {}), '(angle)\n', (1705, 1712), False, 'from math import sin, cos, pi\n'), ((1745, 1755), 'math.sin', 'sin', (['angle'], {}), '(angle)\n', (1748, 1755), False, 'from math import sin, cos, pi\n'), ((1832, 1842), 'math.cos', 'cos', (['angle'], {}), '(angle)\n', (1835, 1842), False, 'from math import sin, cos, pi\n'), ((3133, 3143), 'easyvec.Vec2', 'Vec2', (['x', 'y'], {}), '(x, y)\n', (3137, 3143), False, 'from easyvec import Mat2, Vec2\n'), ((1789, 1799), 'math.sin', 'sin', (['angle'], {}), '(angle)\n', (1792, 1799), False, 'from math import sin, cos, pi\n')]
|
import numpy as np
from two_d_nav.envs.static_maze import StaticMazeNavigation
def test_goal():
env = StaticMazeNavigation()
for i in range(60):
obs, reward, done, _ = env.step(np.array([1.0, -0.1]))
env.render()
for i in range(30):
obs, reward, done, _ = env.step(np.array([-1.0, -0.5]))
env.render()
for i in range(5):
obs, reward, done, _ = env.step(np.array([0.0, -1.0]))
env.render()
for i in range(15):
obs, reward, done, _ = env.step(np.array([-1.0, 0.0]))
env.render()
for i in range(30):
obs, reward, done, _ = env.step(np.array([0.0, -1.0]))
env.render()
for i in range(18):
obs, reward, done, _ = env.step(np.array([-1.0, -0.6]))
env.render()
if done:
print(f"Reach goal: {obs}")
print(f"Reward: {reward}")
def test_obstacle():
env = StaticMazeNavigation()
for i in range(60):
obs, reward, done, _ = env.step(np.array([1.0, -0.1]))
env.render()
for i in range(5):
obs, reward, done, _ = env.step(np.array([0.0, -1.0]))
env.render()
for i in range(30):
obs, reward, done, _ = env.step(np.array([-1.0, 0.0]))
env.render()
if done:
print(f"Hit obstacle: {obs}")
print(f"Reward: {reward}")
def test_wall():
env = StaticMazeNavigation()
reward = 0.0
for i in range(20):
obs, reward, done, _ = env.step(np.array([-1.0, 0.0]))
env.render()
print(f"Hit wall reward {reward}")
if __name__ == '__main__':
test_goal()
test_obstacle()
test_wall()
|
[
"two_d_nav.envs.static_maze.StaticMazeNavigation",
"numpy.array"
] |
[((109, 131), 'two_d_nav.envs.static_maze.StaticMazeNavigation', 'StaticMazeNavigation', ([], {}), '()\n', (129, 131), False, 'from two_d_nav.envs.static_maze import StaticMazeNavigation\n'), ((917, 939), 'two_d_nav.envs.static_maze.StaticMazeNavigation', 'StaticMazeNavigation', ([], {}), '()\n', (937, 939), False, 'from two_d_nav.envs.static_maze import StaticMazeNavigation\n'), ((1394, 1416), 'two_d_nav.envs.static_maze.StaticMazeNavigation', 'StaticMazeNavigation', ([], {}), '()\n', (1414, 1416), False, 'from two_d_nav.envs.static_maze import StaticMazeNavigation\n'), ((197, 218), 'numpy.array', 'np.array', (['[1.0, -0.1]'], {}), '([1.0, -0.1])\n', (205, 218), True, 'import numpy as np\n'), ((306, 328), 'numpy.array', 'np.array', (['[-1.0, -0.5]'], {}), '([-1.0, -0.5])\n', (314, 328), True, 'import numpy as np\n'), ((415, 436), 'numpy.array', 'np.array', (['[0.0, -1.0]'], {}), '([0.0, -1.0])\n', (423, 436), True, 'import numpy as np\n'), ((524, 545), 'numpy.array', 'np.array', (['[-1.0, 0.0]'], {}), '([-1.0, 0.0])\n', (532, 545), True, 'import numpy as np\n'), ((633, 654), 'numpy.array', 'np.array', (['[0.0, -1.0]'], {}), '([0.0, -1.0])\n', (641, 654), True, 'import numpy as np\n'), ((742, 764), 'numpy.array', 'np.array', (['[-1.0, -0.6]'], {}), '([-1.0, -0.6])\n', (750, 764), True, 'import numpy as np\n'), ((1005, 1026), 'numpy.array', 'np.array', (['[1.0, -0.1]'], {}), '([1.0, -0.1])\n', (1013, 1026), True, 'import numpy as np\n'), ((1113, 1134), 'numpy.array', 'np.array', (['[0.0, -1.0]'], {}), '([0.0, -1.0])\n', (1121, 1134), True, 'import numpy as np\n'), ((1222, 1243), 'numpy.array', 'np.array', (['[-1.0, 0.0]'], {}), '([-1.0, 0.0])\n', (1230, 1243), True, 'import numpy as np\n'), ((1499, 1520), 'numpy.array', 'np.array', (['[-1.0, 0.0]'], {}), '([-1.0, 0.0])\n', (1507, 1520), True, 'import numpy as np\n')]
|
import os
import numpy as np
import tensorflow as tf
import cPickle
from utils import shared, get_name
from nn import HiddenLayer, EmbeddingLayer, LSTM, forward
class Model(object):
"""
Network architecture.
"""
def __init__(self, parameters=None, models_path=None, model_path=None):
"""
Initialize the model. We either provide the parameters and a path where
we store the models, or the location of a trained model.
"""
if model_path is None:
assert parameters and models_path
# Create a name based on the parameters
self.parameters = parameters
self.name = get_name(parameters)
# Model location
model_path = os.path.join(models_path, self.name)
self.model_path = model_path
self.mappings_path = os.path.join(model_path, 'mappings.pkl')
self.parameters_path = os.path.join(model_path, 'parameters.pkl')
# Create directory for the model if it does not exist
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
# Save the parameters to disk
with open(self.parameters_path, 'wb') as f:
cPickle.dump(parameters, f)
else:
assert parameters is None and models_path is None
# Model location
self.model_path = model_path
self.mappings_path = os.path.join(model_path, 'mappings.pkl')
self.parameters_path = os.path.join(model_path, 'parameters.pkl')
# Load the parameters and the mappings from disk
with open(self.parameters_path, 'rb') as f:
self.parameters = cPickle.load(f)
self.reload_mappings()
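    # Illustrative usage (paths and the parameter dict below are hypothetical):
    #   model = Model(parameters=params, models_path='./models')    # new model
    #   model = Model(model_path='./models/<existing_model_name>')  # reload one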
def save_mappings(self, id_to_word, id_to_char, id_to_tag):
"""
We need to save the mappings if we want to use the model later.
"""
self.id_to_word = id_to_word
self.id_to_char = id_to_char
self.id_to_tag = id_to_tag
with open(self.mappings_path, 'wb') as f:
mappings = {
'id_to_word': self.id_to_word,
'id_to_char': self.id_to_char,
'id_to_tag': self.id_to_tag,
}
cPickle.dump(mappings, f)
def reload_mappings(self):
"""
Load mappings from disk.
"""
with open(self.mappings_path, 'rb') as f:
mappings = cPickle.load(f)
self.id_to_word = mappings['id_to_word']
self.id_to_char = mappings['id_to_char']
self.id_to_tag = mappings['id_to_tag']
def build(self,
dropout,
char_dim,
char_lstm_dim,
char_bidirect,
word_dim,
word_lstm_dim,
word_bidirect,
lr_method,
lr_rate,
clip_norm,
crf,
is_train,
**kwargs
):
"""
Build the network.
"""
# Training parameters
n_words = len(self.id_to_word)
n_chars = len(self.id_to_char)
n_tags = len(self.id_to_tag)
# Network variables
self.word_ids = tf.placeholder(tf.int32, shape=[None, None], name='word_ids') # shape:[batch_size, max_word_len]
self.word_pos_ids = tf.placeholder(tf.int32, shape=[None], name='word_pos_ids') # shape: [batch_size]
self.char_for_ids = tf.placeholder(tf.int32, shape=[None, None, None], name='char_for_ids') # shape: [batch_size, word_max_len, char_max_len]
self.char_rev_ids = tf.placeholder(tf.int32, shape=[None, None, None], name='char_rev_ids') # shape: [batch_size, word_max_len, char_max_len]
self.char_pos_ids = tf.placeholder(tf.int32, shape=[None, None], name='char_pos_ids') # shape: [batch_size*word_max_len, char_max_len]
self.tag_ids = tf.placeholder(tf.int32, shape=[None, None], name='tag_ids') # shape: [batch_size,word_max_len]
self.tag_id_trans = tf.placeholder(tf.int32, shape=[None, None, None], name='tag_id_trans') # shape: [batch_size,word_max_len+1,2]
self.tag_id_index = tf.placeholder(tf.int32, shape=[None, None, None], name='tag_id_index') # shape: [batch_size,word_max_len,2]
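        # The *_pos_ids placeholders appear to hold, for each (padded) sequence,
        # the position of its last valid element; they are used below to gather
        # the final LSTM states (tf.gather_nd) and to reverse or slice only the
        # valid portion of each sequence (tf.reverse_sequence, word_pos_ + 1).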
# Final input (all word features)
input_dim = 0
inputs = []
#
# Word inputs
#
if word_dim:
input_dim += word_dim
with tf.device("/cpu:0"):
word_layer = EmbeddingLayer(n_words, word_dim, name='word_layer')
word_input = word_layer.link(self.word_ids)
inputs.append(word_input)
#
        # Chars inputs
#
if char_dim:
input_dim += char_lstm_dim
char_layer = EmbeddingLayer(n_chars, char_dim, name='char_layer')
char_lstm_for = LSTM(char_dim, char_lstm_dim, with_batch=True,
name='char_lstm_for')
char_lstm_rev = LSTM(char_dim, char_lstm_dim, with_batch=True,
name='char_lstm_rev')
with tf.device("/cpu:0"):
char_for_embedding_batch = char_layer.link(self.char_for_ids)
char_rev_embedding_batch = char_layer.link(self.char_rev_ids)
shape_for = tf.shape(char_for_embedding_batch)
# reshape from [batch_size, word_max_len, char_max_len, char_dim] to [batch_size*word_max_len, char_max_len, char_dim]
char_for_embedding = tf.reshape(char_for_embedding_batch,
(shape_for[0]*shape_for[1], shape_for[2], shape_for[3]))
shape_rev = tf.shape(char_rev_embedding_batch)
char_rev_embedding = tf.reshape(char_rev_embedding_batch,
(shape_rev[0] * shape_rev[1], shape_rev[2], shape_rev[3]))
char_lstm_for_states = char_lstm_for.link(char_for_embedding)
char_lstm_rev_states = char_lstm_rev.link(char_rev_embedding)
char_lstm_for_h_trans = tf.transpose(char_lstm_for_states[1], (1, 0, 2), name='char_lstm_for_h_trans')
char_lstm_rev_h_trans = tf.transpose(char_lstm_rev_states[1], (1, 0, 2), name='char_lstm_rev_h_trans')
char_for_output = tf.gather_nd(char_lstm_for_h_trans, self.char_pos_ids, name='char_for_output')
char_rev_output = tf.gather_nd(char_lstm_rev_h_trans, self.char_pos_ids, name='char_rev_output')
char_for_output_batch = tf.reshape(char_for_output, (shape_for[0], shape_for[1], char_lstm_dim))
char_rev_output_batch = tf.reshape(char_rev_output, (shape_rev[0], shape_rev[1], char_lstm_dim))
inputs.append(char_for_output_batch)
if char_bidirect:
inputs.append(char_rev_output_batch)
input_dim += char_lstm_dim
inputs = tf.concat(inputs, axis=-1)
# Dropout on final input
assert dropout < 1 and 0.0 <= dropout
if dropout:
input_train = tf.nn.dropout(inputs, 1 - dropout)
if is_train:
inputs = input_train
# LSTM for words
word_lstm_for = LSTM(input_dim, word_lstm_dim, with_batch=True,
name='word_lstm_for')
word_lstm_rev = LSTM(input_dim, word_lstm_dim, with_batch=True,
name='word_lstm_rev')
        # forward hidden output
word_states_for = word_lstm_for.link(inputs)
word_lstm_for_output = tf.transpose(word_states_for[1], (1, 0, 2), name='word_lstm_for_h_trans')
        # reverse hidden output
inputs_rev = tf.reverse_sequence(inputs, self.word_pos_ids, seq_dim=1, batch_dim=0)
word_states_rev = word_lstm_rev.link(inputs_rev)
word_lstm_rev_h_trans = tf.transpose(word_states_rev[1], (1, 0, 2), name='word_lstm_rev_h_trans')
word_lstm_rev_output = tf.reverse_sequence(word_lstm_rev_h_trans, self.word_pos_ids, seq_dim=1, batch_dim=0)
if word_bidirect:
final_output = tf.concat([word_lstm_for_output, word_lstm_rev_output],axis=-1)
tanh_layer = HiddenLayer(2 * word_lstm_dim, word_lstm_dim, name='tanh_layer', activation='tanh')
final_output = tanh_layer.link(final_output)
else:
final_output = word_lstm_for_output
final_layer = HiddenLayer(word_lstm_dim, n_tags, name='final_layer')
tags_scores = final_layer.link(final_output)
# No CRF
if not crf:
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.tag_ids, logits=tags_scores, name='xentropy')
cost = tf.reduce_mean(cross_entropy, name='xentropy_mean')
else:
transitions = shared((n_tags + 2, n_tags + 2), 'transitions')
small = -1000
b_s = np.array([[small] * n_tags + [0, small]]).astype(np.float32)
e_s = np.array([[small] * n_tags + [small, 0]]).astype(np.float32)
# for batch observation
#def recurrence(prev, obs):
# s_len = tf.shape(obs)[0]
# obvs = tf.concat([obs, small * tf.ones((s_len, 2))], axis=1)
# observations = tf.concat([b_s, obvs, e_s], axis=0)
# return observations
#tags_scores_shape = tf.shape(tags_scores)
#obs_initial = tf.ones((tags_scores_shape[1] + 2, n_tags + 2))
#obs_batch = tf.scan(fn=recurrence, elems=tags_scores, initializer=obs_initial)
# Score from tags
def recurrence_real_score(prev,obs):
tags_score = obs[0]
tag_id_index_ = obs[1]
tag_id_trans_= obs[2]
word_pos_ = obs[3] + 1
tags_score_slice = tags_score[0:word_pos_,:]
tag_id_index_slice = tag_id_index_[0:word_pos_,:]
tag_id_trans_slice = tag_id_trans_[0:(word_pos_+1),:]
real_path_score = tf.reduce_sum(tf.gather_nd(tags_score_slice, tag_id_index_slice))
real_path_score += tf.reduce_sum(tf.gather_nd(transitions, tag_id_trans_slice))
return tf.reshape(real_path_score,[])
real_path_score_list = tf.scan(fn=recurrence_real_score, elems=[tags_scores, self.tag_id_index, self.tag_id_trans, self.word_pos_ids], initializer=0.0)
def recurrence_all_path(prev, obs):
tags_score = obs[0]
word_pos_ = obs[1] + 1
tags_score_slice = tags_score[0:word_pos_,:]
s_len = tf.shape(tags_score_slice)[0]
obvs = tf.concat([tags_score_slice, small * tf.ones((s_len, 2))], axis=1)
observations = tf.concat([b_s, obvs, e_s], axis=0)
all_paths_scores = forward(observations, transitions)
return tf.reshape(all_paths_scores,[])
all_paths_scores_list = tf.scan(fn=recurrence_all_path, elems=[tags_scores, self.word_pos_ids], initializer=0.0)
cost = - tf.reduce_mean(real_path_score_list - all_paths_scores_list)
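            # Assuming forward() returns the log-partition (log-sum-exp over all
            # tag paths), this is the usual CRF negative log-likelihood:
            #   cost = mean( log Z(x) - score(gold path) )
            # i.e. maximising the probability of the gold tag sequence under a
            # linear-chain CRF with emissions `tags_scores` and `transitions`.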
# Network parameters
if not crf:
f_score = tf.nn.softmax(tags_scores)
else:
def recurrence_predict(prev, obs):
tags_score = obs[0]
word_pos_ = obs[1] + 1
tags_score_slice = tags_score[0:word_pos_,:]
s_len = tf.shape(tags_score_slice)[0]
obvs = tf.concat([tags_score_slice, small * tf.ones((s_len, 2))], axis=1)
observations = tf.concat([b_s, obvs, e_s], axis=0)
all_paths_scores = forward(observations, transitions, viterbi=True, return_alpha=False, return_best_sequence=True)
all_paths_scores = tf.concat([all_paths_scores, tf.zeros([tf.shape(tags_score)[0]-s_len], tf.int32)], axis=0)
return all_paths_scores
f_score = tf.scan(fn=recurrence_predict, elems=[tags_scores, self.word_pos_ids], initializer=tf.zeros([tf.shape(tags_scores)[1]+2], tf.int32))
# Optimization
tvars = tf.trainable_variables()
grads = tf.gradients(cost, tvars)
if clip_norm > 0:
grads, _ = tf.clip_by_global_norm(grads, clip_norm)
if lr_method == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(lr_rate)
elif lr_method == 'adagrad':
optimizer = tf.train.AdagradOptimizer(lr_rate)
elif lr_method == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(lr_rate)
elif lr_method == 'adam':
optimizer = tf.train.AdamOptimizer(lr_rate)
elif lr_method == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(lr_rate)
else:
raise("Not implemented learning method: %s" % lr_method)
train_op = optimizer.apply_gradients(zip(grads, tvars))
return cost, f_score, train_op
|
[
"tensorflow.shape",
"tensorflow.transpose",
"nn.forward",
"tensorflow.gradients",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.array",
"nn.LSTM",
"tensorflow.nn.dropout",
"tensorflow.reverse_sequence",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.scan",
"utils.shared",
"tensorflow.clip_by_global_norm",
"os.path.exists",
"tensorflow.placeholder",
"tensorflow.concat",
"nn.HiddenLayer",
"tensorflow.trainable_variables",
"tensorflow.train.AdamOptimizer",
"tensorflow.device",
"cPickle.dump",
"cPickle.load",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.train.AdadeltaOptimizer",
"tensorflow.reshape",
"utils.get_name",
"nn.EmbeddingLayer",
"os.makedirs",
"tensorflow.ones",
"tensorflow.train.RMSPropOptimizer",
"os.path.join",
"tensorflow.train.AdagradOptimizer",
"tensorflow.gather_nd"
] |
[((3248, 3309), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""word_ids"""'}), "(tf.int32, shape=[None, None], name='word_ids')\n", (3262, 3309), True, 'import tensorflow as tf\n'), ((3373, 3432), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]', 'name': '"""word_pos_ids"""'}), "(tf.int32, shape=[None], name='word_pos_ids')\n", (3387, 3432), True, 'import tensorflow as tf\n'), ((3483, 3554), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None, None]', 'name': '"""char_for_ids"""'}), "(tf.int32, shape=[None, None, None], name='char_for_ids')\n", (3497, 3554), True, 'import tensorflow as tf\n'), ((3633, 3704), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None, None]', 'name': '"""char_rev_ids"""'}), "(tf.int32, shape=[None, None, None], name='char_rev_ids')\n", (3647, 3704), True, 'import tensorflow as tf\n'), ((3783, 3848), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""char_pos_ids"""'}), "(tf.int32, shape=[None, None], name='char_pos_ids')\n", (3797, 3848), True, 'import tensorflow as tf\n'), ((3921, 3981), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]', 'name': '"""tag_ids"""'}), "(tf.int32, shape=[None, None], name='tag_ids')\n", (3935, 3981), True, 'import tensorflow as tf\n'), ((4045, 4116), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None, None]', 'name': '"""tag_id_trans"""'}), "(tf.int32, shape=[None, None, None], name='tag_id_trans')\n", (4059, 4116), True, 'import tensorflow as tf\n'), ((4185, 4256), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None, None]', 'name': '"""tag_id_index"""'}), "(tf.int32, shape=[None, None, None], name='tag_id_index')\n", (4199, 4256), True, 'import tensorflow as tf\n'), ((6942, 6968), 'tensorflow.concat', 'tf.concat', (['inputs'], {'axis': '(-1)'}), '(inputs, axis=-1)\n', (6951, 6968), True, 'import tensorflow as tf\n'), ((7241, 7310), 'nn.LSTM', 'LSTM', (['input_dim', 'word_lstm_dim'], {'with_batch': '(True)', 'name': '"""word_lstm_for"""'}), "(input_dim, word_lstm_dim, with_batch=True, name='word_lstm_for')\n", (7245, 7310), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((7364, 7433), 'nn.LSTM', 'LSTM', (['input_dim', 'word_lstm_dim'], {'with_batch': '(True)', 'name': '"""word_lstm_rev"""'}), "(input_dim, word_lstm_dim, with_batch=True, name='word_lstm_rev')\n", (7368, 7433), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((7580, 7653), 'tensorflow.transpose', 'tf.transpose', (['word_states_for[1]', '(1, 0, 2)'], {'name': '"""word_lstm_for_h_trans"""'}), "(word_states_for[1], (1, 0, 2), name='word_lstm_for_h_trans')\n", (7592, 7653), True, 'import tensorflow as tf\n'), ((7707, 7777), 'tensorflow.reverse_sequence', 'tf.reverse_sequence', (['inputs', 'self.word_pos_ids'], {'seq_dim': '(1)', 'batch_dim': '(0)'}), '(inputs, self.word_pos_ids, seq_dim=1, batch_dim=0)\n', (7726, 7777), True, 'import tensorflow as tf\n'), ((7867, 7940), 'tensorflow.transpose', 'tf.transpose', (['word_states_rev[1]', '(1, 0, 2)'], {'name': '"""word_lstm_rev_h_trans"""'}), "(word_states_rev[1], (1, 0, 2), name='word_lstm_rev_h_trans')\n", (7879, 7940), True, 'import tensorflow as tf\n'), ((7972, 8061), 'tensorflow.reverse_sequence', 'tf.reverse_sequence', (['word_lstm_rev_h_trans', 'self.word_pos_ids'], {'seq_dim': '(1)', 'batch_dim': 
'(0)'}), '(word_lstm_rev_h_trans, self.word_pos_ids, seq_dim=1,\n batch_dim=0)\n', (7991, 8061), True, 'import tensorflow as tf\n'), ((8425, 8479), 'nn.HiddenLayer', 'HiddenLayer', (['word_lstm_dim', 'n_tags'], {'name': '"""final_layer"""'}), "(word_lstm_dim, n_tags, name='final_layer')\n", (8436, 8479), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((12164, 12188), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (12186, 12188), True, 'import tensorflow as tf\n'), ((12205, 12230), 'tensorflow.gradients', 'tf.gradients', (['cost', 'tvars'], {}), '(cost, tvars)\n', (12217, 12230), True, 'import tensorflow as tf\n'), ((666, 686), 'utils.get_name', 'get_name', (['parameters'], {}), '(parameters)\n', (674, 686), False, 'from utils import shared, get_name\n'), ((741, 777), 'os.path.join', 'os.path.join', (['models_path', 'self.name'], {}), '(models_path, self.name)\n', (753, 777), False, 'import os\n'), ((852, 892), 'os.path.join', 'os.path.join', (['model_path', '"""mappings.pkl"""'], {}), "(model_path, 'mappings.pkl')\n", (864, 892), False, 'import os\n'), ((928, 970), 'os.path.join', 'os.path.join', (['model_path', '"""parameters.pkl"""'], {}), "(model_path, 'parameters.pkl')\n", (940, 970), False, 'import os\n'), ((1455, 1495), 'os.path.join', 'os.path.join', (['model_path', '"""mappings.pkl"""'], {}), "(model_path, 'mappings.pkl')\n", (1467, 1495), False, 'import os\n'), ((1531, 1573), 'os.path.join', 'os.path.join', (['model_path', '"""parameters.pkl"""'], {}), "(model_path, 'parameters.pkl')\n", (1543, 1573), False, 'import os\n'), ((2286, 2311), 'cPickle.dump', 'cPickle.dump', (['mappings', 'f'], {}), '(mappings, f)\n', (2298, 2311), False, 'import cPickle\n'), ((2474, 2489), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (2486, 2489), False, 'import cPickle\n'), ((4834, 4886), 'nn.EmbeddingLayer', 'EmbeddingLayer', (['n_chars', 'char_dim'], {'name': '"""char_layer"""'}), "(n_chars, char_dim, name='char_layer')\n", (4848, 4886), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((4916, 4984), 'nn.LSTM', 'LSTM', (['char_dim', 'char_lstm_dim'], {'with_batch': '(True)', 'name': '"""char_lstm_for"""'}), "(char_dim, char_lstm_dim, with_batch=True, name='char_lstm_for')\n", (4920, 4984), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((5046, 5114), 'nn.LSTM', 'LSTM', (['char_dim', 'char_lstm_dim'], {'with_batch': '(True)', 'name': '"""char_lstm_rev"""'}), "(char_dim, char_lstm_dim, with_batch=True, name='char_lstm_rev')\n", (5050, 5114), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((5367, 5401), 'tensorflow.shape', 'tf.shape', (['char_for_embedding_batch'], {}), '(char_for_embedding_batch)\n', (5375, 5401), True, 'import tensorflow as tf\n'), ((5566, 5665), 'tensorflow.reshape', 'tf.reshape', (['char_for_embedding_batch', '(shape_for[0] * shape_for[1], shape_for[2], shape_for[3])'], {}), '(char_for_embedding_batch, (shape_for[0] * shape_for[1],\n shape_for[2], shape_for[3]))\n', (5576, 5665), True, 'import tensorflow as tf\n'), ((5728, 5762), 'tensorflow.shape', 'tf.shape', (['char_rev_embedding_batch'], {}), '(char_rev_embedding_batch)\n', (5736, 5762), True, 'import tensorflow as tf\n'), ((5796, 5895), 'tensorflow.reshape', 'tf.reshape', (['char_rev_embedding_batch', '(shape_rev[0] * shape_rev[1], shape_rev[2], shape_rev[3])'], {}), '(char_rev_embedding_batch, (shape_rev[0] * shape_rev[1],\n shape_rev[2], shape_rev[3]))\n', (5806, 5895), True, 'import 
tensorflow as tf\n'), ((6120, 6198), 'tensorflow.transpose', 'tf.transpose', (['char_lstm_for_states[1]', '(1, 0, 2)'], {'name': '"""char_lstm_for_h_trans"""'}), "(char_lstm_for_states[1], (1, 0, 2), name='char_lstm_for_h_trans')\n", (6132, 6198), True, 'import tensorflow as tf\n'), ((6235, 6313), 'tensorflow.transpose', 'tf.transpose', (['char_lstm_rev_states[1]', '(1, 0, 2)'], {'name': '"""char_lstm_rev_h_trans"""'}), "(char_lstm_rev_states[1], (1, 0, 2), name='char_lstm_rev_h_trans')\n", (6247, 6313), True, 'import tensorflow as tf\n'), ((6344, 6422), 'tensorflow.gather_nd', 'tf.gather_nd', (['char_lstm_for_h_trans', 'self.char_pos_ids'], {'name': '"""char_for_output"""'}), "(char_lstm_for_h_trans, self.char_pos_ids, name='char_for_output')\n", (6356, 6422), True, 'import tensorflow as tf\n'), ((6453, 6531), 'tensorflow.gather_nd', 'tf.gather_nd', (['char_lstm_rev_h_trans', 'self.char_pos_ids'], {'name': '"""char_rev_output"""'}), "(char_lstm_rev_h_trans, self.char_pos_ids, name='char_rev_output')\n", (6465, 6531), True, 'import tensorflow as tf\n'), ((6568, 6640), 'tensorflow.reshape', 'tf.reshape', (['char_for_output', '(shape_for[0], shape_for[1], char_lstm_dim)'], {}), '(char_for_output, (shape_for[0], shape_for[1], char_lstm_dim))\n', (6578, 6640), True, 'import tensorflow as tf\n'), ((6677, 6749), 'tensorflow.reshape', 'tf.reshape', (['char_rev_output', '(shape_rev[0], shape_rev[1], char_lstm_dim)'], {}), '(char_rev_output, (shape_rev[0], shape_rev[1], char_lstm_dim))\n', (6687, 6749), True, 'import tensorflow as tf\n'), ((7094, 7128), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inputs', '(1 - dropout)'], {}), '(inputs, 1 - dropout)\n', (7107, 7128), True, 'import tensorflow as tf\n'), ((8111, 8175), 'tensorflow.concat', 'tf.concat', (['[word_lstm_for_output, word_lstm_rev_output]'], {'axis': '(-1)'}), '([word_lstm_for_output, word_lstm_rev_output], axis=-1)\n', (8120, 8175), True, 'import tensorflow as tf\n'), ((8200, 8288), 'nn.HiddenLayer', 'HiddenLayer', (['(2 * word_lstm_dim)', 'word_lstm_dim'], {'name': '"""tanh_layer"""', 'activation': '"""tanh"""'}), "(2 * word_lstm_dim, word_lstm_dim, name='tanh_layer', activation\n ='tanh')\n", (8211, 8288), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((8598, 8707), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'self.tag_ids', 'logits': 'tags_scores', 'name': '"""xentropy"""'}), "(labels=self.tag_ids, logits=\n tags_scores, name='xentropy')\n", (8644, 8707), True, 'import tensorflow as tf\n'), ((8722, 8773), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {'name': '"""xentropy_mean"""'}), "(cross_entropy, name='xentropy_mean')\n", (8736, 8773), True, 'import tensorflow as tf\n'), ((8814, 8861), 'utils.shared', 'shared', (['(n_tags + 2, n_tags + 2)', '"""transitions"""'], {}), "((n_tags + 2, n_tags + 2), 'transitions')\n", (8820, 8861), False, 'from utils import shared, get_name\n'), ((10297, 10429), 'tensorflow.scan', 'tf.scan', ([], {'fn': 'recurrence_real_score', 'elems': '[tags_scores, self.tag_id_index, self.tag_id_trans, self.word_pos_ids]', 'initializer': '(0.0)'}), '(fn=recurrence_real_score, elems=[tags_scores, self.tag_id_index,\n self.tag_id_trans, self.word_pos_ids], initializer=0.0)\n', (10304, 10429), True, 'import tensorflow as tf\n'), ((10995, 11087), 'tensorflow.scan', 'tf.scan', ([], {'fn': 'recurrence_all_path', 'elems': '[tags_scores, self.word_pos_ids]', 'initializer': '(0.0)'}), 
'(fn=recurrence_all_path, elems=[tags_scores, self.word_pos_ids],\n initializer=0.0)\n', (11002, 11087), True, 'import tensorflow as tf\n'), ((11237, 11263), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['tags_scores'], {}), '(tags_scores)\n', (11250, 11263), True, 'import tensorflow as tf\n'), ((12280, 12320), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', 'clip_norm'], {}), '(grads, clip_norm)\n', (12302, 12320), True, 'import tensorflow as tf\n'), ((12385, 12427), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['lr_rate'], {}), '(lr_rate)\n', (12418, 12427), True, 'import tensorflow as tf\n'), ((1056, 1087), 'os.path.exists', 'os.path.exists', (['self.model_path'], {}), '(self.model_path)\n', (1070, 1087), False, 'import os\n'), ((1105, 1133), 'os.makedirs', 'os.makedirs', (['self.model_path'], {}), '(self.model_path)\n', (1116, 1133), False, 'import os\n'), ((1248, 1275), 'cPickle.dump', 'cPickle.dump', (['parameters', 'f'], {}), '(parameters, f)\n', (1260, 1275), False, 'import cPickle\n'), ((1725, 1740), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (1737, 1740), False, 'import cPickle\n'), ((4492, 4511), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (4501, 4511), True, 'import tensorflow as tf\n'), ((4542, 4594), 'nn.EmbeddingLayer', 'EmbeddingLayer', (['n_words', 'word_dim'], {'name': '"""word_layer"""'}), "(n_words, word_dim, name='word_layer')\n", (4556, 4594), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((5166, 5185), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (5175, 5185), True, 'import tensorflow as tf\n'), ((10231, 10262), 'tensorflow.reshape', 'tf.reshape', (['real_path_score', '[]'], {}), '(real_path_score, [])\n', (10241, 10262), True, 'import tensorflow as tf\n'), ((10798, 10833), 'tensorflow.concat', 'tf.concat', (['[b_s, obvs, e_s]'], {'axis': '(0)'}), '([b_s, obvs, e_s], axis=0)\n', (10807, 10833), True, 'import tensorflow as tf\n'), ((10869, 10903), 'nn.forward', 'forward', (['observations', 'transitions'], {}), '(observations, transitions)\n', (10876, 10903), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((10927, 10959), 'tensorflow.reshape', 'tf.reshape', (['all_paths_scores', '[]'], {}), '(all_paths_scores, [])\n', (10937, 10959), True, 'import tensorflow as tf\n'), ((11105, 11165), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(real_path_score_list - all_paths_scores_list)'], {}), '(real_path_score_list - all_paths_scores_list)\n', (11119, 11165), True, 'import tensorflow as tf\n'), ((11636, 11671), 'tensorflow.concat', 'tf.concat', (['[b_s, obvs, e_s]'], {'axis': '(0)'}), '([b_s, obvs, e_s], axis=0)\n', (11645, 11671), True, 'import tensorflow as tf\n'), ((11707, 11806), 'nn.forward', 'forward', (['observations', 'transitions'], {'viterbi': '(True)', 'return_alpha': '(False)', 'return_best_sequence': '(True)'}), '(observations, transitions, viterbi=True, return_alpha=False,\n return_best_sequence=True)\n', (11714, 11806), False, 'from nn import HiddenLayer, EmbeddingLayer, LSTM, forward\n'), ((12489, 12523), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (['lr_rate'], {}), '(lr_rate)\n', (12514, 12523), True, 'import tensorflow as tf\n'), ((8906, 8947), 'numpy.array', 'np.array', (['[[small] * n_tags + [0, small]]'], {}), '([[small] * n_tags + [0, small]])\n', (8914, 8947), True, 'import numpy as np\n'), ((8985, 9026), 'numpy.array', 'np.array', (['[[small] * n_tags + 
[small, 0]]'], {}), '([[small] * n_tags + [small, 0]])\n', (8993, 9026), True, 'import numpy as np\n'), ((10060, 10110), 'tensorflow.gather_nd', 'tf.gather_nd', (['tags_score_slice', 'tag_id_index_slice'], {}), '(tags_score_slice, tag_id_index_slice)\n', (10072, 10110), True, 'import tensorflow as tf\n'), ((10161, 10206), 'tensorflow.gather_nd', 'tf.gather_nd', (['transitions', 'tag_id_trans_slice'], {}), '(transitions, tag_id_trans_slice)\n', (10173, 10206), True, 'import tensorflow as tf\n'), ((10647, 10673), 'tensorflow.shape', 'tf.shape', (['tags_score_slice'], {}), '(tags_score_slice)\n', (10655, 10673), True, 'import tensorflow as tf\n'), ((11485, 11511), 'tensorflow.shape', 'tf.shape', (['tags_score_slice'], {}), '(tags_score_slice)\n', (11493, 11511), True, 'import tensorflow as tf\n'), ((12586, 12621), 'tensorflow.train.AdadeltaOptimizer', 'tf.train.AdadeltaOptimizer', (['lr_rate'], {}), '(lr_rate)\n', (12612, 12621), True, 'import tensorflow as tf\n'), ((12680, 12711), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr_rate'], {}), '(lr_rate)\n', (12702, 12711), True, 'import tensorflow as tf\n'), ((10737, 10756), 'tensorflow.ones', 'tf.ones', (['(s_len, 2)'], {}), '((s_len, 2))\n', (10744, 10756), True, 'import tensorflow as tf\n'), ((11575, 11594), 'tensorflow.ones', 'tf.ones', (['(s_len, 2)'], {}), '((s_len, 2))\n', (11582, 11594), True, 'import tensorflow as tf\n'), ((12773, 12807), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['lr_rate'], {}), '(lr_rate)\n', (12798, 12807), True, 'import tensorflow as tf\n'), ((12084, 12105), 'tensorflow.shape', 'tf.shape', (['tags_scores'], {}), '(tags_scores)\n', (12092, 12105), True, 'import tensorflow as tf\n'), ((11877, 11897), 'tensorflow.shape', 'tf.shape', (['tags_score'], {}), '(tags_score)\n', (11885, 11897), True, 'import tensorflow as tf\n')]
|
import numpy as np
import json
from collections import Counter
import matplotlib.pyplot as plt
DATASET_DIR = './dataset/tacred/train_mod.json'
with open(DATASET_DIR) as f:
examples = json.load(f)
def plot_counts(data):
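    # Bar chart of relation label counts in descending order; the "no_relation" class is removed first.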
counts = Counter(data)
del counts["no_relation"]
labels, values = zip(*counts.items())
indexes = np.arange(len(labels))
width = 1
idx = list(reversed(np.argsort(values)))
indexes_sorted = indexes[idx]
values_sorted = np.array(values)[idx]
labels_sorted = np.array(labels)[idx]
print(values_sorted)
plt.bar(range(len(indexes_sorted)), values_sorted, width)
    plt.xticks(range(len(indexes_sorted)), labels_sorted, rotation='vertical')  # tick positions must match the bar positions (0..n-1), not the permuted original indices
plt.ylabel("Number of examples")
plt.tight_layout()
plt.show()
# relation distribution
print('NUM EXAMPLES', len(examples))
relations = [e['relation'] for e in examples]
print("NUM_UNIQUE_RELATIONS", len(Counter(relations)))
plot_counts(relations)
def plot_counts_sent(data):
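    # Histogram of sentence lengths (in tokens), clipped to the 0-100 range.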
    plt.hist(data, range=(0, 100), bins=100)
plt.ylabel("Number of examples")
plt.xlabel("Sentence Length")
plt.show()
# sentence length distribution
sents = [len(e['token']) for e in examples]
plot_counts_sent(sents)
|
[
"matplotlib.pyplot.hist",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"collections.Counter",
"numpy.array",
"numpy.argsort",
"matplotlib.pyplot.tight_layout",
"json.load",
"matplotlib.pyplot.show"
] |
[((190, 202), 'json.load', 'json.load', (['f'], {}), '(f)\n', (199, 202), False, 'import json\n'), ((240, 253), 'collections.Counter', 'Counter', (['data'], {}), '(data)\n', (247, 253), False, 'from collections import Counter\n'), ((634, 710), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(indexes_sorted + width * 0.5)', 'labels_sorted'], {'rotation': '"""vertical"""'}), "(indexes_sorted + width * 0.5, labels_sorted, rotation='vertical')\n", (644, 710), True, 'import matplotlib.pyplot as plt\n'), ((715, 747), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of examples"""'], {}), "('Number of examples')\n", (725, 747), True, 'import matplotlib.pyplot as plt\n'), ((752, 770), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (768, 770), True, 'import matplotlib.pyplot as plt\n'), ((775, 785), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (783, 785), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1046), 'matplotlib.pyplot.hist', 'plt.hist', (['sents'], {'range': '(0, 100)', 'bins': '(100)'}), '(sents, range=(0, 100), bins=100)\n', (1013, 1046), True, 'import matplotlib.pyplot as plt\n'), ((1051, 1083), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of examples"""'], {}), "('Number of examples')\n", (1061, 1083), True, 'import matplotlib.pyplot as plt\n'), ((1088, 1117), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sentence Length"""'], {}), "('Sentence Length')\n", (1098, 1117), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1130, 1132), True, 'import matplotlib.pyplot as plt\n'), ((478, 494), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (486, 494), True, 'import numpy as np\n'), ((520, 536), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (528, 536), True, 'import numpy as np\n'), ((928, 946), 'collections.Counter', 'Counter', (['relations'], {}), '(relations)\n', (935, 946), False, 'from collections import Counter\n'), ((403, 421), 'numpy.argsort', 'np.argsort', (['values'], {}), '(values)\n', (413, 421), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 19 06:10:55 2018
@author: <NAME>
Demo of gradient boosting tree
A very nice reference for gradient boosting
http://homes.cs.washington.edu/~tqchen/pdf/BoostedTree.pdf
LightGBM
https://github.com/Microsoft/LightGBM/tree/master/examples/python-guide
Catboost
https://github.com/catboost/tutorials
Comparative study of different gradient boosting tree implementations
https://towardsdatascience.com/catboost-vs-light-gbm-vs-xgboost-5f93620723db
"""
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import numpy as np
import lightgbm as lgb
import catboost as cb
df_wine = pd.read_csv('../Data/winequality-red.csv', sep=';')
df_shape = df_wine.shape
X, y = df_wine.iloc[:, 0:df_shape[1]-1], df_wine.iloc[:, df_shape[1]-1]
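# Shift the wine-quality scores so that the class labels start at 0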
y = y - np.min(y)
X = X.values  # convert to numpy array
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=0)
gbt = GradientBoostingClassifier( n_estimators=100, learning_rate=0.1, random_state=1)
gbt.fit(X_train, y_train)
print( "score: {}".format( gbt.score(X_test, y_test) ) )
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_test = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
params = {
'num_leaves': 6,
'metric': ('l1', 'l2'),
'verbose': 0
}
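# 'l1' and 'l2' are LightGBM's built-in mean-absolute-error and mean-squared-error metrics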
print('Starting training...')
# train
evals_result = {}
gbm = lgb.train(params,
lgb_train,
num_boost_round=100,
valid_sets=[lgb_train, lgb_test],
feature_name=['f' + str(i + 1) for i in range(X_train.shape[-1])],
categorical_feature=[11],
evals_result=evals_result,
verbose_eval=10)
print('Plotting feature importances...')
ax = lgb.plot_importance(gbm, max_num_features=5)
plt.show()
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
print('The rmse of prediction is: {}'.format(mean_squared_error(y_test, y_pred) ** 0.5))
|
[
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_squared_error",
"lightgbm.Dataset",
"numpy.min",
"sklearn.ensemble.GradientBoostingClassifier",
"lightgbm.plot_importance",
"matplotlib.pyplot.show"
] |
[((768, 819), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/winequality-red.csv"""'], {'sep': '""";"""'}), "('../Data/winequality-red.csv', sep=';')\n", (779, 819), True, 'import pandas as pd\n'), ((1008, 1061), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(X, y, test_size=0.2, random_state=0)\n', (1024, 1061), False, 'from sklearn.model_selection import train_test_split\n'), ((1072, 1151), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'n_estimators': '(100)', 'learning_rate': '(0.1)', 'random_state': '(1)'}), '(n_estimators=100, learning_rate=0.1, random_state=1)\n', (1098, 1151), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((1279, 1308), 'lightgbm.Dataset', 'lgb.Dataset', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (1290, 1308), True, 'import lightgbm as lgb\n'), ((1320, 1368), 'lightgbm.Dataset', 'lgb.Dataset', (['X_test', 'y_test'], {'reference': 'lgb_train'}), '(X_test, y_test, reference=lgb_train)\n', (1331, 1368), True, 'import lightgbm as lgb\n'), ((1933, 1977), 'lightgbm.plot_importance', 'lgb.plot_importance', (['gbm'], {'max_num_features': '(5)'}), '(gbm, max_num_features=5)\n', (1952, 1977), True, 'import lightgbm as lgb\n'), ((1978, 1988), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1986, 1988), True, 'import matplotlib.pyplot as plt\n'), ((926, 935), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (932, 935), True, 'import numpy as np\n'), ((2115, 2149), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2133, 2149), False, 'from sklearn.metrics import mean_squared_error\n')]
|
import numpy
from scipy.ndimage import gaussian_filter
from skimage.data import binary_blobs
from skimage.util import random_noise
from aydin.it.transforms.fixedpattern import FixedPatternTransform
def add_patterned_noise(image, n):
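    # Apply a per-pixel random gain and offset (the "fixed pattern"), then add Gaussian and salt-and-pepper noise on top.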
image = image.copy()
image *= 1 + 0.1 * (numpy.random.rand(n, n) - 0.5)
image += 0.1 * numpy.random.rand(n, n)
# image += 0.1*numpy.random.rand(n)[]
image = random_noise(image, mode="gaussian", var=0.00001, seed=0)
image = random_noise(image, mode="s&p", amount=0.000001, seed=0)
return image
def test_fixed_pattern_real():
n = 128
image = binary_blobs(length=n, seed=1, n_dim=3, volume_fraction=0.01).astype(
numpy.float32
)
image = gaussian_filter(image, sigma=4)
noisy = add_patterned_noise(image, n).astype(numpy.float32)
bs = FixedPatternTransform(sigma=0)
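    # preprocess() should suppress the fixed pattern (bringing the map close to the clean image);
    # postprocess() should undo that correction and recover the noisy input -- both checked by the asserts below.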
preprocessed = bs.preprocess(noisy)
postprocessed = bs.postprocess(preprocessed)
# import napari
# with napari.gui_qt():
# viewer = napari.Viewer()
# viewer.add_image(image, name='image')
# viewer.add_image(noisy, name='noisy')
# viewer.add_image(preprocessed, name='preprocessed')
# viewer.add_image(postprocessed, name='postprocessed')
assert image.shape == postprocessed.shape
assert image.dtype == postprocessed.dtype
assert numpy.abs(preprocessed - image).mean() < 0.007
assert preprocessed.dtype == postprocessed.dtype
assert numpy.abs(postprocessed - noisy).mean() < 1e-8
# import napari
# with napari.gui_qt():
# viewer = napari.Viewer()
# viewer.add_image(image, name='image')
# viewer.add_image(noisy, name='noisy')
# viewer.add_image(corrected, name='corrected')
|
[
"numpy.abs",
"aydin.it.transforms.fixedpattern.FixedPatternTransform",
"numpy.random.rand",
"skimage.data.binary_blobs",
"skimage.util.random_noise",
"scipy.ndimage.gaussian_filter"
] |
[((413, 468), 'skimage.util.random_noise', 'random_noise', (['image'], {'mode': '"""gaussian"""', 'var': '(1e-05)', 'seed': '(0)'}), "(image, mode='gaussian', var=1e-05, seed=0)\n", (425, 468), False, 'from skimage.util import random_noise\n'), ((483, 536), 'skimage.util.random_noise', 'random_noise', (['image'], {'mode': '"""s&p"""', 'amount': '(1e-06)', 'seed': '(0)'}), "(image, mode='s&p', amount=1e-06, seed=0)\n", (495, 536), False, 'from skimage.util import random_noise\n'), ((724, 755), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['image'], {'sigma': '(4)'}), '(image, sigma=4)\n', (739, 755), False, 'from scipy.ndimage import gaussian_filter\n'), ((830, 860), 'aydin.it.transforms.fixedpattern.FixedPatternTransform', 'FixedPatternTransform', ([], {'sigma': '(0)'}), '(sigma=0)\n', (851, 860), False, 'from aydin.it.transforms.fixedpattern import FixedPatternTransform\n'), ((335, 358), 'numpy.random.rand', 'numpy.random.rand', (['n', 'n'], {}), '(n, n)\n', (352, 358), False, 'import numpy\n'), ((614, 675), 'skimage.data.binary_blobs', 'binary_blobs', ([], {'length': 'n', 'seed': '(1)', 'n_dim': '(3)', 'volume_fraction': '(0.01)'}), '(length=n, seed=1, n_dim=3, volume_fraction=0.01)\n', (626, 675), False, 'from skimage.data import binary_blobs\n'), ((285, 308), 'numpy.random.rand', 'numpy.random.rand', (['n', 'n'], {}), '(n, n)\n', (302, 308), False, 'import numpy\n'), ((1361, 1392), 'numpy.abs', 'numpy.abs', (['(preprocessed - image)'], {}), '(preprocessed - image)\n', (1370, 1392), False, 'import numpy\n'), ((1473, 1505), 'numpy.abs', 'numpy.abs', (['(postprocessed - noisy)'], {}), '(postprocessed - noisy)\n', (1482, 1505), False, 'import numpy\n')]
|
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.com/
#
# Author: <NAME> (<EMAIL>)
"""FFT methods for computing / analyzing frequency response of audio.
This is simply a wrapper around FFT support in numpy.
Initial FFT code inspired from the code posted here:
http://www.raspberrypi.org/phpBB3/viewtopic.php?t=35838&p=454041
Optimizations from work by S<NAME>:
http://www.instructables.com/id/Raspberry-Pi-Spectrum-Analyzer-with-RGB-LED-Strip-/
Third party dependencies:
numpy: for FFT calculation - http://www.numpy.org/
"""
from numpy import sum as npsum
from numpy import abs as npabs
from numpy import log10, frombuffer, empty, hanning, fft, delete, int16, zeros
def calculate_levels(data, chunk_size, sample_rate, frequency_limits, num_bins, input_channels=2):
"""Calculate frequency response for each channel defined in frequency_limits
:param data: decoder.frames(), audio data for fft calculations
:type data: decoder.frames
:param chunk_size: chunk size of audio data
:type chunk_size: int
:param sample_rate: audio file sample rate
:type sample_rate: int
:param frequency_limits: list of frequency_limits
:type frequency_limits: list
:param num_bins: length of gpio to process
:type num_bins: int
:param input_channels: number of audio input channels to process for (default=2)
:type input_channels: int
:return:
:rtype: numpy.array
"""
# create a numpy array, taking just the left channel if stereo
data_stereo = frombuffer(data, dtype=int16)
if input_channels == 2:
# data has 2 bytes per channel
        data = empty(len(data) // (2 * input_channels))  # integer division so empty() gets an int length
# pull out the even values, just using left channel
data[:] = data_stereo[::2]
elif input_channels == 1:
data = data_stereo
# if you take an FFT of a chunk of audio, the edges will look like
# super high frequency cutoffs. Applying a window tapers the edges
# of each end of the chunk down to zero.
data = data * hanning(len(data))
# Apply FFT - real data
fourier = fft.rfft(data)
# Remove last element in array to make it the same size as chunk_size
fourier = delete(fourier, len(fourier) - 1)
# Calculate the power spectrum
power = npabs(fourier) ** 2
matrix = zeros(num_bins, dtype='float64')
for pin in range(num_bins):
# take the log10 of the resulting sum to approximate how human ears
# perceive sound levels
# Get the power array index corresponding to a particular frequency.
idx1 = int(chunk_size * frequency_limits[pin][0] / sample_rate)
idx2 = int(chunk_size * frequency_limits[pin][1] / sample_rate)
        # if idx1 equals idx2 the slice is empty and the sum is an invalid value;
        # we can fix this by incrementing idx2 by 1. This is a temporary fix
# for RuntimeWarning: invalid value encountered in double_scalars
# generated while calculating the standard deviation. This warning
# results in some channels not lighting up during playback.
if idx1 == idx2:
idx2 += 1
npsums = npsum(power[idx1:idx2:1])
# if the sum is 0 lets not take log10, just use 0
# eliminates RuntimeWarning: divide by zero encountered in log10, does not insert -inf
if npsums == 0:
matrix[pin] = 0
else:
matrix[pin] = log10(npsums)
return matrix
|
[
"numpy.abs",
"numpy.log10",
"numpy.fft.rfft",
"numpy.sum",
"numpy.zeros",
"numpy.frombuffer"
] |
[((1559, 1588), 'numpy.frombuffer', 'frombuffer', (['data'], {'dtype': 'int16'}), '(data, dtype=int16)\n', (1569, 1588), False, 'from numpy import log10, frombuffer, empty, hanning, fft, delete, int16, zeros\n'), ((2132, 2146), 'numpy.fft.rfft', 'fft.rfft', (['data'], {}), '(data)\n', (2140, 2146), False, 'from numpy import log10, frombuffer, empty, hanning, fft, delete, int16, zeros\n'), ((2352, 2384), 'numpy.zeros', 'zeros', (['num_bins'], {'dtype': '"""float64"""'}), "(num_bins, dtype='float64')\n", (2357, 2384), False, 'from numpy import log10, frombuffer, empty, hanning, fft, delete, int16, zeros\n'), ((2318, 2332), 'numpy.abs', 'npabs', (['fourier'], {}), '(fourier)\n', (2323, 2332), True, 'from numpy import abs as npabs\n'), ((3207, 3232), 'numpy.sum', 'npsum', (['power[idx1:idx2:1]'], {}), '(power[idx1:idx2:1])\n', (3212, 3232), True, 'from numpy import sum as npsum\n'), ((3487, 3500), 'numpy.log10', 'log10', (['npsums'], {}), '(npsums)\n', (3492, 3500), False, 'from numpy import log10, frombuffer, empty, hanning, fft, delete, int16, zeros\n')]
|
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
default_params = {
'text.usetex': False,
'font.family': 'Times New Roman',
'font.serif': 'Times New Roman'
}
if __name__ == '__main__':
plt.rcParams.update(default_params)
myfont1 = matplotlib.font_manager.FontProperties(fname='C:\\times.ttf', size=14)
myfont2 = matplotlib.font_manager.FontProperties(fname='C:\\times.ttf', size=12)
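    # First figure: capacity-vs-error sketch with a clearly visible generalization gap.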
plt.figure(figsize=(5, 3))
x = np.linspace(0.001, 5, 1000)
y1 = 0.001 * x ** 2 + 0.02 * 1 / x + 0.02
y2 = 0.12 * x ** 2 + 0.04 * 1 / x + 0.06
plt.plot(x, y1, color='b', linestyle='--', label='Training error')
plt.plot(x, y2, color='g', linestyle='-', label='Generalization error')
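    # cx is approximately the minimiser of the generalization-error curve: d/dx(0.12x^2 + 0.04/x) = 0 gives x = (1/6)**(1/3), i.e. about 0.55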
cx = 0.55
cy = 0.12 * cx ** 2 + 0.04 * 1 / cx + 0.06
plt.plot([cx, cx], [-0.01, cy], color='r', linestyle=':')
plt.plot([-0.01, cx], [cy, cy], color='r', linestyle=':')
plt.text(cx-0.3, -0.12, 'Optimal capacity', fontproperties=myfont2)
plt.arrow(1.6, 0.21, 0.0, 0.12, head_width=0.03, head_length=0.03, shape='full', fc='black', ec='black', linewidth=1)
plt.arrow(1.6, 0.21, 0.0, -0.12, head_width=0.03, head_length=0.03, shape='full', fc='black', ec='black', linewidth=1)
plt.text(1.65, 0.18, 'Generalization gap', fontproperties=myfont2)
plt.legend(loc='upper right', prop=myfont1)
plt.xticks([0])
plt.yticks([])
plt.xlabel('Capacity', fontproperties=myfont1)
plt.ylabel('Error', fontproperties=myfont1)
plt.xlim((-0.01, 2.5))
plt.ylim((-0.01, 1.2))
plt.savefig('gap1.pdf', format='pdf', dpi=900, bbox_inches='tight')
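    # Second figure: the same sketch with a smaller gap between training and generalization error.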
plt.figure(figsize=(5, 3))
x = np.linspace(0.001, 5, 1000)
y1 = 0.005 * x ** 2 + 0.03 * 1 / x + 0.03
y2 = 0.04 * x ** 2 + 0.05 * 1 / x + 0.03
plt.plot(x, y1, color='b', linestyle='--', label='Training error')
plt.plot(x, y2, color='g', linestyle='-', label='Generalization error')
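    # As above, cx approximates the minimiser: d/dx(0.04x^2 + 0.05/x) = 0 gives x = (0.05/0.08)**(1/3), i.e. about 0.855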
cx = 0.855
cy = 0.04 * cx ** 2 + 0.05 * 1 / cx + 0.03
plt.plot([cx, cx], [-0.01, cy], color='r', linestyle=':')
plt.plot([-0.01, cx], [cy, cy], color='r', linestyle=':')
plt.text(cx-0.3, -0.12, 'Optimal capacity', fontproperties=myfont2)
plt.legend(loc='upper right', prop=myfont1)
plt.xticks([0])
plt.yticks([])
plt.xlabel('Capacity', fontproperties=myfont1)
plt.ylabel('Error', fontproperties=myfont1)
plt.xlim((-0.01, 2.5))
plt.ylim((-0.01, 1.2))
plt.savefig('gap2.pdf', format='pdf', dpi=900, bbox_inches='tight')
|
[
"matplotlib.pyplot.text",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.arrow",
"matplotlib.pyplot.legend"
] |
[((223, 258), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['default_params'], {}), '(default_params)\n', (242, 258), True, 'import matplotlib.pyplot as plt\n'), ((273, 343), 'matplotlib.font_manager.FontProperties', 'matplotlib.font_manager.FontProperties', ([], {'fname': '"""C:\\\\times.ttf"""', 'size': '(14)'}), "(fname='C:\\\\times.ttf', size=14)\n", (311, 343), False, 'import matplotlib\n'), ((358, 428), 'matplotlib.font_manager.FontProperties', 'matplotlib.font_manager.FontProperties', ([], {'fname': '"""C:\\\\times.ttf"""', 'size': '(12)'}), "(fname='C:\\\\times.ttf', size=12)\n", (396, 428), False, 'import matplotlib\n'), ((433, 459), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 3)'}), '(figsize=(5, 3))\n', (443, 459), True, 'import matplotlib.pyplot as plt\n'), ((468, 495), 'numpy.linspace', 'np.linspace', (['(0.001)', '(5)', '(1000)'], {}), '(0.001, 5, 1000)\n', (479, 495), True, 'import numpy as np\n'), ((591, 657), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1'], {'color': '"""b"""', 'linestyle': '"""--"""', 'label': '"""Training error"""'}), "(x, y1, color='b', linestyle='--', label='Training error')\n", (599, 657), True, 'import matplotlib.pyplot as plt\n'), ((662, 733), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {'color': '"""g"""', 'linestyle': '"""-"""', 'label': '"""Generalization error"""'}), "(x, y2, color='g', linestyle='-', label='Generalization error')\n", (670, 733), True, 'import matplotlib.pyplot as plt\n'), ((799, 856), 'matplotlib.pyplot.plot', 'plt.plot', (['[cx, cx]', '[-0.01, cy]'], {'color': '"""r"""', 'linestyle': '""":"""'}), "([cx, cx], [-0.01, cy], color='r', linestyle=':')\n", (807, 856), True, 'import matplotlib.pyplot as plt\n'), ((861, 918), 'matplotlib.pyplot.plot', 'plt.plot', (['[-0.01, cx]', '[cy, cy]'], {'color': '"""r"""', 'linestyle': '""":"""'}), "([-0.01, cx], [cy, cy], color='r', linestyle=':')\n", (869, 918), True, 'import matplotlib.pyplot as plt\n'), ((923, 992), 'matplotlib.pyplot.text', 'plt.text', (['(cx - 0.3)', '(-0.12)', '"""Optimal capacity"""'], {'fontproperties': 'myfont2'}), "(cx - 0.3, -0.12, 'Optimal capacity', fontproperties=myfont2)\n", (931, 992), True, 'import matplotlib.pyplot as plt\n'), ((995, 1117), 'matplotlib.pyplot.arrow', 'plt.arrow', (['(1.6)', '(0.21)', '(0.0)', '(0.12)'], {'head_width': '(0.03)', 'head_length': '(0.03)', 'shape': '"""full"""', 'fc': '"""black"""', 'ec': '"""black"""', 'linewidth': '(1)'}), "(1.6, 0.21, 0.0, 0.12, head_width=0.03, head_length=0.03, shape=\n 'full', fc='black', ec='black', linewidth=1)\n", (1004, 1117), True, 'import matplotlib.pyplot as plt\n'), ((1117, 1240), 'matplotlib.pyplot.arrow', 'plt.arrow', (['(1.6)', '(0.21)', '(0.0)', '(-0.12)'], {'head_width': '(0.03)', 'head_length': '(0.03)', 'shape': '"""full"""', 'fc': '"""black"""', 'ec': '"""black"""', 'linewidth': '(1)'}), "(1.6, 0.21, 0.0, -0.12, head_width=0.03, head_length=0.03, shape=\n 'full', fc='black', ec='black', linewidth=1)\n", (1126, 1240), True, 'import matplotlib.pyplot as plt\n'), ((1240, 1306), 'matplotlib.pyplot.text', 'plt.text', (['(1.65)', '(0.18)', '"""Generalization gap"""'], {'fontproperties': 'myfont2'}), "(1.65, 0.18, 'Generalization gap', fontproperties=myfont2)\n", (1248, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1311, 1354), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'prop': 'myfont1'}), "(loc='upper right', prop=myfont1)\n", (1321, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1359, 1374), 
'matplotlib.pyplot.xticks', 'plt.xticks', (['[0]'], {}), '([0])\n', (1369, 1374), True, 'import matplotlib.pyplot as plt\n'), ((1379, 1393), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1389, 1393), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1444), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Capacity"""'], {'fontproperties': 'myfont1'}), "('Capacity', fontproperties=myfont1)\n", (1408, 1444), True, 'import matplotlib.pyplot as plt\n'), ((1449, 1492), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {'fontproperties': 'myfont1'}), "('Error', fontproperties=myfont1)\n", (1459, 1492), True, 'import matplotlib.pyplot as plt\n'), ((1497, 1519), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.01, 2.5)'], {}), '((-0.01, 2.5))\n', (1505, 1519), True, 'import matplotlib.pyplot as plt\n'), ((1524, 1546), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.01, 1.2)'], {}), '((-0.01, 1.2))\n', (1532, 1546), True, 'import matplotlib.pyplot as plt\n'), ((1551, 1618), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gap1.pdf"""'], {'format': '"""pdf"""', 'dpi': '(900)', 'bbox_inches': '"""tight"""'}), "('gap1.pdf', format='pdf', dpi=900, bbox_inches='tight')\n", (1562, 1618), True, 'import matplotlib.pyplot as plt\n'), ((1624, 1650), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 3)'}), '(figsize=(5, 3))\n', (1634, 1650), True, 'import matplotlib.pyplot as plt\n'), ((1659, 1686), 'numpy.linspace', 'np.linspace', (['(0.001)', '(5)', '(1000)'], {}), '(0.001, 5, 1000)\n', (1670, 1686), True, 'import numpy as np\n'), ((1782, 1848), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1'], {'color': '"""b"""', 'linestyle': '"""--"""', 'label': '"""Training error"""'}), "(x, y1, color='b', linestyle='--', label='Training error')\n", (1790, 1848), True, 'import matplotlib.pyplot as plt\n'), ((1853, 1924), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {'color': '"""g"""', 'linestyle': '"""-"""', 'label': '"""Generalization error"""'}), "(x, y2, color='g', linestyle='-', label='Generalization error')\n", (1861, 1924), True, 'import matplotlib.pyplot as plt\n'), ((1991, 2048), 'matplotlib.pyplot.plot', 'plt.plot', (['[cx, cx]', '[-0.01, cy]'], {'color': '"""r"""', 'linestyle': '""":"""'}), "([cx, cx], [-0.01, cy], color='r', linestyle=':')\n", (1999, 2048), True, 'import matplotlib.pyplot as plt\n'), ((2053, 2110), 'matplotlib.pyplot.plot', 'plt.plot', (['[-0.01, cx]', '[cy, cy]'], {'color': '"""r"""', 'linestyle': '""":"""'}), "([-0.01, cx], [cy, cy], color='r', linestyle=':')\n", (2061, 2110), True, 'import matplotlib.pyplot as plt\n'), ((2115, 2184), 'matplotlib.pyplot.text', 'plt.text', (['(cx - 0.3)', '(-0.12)', '"""Optimal capacity"""'], {'fontproperties': 'myfont2'}), "(cx - 0.3, -0.12, 'Optimal capacity', fontproperties=myfont2)\n", (2123, 2184), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2230), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'prop': 'myfont1'}), "(loc='upper right', prop=myfont1)\n", (2197, 2230), True, 'import matplotlib.pyplot as plt\n'), ((2235, 2250), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0]'], {}), '([0])\n', (2245, 2250), True, 'import matplotlib.pyplot as plt\n'), ((2255, 2269), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2265, 2269), True, 'import matplotlib.pyplot as plt\n'), ((2274, 2320), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Capacity"""'], {'fontproperties': 'myfont1'}), "('Capacity', fontproperties=myfont1)\n", (2284, 2320), True, 'import 
matplotlib.pyplot as plt\n'), ((2325, 2368), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {'fontproperties': 'myfont1'}), "('Error', fontproperties=myfont1)\n", (2335, 2368), True, 'import matplotlib.pyplot as plt\n'), ((2373, 2395), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.01, 2.5)'], {}), '((-0.01, 2.5))\n', (2381, 2395), True, 'import matplotlib.pyplot as plt\n'), ((2400, 2422), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.01, 1.2)'], {}), '((-0.01, 1.2))\n', (2408, 2422), True, 'import matplotlib.pyplot as plt\n'), ((2427, 2494), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gap2.pdf"""'], {'format': '"""pdf"""', 'dpi': '(900)', 'bbox_inches': '"""tight"""'}), "('gap2.pdf', format='pdf', dpi=900, bbox_inches='tight')\n", (2438, 2494), True, 'import matplotlib.pyplot as plt\n')]
|
#
# plot-sine-wave.py
# Produce a PNG file of a sine wave plot
#
# <NAME> | https://butiran.github.io
#
# Execute: py plot-sine-wave.py
# Output: sine-t-<time>.png
#
# 20210212
# 1901 Create this by modifying moving-sine-wave.py from [1].
# 1902 Remove FuncAnimation from matplotlib.animation.
# 1904 Can save as PNG as in [2].
# 1949 Add comments and can show figure, learn Line2D [3].
# 1955 Can set axes label [4].
# 2002 Show grid [5].
# 2011 Use arange but modify [6] from xtics to set_xtics.
# 2021 Add text box [7].
# 2027 Set figure size [8], but in inch?
# 2038 Convert time with certain precision for output [9].
# 2024 Change size for Jekyll blog, hopefully better.
# 2120 Add _varphi to the function wave.
#
# References
# 1. <NAME>, "Animations with Mathplotlib", Towards Data Science, 14 Apr 2019, url https://towardsdatascience.com/animation-with-matplotlib-d96375c5442c [20210212].
# 2. Yann, <NAME>, "Answer to 'matplotlib savefig in jpeg format'", StackOverflow, 01 Aug 2018 at 01:48, url https://stackoverflow.com/a/8827350 [20210212].
# 3. SHUBHAMSINGH10, "Matplotlib.axes.Axes.plot() in Python", GeeksforGeeks, 12 Apr 2020, url https://www.geeksforgeeks.org/matplotlib-axes-axes-plot-in-python/ [20210212].
# 4. <NAME>, "Answer to 'How to set X and Y axis Title in matplotlib.pyplot'", StackOverflow, 08 Jun 2020 at 06:29, url https://stackoverflow.com/a/62256244 [20210212].
# 5. <NAME>, <NAME>, "Answer to 'How do I draw a grid onto a plot in Python?'", StackOverflow, 20 Mar 2017 at 17:42, url https://stackoverflow.com/a/8210686 [20210212].
# 6. unutbu, "Answer to 'Changing the “tick frequency” on x or y axis in matplotlib?'", StackOverflow, 26 Sep 2012 at 19:24, url https://stackoverflow.com/a/12608937 [20210212].
# 7. Anake, "Answer to 'automatically position text box in matplotlib'", StackOverflow, 29 Oct 2015 at 14:59, url https://stackoverflow.com/a/33417697 [20210212].
# 8. iPas, cbare, "Answer to 'How do you change the size of figures drawn with matplotlib?'", StackOverflow, 01 Feb 2015 at 06:21, url https://stackoverflow.com/a/24073700 [20210212].
# 9. HAL 9001, "Answer to 'Convert floating point number to a certain precision, and then copy to string'", StackOverflow, 06 Mar 2019 at 19:57, url https://stackoverflow.com/a/15263885 [20210212].
#
# Import necessary packages
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.offsetbox import AnchoredText
# Define a function representing a sine wave
def swave(x, t):
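 # Travelling sine wave y(x, t) = A sin(k x - omega t + varphi), with amplitude A, wave number k = 2 pi / lambda and angular frequency omega = 2 pi / T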
A = 1.5
_lambda = 1
k = 2 * np.pi / _lambda
T = 1
_omega = 2 * np.pi / T
_varphi = 0
y = A * np.sin(k * x - _omega *t + _varphi)
return y
# Use style
plt.style.use("seaborn-pastel")
# Create figure with certain size in inch
fig = plt.figure(figsize=(2.5, 2.5))
# Set x range
xmin = 0
xmax = 2
xrange = (xmin, xmax)
# Set y range
ymin = -2
ymax = 2
yrange = (ymin, ymax)
# Set x and y axes
ax = plt.axes(xlim=xrange, ylim=yrange)
# Set axes label
ax.set_xlabel("x")
ax.set_ylabel("y")
# Set xtics
dx = 0.5
xtics = np.arange(xmin, xmax + dx, dx)
ax.set_xticks(xtics)
# Set ytics
dy = 1
ytics = np.arange(ymin, ymax + dy, dy)
ax.set_yticks(ytics)
# Get Line2D object representing plotted data
line, = ax.plot([], [], lw=3)
# Show grid or with True
plt.grid()
# Create data
t = 0
x = np.linspace(0, 4, 100)
y = swave(x, t)
line.set_data(x, y)
# Add time information
ts = "{:.2f}".format(t)
atext = AnchoredText("t = " + ts, loc=1)
ax.add_artist(atext)
# Save plot as PNG image
plt.savefig("sine-t-" + ts + ".png")
# Show plot
plt.show()
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.style.use",
"matplotlib.offsetbox.AnchoredText",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.axes",
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((2653, 2684), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-pastel"""'], {}), "('seaborn-pastel')\n", (2666, 2684), True, 'from matplotlib import pyplot as plt\n'), ((2734, 2764), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.5, 2.5)'}), '(figsize=(2.5, 2.5))\n', (2744, 2764), True, 'from matplotlib import pyplot as plt\n'), ((2901, 2935), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xlim': 'xrange', 'ylim': 'yrange'}), '(xlim=xrange, ylim=yrange)\n', (2909, 2935), True, 'from matplotlib import pyplot as plt\n'), ((3022, 3052), 'numpy.arange', 'np.arange', (['xmin', '(xmax + dx)', 'dx'], {}), '(xmin, xmax + dx, dx)\n', (3031, 3052), True, 'import numpy as np\n'), ((3102, 3132), 'numpy.arange', 'np.arange', (['ymin', '(ymax + dy)', 'dy'], {}), '(ymin, ymax + dy, dy)\n', (3111, 3132), True, 'import numpy as np\n'), ((3257, 3267), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3265, 3267), True, 'from matplotlib import pyplot as plt\n'), ((3293, 3315), 'numpy.linspace', 'np.linspace', (['(0)', '(4)', '(100)'], {}), '(0, 4, 100)\n', (3304, 3315), True, 'import numpy as np\n'), ((3408, 3440), 'matplotlib.offsetbox.AnchoredText', 'AnchoredText', (["('t = ' + ts)"], {'loc': '(1)'}), "('t = ' + ts, loc=1)\n", (3420, 3440), False, 'from matplotlib.offsetbox import AnchoredText\n'), ((3488, 3524), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('sine-t-' + ts + '.png')"], {}), "('sine-t-' + ts + '.png')\n", (3499, 3524), True, 'from matplotlib import pyplot as plt\n'), ((3538, 3548), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3546, 3548), True, 'from matplotlib import pyplot as plt\n'), ((2593, 2629), 'numpy.sin', 'np.sin', (['(k * x - _omega * t + _varphi)'], {}), '(k * x - _omega * t + _varphi)\n', (2599, 2629), True, 'import numpy as np\n')]
|
import numpy as np
from nlpaug.model.audio import Audio
class Normalization(Audio):
def manipulate(self, data, method, start_pos, end_pos):
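        # Normalize only the slice [start_pos:end_pos]; method must be one of get_support_methods(), otherwise new_data is never assigned.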
aug_data = data.copy()
if method == 'minmax':
new_data = self._min_max(aug_data[start_pos:end_pos])
elif method == 'max':
new_data = self._max(aug_data[start_pos:end_pos])
elif method == 'standard':
new_data = self._standard(aug_data[start_pos:end_pos])
aug_data[start_pos:end_pos] = new_data
return aug_data
def get_support_methods(self):
return ['minmax', 'max', 'standard']
def _standard(self, data):
return (data - np.mean(data)) / np.std(data)
def _max(self, data):
return data / np.amax(np.abs(data))
def _min_max(self, data):
lower = np.amin(np.abs(data))
return (data - lower) / (np.amax(np.abs(data)) - lower)
|
[
"numpy.abs",
"numpy.mean",
"numpy.std"
] |
[((611, 623), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (617, 623), True, 'import numpy as np\n'), ((732, 744), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (738, 744), True, 'import numpy as np\n'), ((594, 607), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (601, 607), True, 'import numpy as np\n'), ((672, 684), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (678, 684), True, 'import numpy as np\n'), ((781, 793), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (787, 793), True, 'import numpy as np\n')]
|
from __future__ import print_function, division
import numpy as np
weights = np.transpose(np.load('w0.npy'))
print(weights.shape)
feature_names = ["" for i in range(125)]
prev = 0
prev_name = ''
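# feature_names.txt lists the starting column of each named feature group; the loop below
# spreads each group's name over its column range, adding a numeric suffix when a group spans several columns.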
for line in open('feature_names.txt'):
if line.startswith('#'):
continue
words = line.split()
index = int(words[0])
feature_name = words[1][:-1]
feature_type = words[2]
if prev_name != '':
for i in range(prev, index + 1):
if prev + 1 < index:
feature_names[i] = prev_name + '_' + str(i - prev)
else:
feature_names[i] = prev_name
prev = index
prev_name = feature_name
feature_names[-1] = prev_name
print(feature_names, len(feature_names))
sorted_indices = np.argsort(np.absolute(weights), axis=1)
print(sorted_indices[:, 120:124])
|
[
"numpy.absolute",
"numpy.load"
] |
[((92, 109), 'numpy.load', 'np.load', (['"""w0.npy"""'], {}), "('w0.npy')\n", (99, 109), True, 'import numpy as np\n'), ((772, 792), 'numpy.absolute', 'np.absolute', (['weights'], {}), '(weights)\n', (783, 792), True, 'import numpy as np\n')]
|
"""
Generate and save maps for each template.
"""
import random
import numpy as np
from scipy import stats
import healpy as hp
import matplotlib.pyplot as plt
import os
import pickle
from .data_utils import get_fermi_pdf_sampler, masked_to_full
from .utils import multipage, auto_garbage_collect
import ray
import time
import warnings
def generate_template_maps(params, temp_dict, ray_settings, n_example_plots, job_id=0):
"""
Generate simulated template maps for each template (output format: NESTED!)
:param params: DotDict containing the settings (see parameters.py)
:param temp_dict: DotDict containing the templates
:param ray_settings: dictionary containing the settings for ray
:param n_example_plots: number of maps to plot and save for each template (as a quick check)
:param job_id: if running several jobs for the data generation: ID of the current job
"""
start_time = time.time()
# Get settings that will be stored in a separate file together with the maps
t_p = params.mod["models_P"]
t_ps = params.mod["models_PS"]
nside = params.data["nside"]
outer_rad = params.data["outer_rad"]
inner_band = params.data["inner_band"]
mask_type = params.data["mask_type"]
do_fermi_psf = params.data["psf"]
leakage_delta = params.data["leakage_delta"] if do_fermi_psf else 0
if "db" in params.keys():
do_poisson_scatter_p = False if params.db["deactivate_poiss_scatter_for_P"] else True
else:
do_poisson_scatter_p = True
name = params.tt["filename_base"]
n_chunk = params.tt["n_chunk"]
n_sim_per_chunk = params.tt["n_sim_per_chunk"]
poisson_a_is_log = params.tt["poisson_A_is_log"]
add_two_temps_ps = params.tt["add_two_temps_PS"]
output_path = params.gen["template_maps_folder"]
prior_dict = params.tt.priors
save_example_plot = n_example_plots > 0
exp = temp_dict["exp"]
rescale_compressed = temp_dict["rescale_compressed"]
# Set output dtypes
dtype_data = np.uint32 if do_poisson_scatter_p else np.float32 # without Poisson draw, counts are non-integer
dtype_flux_arr = np.float32
    # Set a random seed for numpy (drawn with the random module, since numpy's random state would otherwise be duplicated across multiple processes)
random_seed = random.randint(0, int(2 ** 32 - 1))
np.random.seed(random_seed)
print("Job ID:", job_id, "Random Seed:", random_seed)
# PSF: use Fermi-LAT PSF
if do_fermi_psf:
pdf = get_fermi_pdf_sampler()
else:
pdf = None
# Get the masks
total_mask_neg = temp_dict["mask_ROI_full"] # uncompressed, nest format, contains PS mask if desired
total_mask_neg_safety = temp_dict["mask_safety_full"] # the same for the slightly larger ROI
# Initialise the output dictionary
data_out = dict()
# Create the output folder (if it doesn't exist yet)
os.makedirs(output_path, exist_ok=True)
# Print
print("Starting map generation for '{0}'.".format(params.tt["data_name"]))
print("Number of chunks: {0}, number of simulations per chunk: "
"{1}\n -> {2} maps per model.".format(n_chunk, n_sim_per_chunk, n_chunk * n_sim_per_chunk))
if len(add_two_temps_ps) > 0:
print(" Twice as many maps will be created for", add_two_temps_ps)
# Start with the Poissonian models
for temp in t_p:
print("Starting with Poissonian model '{:}'".format(temp))
t = temp_dict["T_counts"][temp] # exposure-corrected template in counts space
# Get pixels that are not masked
indices_roi = temp_dict["indices_roi"]
# Mask template and compress
t_masked = t * (1 - total_mask_neg)
t_masked_compressed = t_masked[indices_roi]
# Make a subfolder
temp_folder = os.path.join(output_path, temp)
os.makedirs(temp_folder, exist_ok=True)
# For each chunk
for chunk in range(n_chunk):
# Draw the (log) amplitude
a = np.asarray([random.uniform(prior_dict[temp][0], prior_dict[temp][1])
for _ in range(n_sim_per_chunk)])
# Generate the maps: NOTE: exposure-correction is included in the Poissonian templates ("T_counts")
random_draw_fn = np.random.poisson if do_poisson_scatter_p else lambda x: x
if poisson_a_is_log:
sim_maps = np.asarray([random_draw_fn((10.0 ** a[i]) * t_masked_compressed)
for i in range(n_sim_per_chunk)])
else:
sim_maps = np.asarray([random_draw_fn(a[i] * t_masked_compressed)
for i in range(n_sim_per_chunk)])
# Save settings
if chunk == 0 and int(job_id) == 0:
settings_out = dict()
settings_out["T"] = t
settings_out["priors"] = prior_dict[temp]
settings_out["is_log_A"] = poisson_a_is_log
settings_out["exp"] = exp
settings_out["rescale_compressed"] = rescale_compressed
settings_out["indices_roi"] = indices_roi
settings_out["format"] = "NEST"
settings_out["mask_type"] = mask_type
settings_out["outer_rad"] = outer_rad
settings_out["inner_band"] = inner_band
settings_out["leakage_delta"] = leakage_delta
settings_out["nside"] = nside
print(" Writing settings file...")
with open(os.path.join(temp_folder, name + "_settings.pickle"), 'wb') as f:
pickle.dump(settings_out, f)
# Save maps
# The full map can be recovered as
# map_full = np.zeros(npix), map_full[data_out["indices_roi"]] = data_out["val"]
data_out["data"] = sim_maps.astype(dtype_data)
data_out["info"] = dict()
data_out["info"]["A"] = a
with open(os.path.join(temp_folder, name + "_" + str(job_id) + "_" + str(chunk) + ".pickle"), 'wb') as f:
pickle.dump(data_out, f)
# Plot some maps and save
if chunk == 0 and int(job_id) == 0 and save_example_plot:
plt.ioff()
hp.mollview(t_masked, title="Template (exposure-corrected)", nest=True)
hp.mollview(exp, title="Exposure (nside = " + str(nside) + ")", nest=True)
hp.mollview(total_mask_neg, title="Mask (" + str(mask_type) + ")", nest=True)
for i in range(n_example_plots):
hp.mollview(masked_to_full(sim_maps[i, :], indices_roi, nside=nside),
title=int(np.round(sim_maps[i, :].sum())), nest=True)
multipage(os.path.join(output_path, temp + "_examples.pdf"))
plt.close("all")
# Initialise Ray
if t_ps:
ray.init(**ray_settings)
if "num_cpus" in ray_settings.keys():
print("Ray: running on", ray_settings["num_cpus"], "CPUs.")
# Put the large array / objects that are template-independent into the object store
exp_id = ray.put(exp)
pdf_id = ray.put(pdf)
# Define a function for the simulation of the point-source models
@ray.remote
def create_simulated_map(skew_, loc_, scale_, flux_lims_, enforce_upper_flux_, t_, exp_, pdf_, name_,
inds_outside_roi_, size_approx_mean_=10000, flux_log_=False):
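            # Simulate one point-source map: draw a target total flux, convert it into an expected number of
            # sources via the skew-normal prior on log10-flux, sample per-source fluxes, and Monte-Carlo the photon counts.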
from .ps_mc import run
assert np.all(np.isfinite(flux_lims_)), "Flux limits must be finite!"
max_total_flux = flux_lims_[1] if enforce_upper_flux_ else -np.infty
# Draw the desired flux
if flux_log_:
flux_desired = 10 ** np.random.uniform(*flux_lims_)
else:
flux_desired = np.random.uniform(*flux_lims_)
# Calculate the expected value of 10^X
exp_value = (10 ** stats.skewnorm.rvs(skew_, loc=loc_, scale=scale_, size=int(size_approx_mean_))).mean()
# Determine the expected number of sources
n_sources_exp = flux_desired / exp_value
# Draw the observed number of sources from a Poisson distribution
n_sources = np.random.poisson(n_sources_exp)
# Initialise total flux
tot_flux = np.infty
# Draw fluxes until total flux is in valid range
flux_arr_ = []
while tot_flux >= max_total_flux:
flux_arr_ = 10 ** stats.skewnorm.rvs(skew_, loc=loc_, scale=scale_, size=n_sources)
tot_flux = flux_arr_.sum()
if not enforce_upper_flux_:
break
# If total flux > max-total_flux: reduce n_sources
if tot_flux > max_total_flux:
n_sources = int(max(1, int(n_sources // 1.05)))
# Do MC run
map_, n_phot_, flux_arr_out = run(np.asarray(flux_arr_), t_, exp_, pdf_, name_, save=False, getnopsf=True,
getcts=True, upscale_nside=16384, verbose=False, is_nest=True,
inds_outside_roi=inds_outside_roi_, clean_count_list=False)
return map_, n_phot_, flux_arr_out
# Do the point-source models
for temp in t_ps:
print("Starting with point-source model '{:}'".format(temp))
t = temp_dict["T_flux"][temp] # for point-sources: template after REMOVING the exposure correction is used
# Apply slightly larger mask
t_masked = t * (1 - total_mask_neg_safety)
# Correct flux limit priors for larger mask (after simulating the counts, ROI mask will be applied)
flux_corr_fac = t_masked.sum() / (t * (1 - total_mask_neg)).sum()
flux_lims_corr = [None] * 2
for i in range(2):
if prior_dict[temp]["flux_log"]:
flux_lims_corr[i] = prior_dict[temp]["flux_lims"][i] + np.log10(flux_corr_fac)
else:
flux_lims_corr[i] = prior_dict[temp]["flux_lims"][i] * flux_corr_fac
# Get indices where PSs are sampled although they lie outside ROI
inds_ps_outside_roi = set(np.setdiff1d(temp_dict["indices_safety"], temp_dict["indices_roi"]))
# Template needs to be normalised to sum up to unity for the new implementation!
# Might need to do this twice because of rounding errors
t_final = t_masked / t_masked.sum()
while t_final.sum() > 1.0:
t_final /= t_final.sum()
if t_final.sum() != 1.0:
warnings.warn("Template sum is not exactly 1, but {:}!".format(t_final.sum()))
# Make a subfolder
temp_folder = os.path.join(output_path, temp)
os.makedirs(temp_folder, exist_ok=True)
# Put the large arrays / objects to the object store
t_final_id = ray.put(t_final)
inds_ps_outside_roi_id = ray.put(inds_ps_outside_roi)
# For each chunk
this_n_chunk = 2 * n_chunk if temp in add_two_temps_ps else n_chunk
for chunk in range(this_n_chunk):
print(" Starting with chunk", chunk)
# Draw the parameters
mean_draw = np.random.uniform(*prior_dict[temp]["mean_exp"], size=n_sim_per_chunk)
var_draw = prior_dict[temp]["var_exp"] * np.random.chisquare(1, size=n_sim_per_chunk)
skew_draw = np.random.normal(loc=0, scale=prior_dict[temp]["skew_std"], size=n_sim_per_chunk)
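                # Per-map hyperparameters of the skew-normal distribution of log10(flux per source):
                # location (uniform prior), variance (scaled chi-square draw), and skewness (Gaussian draw).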
# This code is for debugging without ray
# sim_maps, n_phot, flux_arr = create_simulated_map(skew_draw[0], mean_draw[0], np.sqrt(var_draw[0]),
# flux_lims_corr,
# prior_dict[temp]["enforce_upper_flux"],
# t_final, exp, pdf, "map_" + temp,
# flux_log_=prior_dict[temp]["flux_log"],
# inds_outside_roi_=inds_ps_outside_roi)
sim_maps, n_phot, flux_arr = map(list, zip(*ray.get(
[create_simulated_map.remote(skew_draw[i_PS], mean_draw[i_PS], np.sqrt(var_draw[i_PS]),
flux_lims_corr, prior_dict[temp]["enforce_upper_flux"],
t_final_id, exp_id, pdf_id, "map_" + temp,
flux_log_=prior_dict[temp]["flux_log"],
inds_outside_roi_=inds_ps_outside_roi_id)
for i_PS in range(n_sim_per_chunk)])))
# Apply ROI mask again and cut off counts outside ROI
sim_maps = np.asarray(sim_maps) * np.expand_dims((1 - total_mask_neg), [0, -1])
# The following assert is for the scenario where there is NO leakage INTO the ROI, and counts leaking
# OUT OF the ROI are deleted from photon-count list n_phot
# assert np.all(sim_maps[:, :, 0].sum(1) == [n_phot[i].sum() for i in range(n_sim_per_chunk)]), \
# "Photons counts in maps and n_phot lists are not consistent! Aborting..."
# The following assert is for the scenario where there is leakage INTO and OUT OF the ROI, and n_phot
# contains ALL the counts (and only those counts) from PSs within the ROI.
assert np.all(sim_maps[:, :, 1].sum(1) == [n_phot[i].sum() for i in range(n_sim_per_chunk)]), \
"Photons counts in maps and n_phot lists are not consistent! Aborting..."
# Collect garbage
auto_garbage_collect()
# Save settings
if chunk == 0 and int(job_id) == 0:
settings_out = dict()
settings_out["T"] = t
settings_out["priors"] = prior_dict[temp]
settings_out["exp"] = exp # exposure
settings_out["rescale_compressed"] = rescale_compressed
settings_out["max_NP_sources"] = np.nan # not set here
settings_out["indices_roi"] = np.argwhere(1 - total_mask_neg).flatten()
settings_out["format"] = "NEST"
settings_out["mask_type"] = mask_type
settings_out["outer_rad"] = outer_rad
settings_out["inner_band"] = inner_band
settings_out["leakage_delta"] = leakage_delta
settings_out["nside"] = nside
print(" Writing settings file...")
with open(os.path.join(temp_folder, name + "_settings.pickle"), 'wb') as f:
pickle.dump(settings_out, f)
# Save maps
data_out["data"] = (sim_maps[:, temp_dict["indices_roi"], :]).astype(dtype_data)
data_out["n_phot"] = n_phot
data_out["flux_arr"] = [np.asarray(f, dtype=dtype_flux_arr) for f in flux_arr]
data_out["info"] = dict()
data_out["info"]["tot_flux"] = np.asarray([np.sum(f) for f in flux_arr])
data_out["info"]["means"] = mean_draw
data_out["info"]["vars"] = var_draw
data_out["info"]["skew"] = skew_draw
with open(os.path.join(temp_folder, name + "_"
+ str(job_id) + "_" + str(chunk) + ".pickle"), 'wb') as f:
pickle.dump(data_out, f)
# Plot some maps and save
if chunk == 0 and int(job_id) == 0 and save_example_plot:
plt.ioff()
hp.mollview(t * (1 - total_mask_neg), title="Template (not exposure-corrected)", nest=True)
hp.mollview(exp, title="Exposure (nside = " + str(nside) + ")", nest=True)
hp.mollview(total_mask_neg, title="Mask (" + str(mask_type) + ")", nest=True)
hp.mollview(total_mask_neg_safety, title="Extended mask (allowing leakage into ROI)", nest=True)
for i in range(n_example_plots):
hp.mollview(sim_maps[i, :, 0], title=int(np.round(sim_maps[i, :, 0].sum())), nest=True)
multipage(os.path.join(output_path, temp + "_examples.pdf"))
plt.close("all")
dash = 80 * "="
print(dash)
print("Done! Computation took {0} seconds.".format(time.time() - start_time))
print(dash)
# Loading pickle file e.g.: data = pickle.load( open( "./data/<...>.pickle", "rb" ) )
|
[
"numpy.log10",
"numpy.sqrt",
"healpy.mollview",
"numpy.isfinite",
"ray.init",
"numpy.random.chisquare",
"numpy.random.poisson",
"numpy.asarray",
"matplotlib.pyplot.close",
"numpy.random.seed",
"numpy.random.normal",
"random.uniform",
"matplotlib.pyplot.ioff",
"time.time",
"pickle.dump",
"os.makedirs",
"os.path.join",
"numpy.sum",
"scipy.stats.skewnorm.rvs",
"numpy.setdiff1d",
"numpy.argwhere",
"numpy.expand_dims",
"numpy.random.uniform",
"ray.put"
] |
[((919, 930), 'time.time', 'time.time', ([], {}), '()\n', (928, 930), False, 'import time\n'), ((2321, 2348), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (2335, 2348), True, 'import numpy as np\n'), ((2874, 2913), 'os.makedirs', 'os.makedirs', (['output_path'], {'exist_ok': '(True)'}), '(output_path, exist_ok=True)\n', (2885, 2913), False, 'import os\n'), ((3774, 3805), 'os.path.join', 'os.path.join', (['output_path', 'temp'], {}), '(output_path, temp)\n', (3786, 3805), False, 'import os\n'), ((3814, 3853), 'os.makedirs', 'os.makedirs', (['temp_folder'], {'exist_ok': '(True)'}), '(temp_folder, exist_ok=True)\n', (3825, 3853), False, 'import os\n'), ((6880, 6904), 'ray.init', 'ray.init', ([], {}), '(**ray_settings)\n', (6888, 6904), False, 'import ray\n'), ((7133, 7145), 'ray.put', 'ray.put', (['exp'], {}), '(exp)\n', (7140, 7145), False, 'import ray\n'), ((7163, 7175), 'ray.put', 'ray.put', (['pdf'], {}), '(pdf)\n', (7170, 7175), False, 'import ray\n'), ((8264, 8296), 'numpy.random.poisson', 'np.random.poisson', (['n_sources_exp'], {}), '(n_sources_exp)\n', (8281, 8296), True, 'import numpy as np\n'), ((10842, 10873), 'os.path.join', 'os.path.join', (['output_path', 'temp'], {}), '(output_path, temp)\n', (10854, 10873), False, 'import os\n'), ((10886, 10925), 'os.makedirs', 'os.makedirs', (['temp_folder'], {'exist_ok': '(True)'}), '(temp_folder, exist_ok=True)\n', (10897, 10925), False, 'import os\n'), ((11017, 11033), 'ray.put', 'ray.put', (['t_final'], {}), '(t_final)\n', (11024, 11033), False, 'import ray\n'), ((11071, 11099), 'ray.put', 'ray.put', (['inds_ps_outside_roi'], {}), '(inds_ps_outside_roi)\n', (11078, 11099), False, 'import ray\n'), ((6067, 6091), 'pickle.dump', 'pickle.dump', (['data_out', 'f'], {}), '(data_out, f)\n', (6078, 6091), False, 'import pickle\n'), ((6217, 6227), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (6225, 6227), True, 'import matplotlib.pyplot as plt\n'), ((6244, 6315), 'healpy.mollview', 'hp.mollview', (['t_masked'], {'title': '"""Template (exposure-corrected)"""', 'nest': '(True)'}), "(t_masked, title='Template (exposure-corrected)', nest=True)\n", (6255, 6315), True, 'import healpy as hp\n'), ((6820, 6836), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6829, 6836), True, 'import matplotlib.pyplot as plt\n'), ((7537, 7560), 'numpy.isfinite', 'np.isfinite', (['flux_lims_'], {}), '(flux_lims_)\n', (7548, 7560), True, 'import numpy as np\n'), ((7854, 7884), 'numpy.random.uniform', 'np.random.uniform', (['*flux_lims_'], {}), '(*flux_lims_)\n', (7871, 7884), True, 'import numpy as np\n'), ((8964, 8985), 'numpy.asarray', 'np.asarray', (['flux_arr_'], {}), '(flux_arr_)\n', (8974, 8985), True, 'import numpy as np\n'), ((10292, 10359), 'numpy.setdiff1d', 'np.setdiff1d', (["temp_dict['indices_safety']", "temp_dict['indices_roi']"], {}), "(temp_dict['indices_safety'], temp_dict['indices_roi'])\n", (10304, 10359), True, 'import numpy as np\n'), ((11377, 11447), 'numpy.random.uniform', 'np.random.uniform', (["*prior_dict[temp]['mean_exp']"], {'size': 'n_sim_per_chunk'}), "(*prior_dict[temp]['mean_exp'], size=n_sim_per_chunk)\n", (11394, 11447), True, 'import numpy as np\n'), ((11578, 11664), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': "prior_dict[temp]['skew_std']", 'size': 'n_sim_per_chunk'}), "(loc=0, scale=prior_dict[temp]['skew_std'], size=\n n_sim_per_chunk)\n", (11594, 11664), True, 'import numpy as np\n'), ((16829, 16840), 'time.time', 'time.time', ([], 
{}), '()\n', (16838, 16840), False, 'import time\n'), ((3985, 4041), 'random.uniform', 'random.uniform', (['prior_dict[temp][0]', 'prior_dict[temp][1]'], {}), '(prior_dict[temp][0], prior_dict[temp][1])\n', (3999, 4041), False, 'import random\n'), ((5604, 5632), 'pickle.dump', 'pickle.dump', (['settings_out', 'f'], {}), '(settings_out, f)\n', (5615, 5632), False, 'import pickle\n'), ((6753, 6802), 'os.path.join', 'os.path.join', (['output_path', "(temp + '_examples.pdf')"], {}), "(output_path, temp + '_examples.pdf')\n", (6765, 6802), False, 'import os\n'), ((7774, 7804), 'numpy.random.uniform', 'np.random.uniform', (['*flux_lims_'], {}), '(*flux_lims_)\n', (7791, 7804), True, 'import numpy as np\n'), ((8533, 8598), 'scipy.stats.skewnorm.rvs', 'stats.skewnorm.rvs', (['skew_'], {'loc': 'loc_', 'scale': 'scale_', 'size': 'n_sources'}), '(skew_, loc=loc_, scale=scale_, size=n_sources)\n', (8551, 8598), False, 'from scipy import stats\n'), ((11505, 11549), 'numpy.random.chisquare', 'np.random.chisquare', (['(1)'], {'size': 'n_sim_per_chunk'}), '(1, size=n_sim_per_chunk)\n', (11524, 11549), True, 'import numpy as np\n'), ((13058, 13078), 'numpy.asarray', 'np.asarray', (['sim_maps'], {}), '(sim_maps)\n', (13068, 13078), True, 'import numpy as np\n'), ((13081, 13124), 'numpy.expand_dims', 'np.expand_dims', (['(1 - total_mask_neg)', '[0, -1]'], {}), '(1 - total_mask_neg, [0, -1])\n', (13095, 13124), True, 'import numpy as np\n'), ((15318, 15353), 'numpy.asarray', 'np.asarray', (['f'], {'dtype': 'dtype_flux_arr'}), '(f, dtype=dtype_flux_arr)\n', (15328, 15353), True, 'import numpy as np\n'), ((15858, 15882), 'pickle.dump', 'pickle.dump', (['data_out', 'f'], {}), '(data_out, f)\n', (15869, 15882), False, 'import pickle\n'), ((16020, 16030), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (16028, 16030), True, 'import matplotlib.pyplot as plt\n'), ((16051, 16147), 'healpy.mollview', 'hp.mollview', (['(t * (1 - total_mask_neg))'], {'title': '"""Template (not exposure-corrected)"""', 'nest': '(True)'}), "(t * (1 - total_mask_neg), title=\n 'Template (not exposure-corrected)', nest=True)\n", (16062, 16147), True, 'import healpy as hp\n'), ((16356, 16457), 'healpy.mollview', 'hp.mollview', (['total_mask_neg_safety'], {'title': '"""Extended mask (allowing leakage into ROI)"""', 'nest': '(True)'}), "(total_mask_neg_safety, title=\n 'Extended mask (allowing leakage into ROI)', nest=True)\n", (16367, 16457), True, 'import healpy as hp\n'), ((16720, 16736), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (16729, 16736), True, 'import matplotlib.pyplot as plt\n'), ((5518, 5570), 'os.path.join', 'os.path.join', (['temp_folder', "(name + '_settings.pickle')"], {}), "(temp_folder, name + '_settings.pickle')\n", (5530, 5570), False, 'import os\n'), ((10040, 10063), 'numpy.log10', 'np.log10', (['flux_corr_fac'], {}), '(flux_corr_fac)\n', (10048, 10063), True, 'import numpy as np\n'), ((15079, 15107), 'pickle.dump', 'pickle.dump', (['settings_out', 'f'], {}), '(settings_out, f)\n', (15090, 15107), False, 'import pickle\n'), ((15474, 15483), 'numpy.sum', 'np.sum', (['f'], {}), '(f)\n', (15480, 15483), True, 'import numpy as np\n'), ((16649, 16698), 'os.path.join', 'os.path.join', (['output_path', "(temp + '_examples.pdf')"], {}), "(output_path, temp + '_examples.pdf')\n", (16661, 16698), False, 'import os\n'), ((14516, 14547), 'numpy.argwhere', 'np.argwhere', (['(1 - total_mask_neg)'], {}), '(1 - total_mask_neg)\n', (14527, 14547), True, 'import numpy as np\n'), ((14989, 15041), 
'os.path.join', 'os.path.join', (['temp_folder', "(name + '_settings.pickle')"], {}), "(temp_folder, name + '_settings.pickle')\n", (15001, 15041), False, 'import os\n'), ((12498, 12521), 'numpy.sqrt', 'np.sqrt', (['var_draw[i_PS]'], {}), '(var_draw[i_PS])\n', (12505, 12521), True, 'import numpy as np\n')]
|
import numpy as np
from numpy.testing import assert_allclose
from robogym.envs.rearrange.common.utils import (
get_mesh_bounding_box,
make_block,
make_blocks_and_targets,
)
from robogym.envs.rearrange.simulation.composer import RandomMeshComposer
from robogym.mujoco.mujoco_xml import MujocoXML
def _get_default_xml():
xml_source = """
<mujoco>
<asset>
<material name="block_mat" specular="0" shininess="0.5" reflectance="0" rgba="1 0 0 1"></material>
</asset>
</mujoco>
"""
xml = MujocoXML.from_string(xml_source)
return xml
def test_mesh_composer():
for path in [
None,
RandomMeshComposer.GEOM_ASSET_PATH,
RandomMeshComposer.GEOM_ASSET_PATH,
]:
composer = RandomMeshComposer(mesh_path=path)
for num_geoms in range(1, 6):
xml = _get_default_xml()
composer.reset()
xml.append(composer.sample("object0", num_geoms, object_size=0.05))
sim = xml.build()
assert len(sim.model.geom_names) == num_geoms
pos, size = get_mesh_bounding_box(sim, "object0")
assert np.isclose(np.max(size), 0.05)
pos2, size2 = composer.get_bounding_box(sim, "object0")
assert np.allclose(pos, pos2)
assert np.allclose(size, size2)
def test_block_object():
xml = _get_default_xml()
xml.append(make_block("object0", object_size=np.ones(3) * 0.05))
sim = xml.build()
assert len(sim.model.geom_size) == 1
assert_allclose(sim.model.geom_size, 0.05)
def test_blocks_and_targets():
xml = _get_default_xml()
for obj_xml, target_xml in make_blocks_and_targets(num_objects=5, block_size=0.05):
xml.append(obj_xml)
xml.append(target_xml)
sim = xml.build()
assert len(sim.model.geom_size) == 10
assert_allclose(sim.model.geom_size, 0.05)
|
[
"numpy.allclose",
"numpy.ones",
"robogym.envs.rearrange.common.utils.make_blocks_and_targets",
"numpy.testing.assert_allclose",
"robogym.envs.rearrange.simulation.composer.RandomMeshComposer",
"robogym.envs.rearrange.common.utils.get_mesh_bounding_box",
"numpy.max",
"robogym.mujoco.mujoco_xml.MujocoXML.from_string"
] |
[((536, 569), 'robogym.mujoco.mujoco_xml.MujocoXML.from_string', 'MujocoXML.from_string', (['xml_source'], {}), '(xml_source)\n', (557, 569), False, 'from robogym.mujoco.mujoco_xml import MujocoXML\n'), ((1524, 1566), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.model.geom_size', '(0.05)'], {}), '(sim.model.geom_size, 0.05)\n', (1539, 1566), False, 'from numpy.testing import assert_allclose\n'), ((1660, 1715), 'robogym.envs.rearrange.common.utils.make_blocks_and_targets', 'make_blocks_and_targets', ([], {'num_objects': '(5)', 'block_size': '(0.05)'}), '(num_objects=5, block_size=0.05)\n', (1683, 1715), False, 'from robogym.envs.rearrange.common.utils import get_mesh_bounding_box, make_block, make_blocks_and_targets\n'), ((1845, 1887), 'numpy.testing.assert_allclose', 'assert_allclose', (['sim.model.geom_size', '(0.05)'], {}), '(sim.model.geom_size, 0.05)\n', (1860, 1887), False, 'from numpy.testing import assert_allclose\n'), ((759, 793), 'robogym.envs.rearrange.simulation.composer.RandomMeshComposer', 'RandomMeshComposer', ([], {'mesh_path': 'path'}), '(mesh_path=path)\n', (777, 793), False, 'from robogym.envs.rearrange.simulation.composer import RandomMeshComposer\n'), ((1090, 1127), 'robogym.envs.rearrange.common.utils.get_mesh_bounding_box', 'get_mesh_bounding_box', (['sim', '"""object0"""'], {}), "(sim, 'object0')\n", (1111, 1127), False, 'from robogym.envs.rearrange.common.utils import get_mesh_bounding_box, make_block, make_blocks_and_targets\n'), ((1265, 1287), 'numpy.allclose', 'np.allclose', (['pos', 'pos2'], {}), '(pos, pos2)\n', (1276, 1287), True, 'import numpy as np\n'), ((1307, 1331), 'numpy.allclose', 'np.allclose', (['size', 'size2'], {}), '(size, size2)\n', (1318, 1331), True, 'import numpy as np\n'), ((1158, 1170), 'numpy.max', 'np.max', (['size'], {}), '(size)\n', (1164, 1170), True, 'import numpy as np\n'), ((1437, 1447), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1444, 1447), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 9 09:42:00 2021
@author: barraly
"""
import sabs_pkpd
import numpy as np
import matplotlib.pyplot as plt
import os
# Select the folder in which this repo is downloaded in the line below
os.chdir('The/location/of/the/root/folder/of/this/repo')
# In[Load the model]
filename = './Models/Ohara CiPA - analytical voltage.mmt'
s = sabs_pkpd.load_model.load_simulation_from_mmt(filename)
s.set_tolerance(1e-08, 1e-08)
default_state = s.state()
# Save the initial conditions published in OHara CiPA model
Ohara_init_conds = default_state.copy()
# In[Define the needed functions]
# Define the functions to make sure there is consistency between the initial conditions
def G0_calc(Ki = 144.65559, Kss = 144.65556, Nai = 7.268, Nass = 7.26809,
Cai = 8.6e-5, Cansr = 1.61957, Cajsr = 1.571234014, Cass = 8.49e-5,
V=-88, extraK = 5.4, extraNa = 140, extraCa = 1.8):
tot_cai = Cai * (1 + 0.05 / (Cai + 0.00238) + 0.07/(Cai + 0.0005))
tot_cass = Cass * (1 + 0.047 / (Cass + 0.00087) + 1.124/(Cass + 0.0087))
tot_cajsr = Cajsr * (1 + 10 / (Cajsr + 0.8))
return V / (96485 * 2.583592e-05) * 0.0001533576 - (Ki + Kss * 0.029411764705882353 + Nai + Nass * 0.029411764705882353 + 2*(tot_cai + tot_cass * 0.029411764705882353 + Cansr * 0.08117647059 + tot_cajsr * 0.007059) - extraK - extraNa - 2 * extraCa)
def Ki_calc(G0, Nai = 7.268, Nass = 7.26809, Cai = 8.6e-5, Cansr = 1.61957, Cajsr = 1.571234014, Cass = 8.49e-5, V=-88, extraK = 5.4, extraNa = 140, extraCa = 1.8):
tot_cai = Cai * (1 + 0.05 / (Cai + 0.00238) + 0.07/(Cai + 0.0005))
tot_cass = Cass * (1 + 0.047 / (Cass + 0.00087) + 1.124/(Cass + 0.0087))
tot_cajsr = Cajsr * (1 + 10 / (Cajsr + 0.8))
return (V / (96485 * 2.583592e-05) * 0.0001533576 + extraK + extraNa + 2 * extraCa - G0 - Nai - Nass * 0.029411764705882353 - 2*(tot_cai + tot_cass * 0.029411764705882353 + Cansr * 0.08117647059 + tot_cajsr * 0.007059)) / 1.029411764705882353
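# (Illustrative check added here; it is not part of the original script.) Ki_calc inverts
# G0_calc: for fixed Na/Ca concentrations and voltage, feeding the Gamma_0 returned by
# G0_calc back into Ki_calc recovers the intracellular K+ (Ki and Kss are assumed equal,
# hence the division by 1.0294...). For example:
# G0_default = G0_calc()             # Gamma_0 implied by the published initial conditions
# Ki_roundtrip = Ki_calc(G0_default)
# print(Ki_roundtrip)                # ~144.656, the default Ki up to the small Ki/Kss mismatch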
def compute(Gamma_0):
# Reinitialise the myokit.Simulation
s.reset()
# Set the initial conditions for Ki and Kss so that the initial conditions match with the value of Gamma_0
initial_state = default_state.copy()
initial_K = Ki_calc(Gamma_0,
Nai = default_state[1],
Nass = default_state[2],
Cai = default_state[5],
Cansr = default_state[7],
Cajsr = default_state[8],
Cass = default_state[6])
initial_state[3] = initial_K
initial_state[4] = initial_K
s.set_state(initial_state)
# Set the value of Gamma_0 in the myokit.Simulation
s.set_constant('membrane.c0', Gamma_0)
# Record the action potential at the limit cycle
s.pre(2000000)
out = s.run(1000, log_interval = 1)
print('Potassium at steady-state: ' + str(np.round(out['intracellular_ions.ki'][-1], decimals = 2)))
return np.array(out['membrane.V'])
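# (Descriptive note added; not in the original script.) compute(Gamma_0) is the single entry
# point used below: it rewrites Ki/Kss in the initial state so that they are consistent with
# the requested Gamma_0, passes Gamma_0 to the model through membrane.c0, pre-runs the
# simulation for 2e6 time units to settle onto the limit cycle, and returns 1000 samples of
# the membrane voltage.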
# In[Reuse the fitting instructions]
# Define the time points on which to read the voltage
time_points = np.linspace(0, 999, 1000)
# Define the fitted parameters and initial point
parameters_to_fit = ['ical.rescale', 'ikr.rescale', 'IKs.rescale', 'INa.rescale', 'INaL.rescale']
true_values = np.array([1, 1, 1, 1, 1])
# In[Compute the fitted data]
# Set the parameters values
for p, label in enumerate(parameters_to_fit):
s.set_constant(label, true_values[p])
# Run the model with the published original initial conditions and the Gamma_0 value associated with it
Gamma_0_for_fitting = -7.80116
data_to_fit = compute(Gamma_0_for_fitting)
# For validation with 50% IKr inhibition
s.set_constant(parameters_to_fit[1], 0.5 * true_values[1])
validation_data = compute(Gamma_0_for_fitting)
# In[Report the results from the fitting with Ohara initial conditions]
default_state = Ohara_init_conds.copy()
found_parameters = [1.000, 1.000, 1.000, 1.000, 1.000]
for p, label in enumerate(parameters_to_fit):
s.set_constant(label, found_parameters[p])
fitting_with_Ohara_ICs = compute(Gamma_0_for_fitting)
print('Gamma_0 for fitting : ' + str(Gamma_0_for_fitting))
APD90 = sabs_pkpd.cardiac.compute_APD(AP = fitting_with_Ohara_ICs, time_points= time_points, upstroke_time = 50)
print('APD_90 at baseline : ' + str(APD90))
# Predict IKr block AP
s.set_constant(parameters_to_fit[1], 0.5 * found_parameters[1])
fitting_with_Ohara_ICs_Kr_blocked = compute(Gamma_0_for_fitting)
APD90 = sabs_pkpd.cardiac.compute_APD(AP = fitting_with_Ohara_ICs_Kr_blocked, time_points= time_points, upstroke_time = 50)
print('APD_90 after 50% IKr block : ' + str(APD90))
# In[Report the results from the fitting with TT06 initial conditions]
default_state[1] = 10.134
default_state[2] = 10.134
default_state[3] = 135.369
default_state[4] = 135.369
default_state[5] = 1.058e-04
default_state[6] = 2.142e-04
default_state[7] = 3.556
default_state[8] = 3.556
initial_voltage = -84.936
Gamma_0_for_fitting = G0_calc(Nai = default_state[1],
Nass = default_state[2],
Ki = default_state[3],
Kss = default_state[4],
Cai = default_state[5],
Cass = default_state[6],
Cansr = default_state[7],
Cajsr = default_state[8],
V=initial_voltage)
found_parameters = [0.8278844, 1.13276793, 0.74292672, 1.08754243, 1.4459109]
for p, label in enumerate(parameters_to_fit):
s.set_constant(label, found_parameters[p])
fitting_with_TT06_ICs = compute(Gamma_0_for_fitting)
print('Gamma_0 for fitting : ' + str(Gamma_0_for_fitting))
APD90 = sabs_pkpd.cardiac.compute_APD(AP = fitting_with_TT06_ICs, time_points= time_points, upstroke_time = 50)
print('APD_90 at baseline : ' + str(APD90))
# Predict IKr block AP
s.set_constant(parameters_to_fit[1], 0.5 * found_parameters[1])
fitting_with_TT06_ICs_Kr_blocked = compute(Gamma_0_for_fitting)
APD90 = sabs_pkpd.cardiac.compute_APD(AP = fitting_with_TT06_ICs_Kr_blocked, time_points= time_points, upstroke_time = 50)
print('APD_90 after 50% IKr block : ' + str(APD90))
# In[Report the results from the fitting with TT06 initial conditions and Gamma_0]
default_state = Ohara_init_conds.copy()
found_parameters = [0.9999947, 0.99999936, 0.99995396, 0.999993485, 0.9999772, -7.801077]
for p, label in enumerate(parameters_to_fit):
s.set_constant(label, found_parameters[p])
fitting_with_TT06_ICs_gamma_0 = compute(found_parameters[-1])
APD90 = sabs_pkpd.cardiac.compute_APD(AP = fitting_with_TT06_ICs_gamma_0, time_points= time_points, upstroke_time = 50)
print('APD_90 at baseline : ' + str(APD90))
# Predict IKr block AP
s.set_constant(parameters_to_fit[1], 0.5 * found_parameters[1])
fitting_with_TT06_ICs_gamma_0_Kr_blocked = compute(found_parameters[-1])
APD90 = sabs_pkpd.cardiac.compute_APD(AP = fitting_with_TT06_ICs_gamma_0_Kr_blocked, time_points= time_points, upstroke_time = 50)
print('APD_90 after 50% IKr block : ' + str(APD90))
# In[Plot the comparison]
def place_caption_label(ax, label, loc='upper left', fontsize=35):
from matplotlib.offsetbox import AnchoredText
at = AnchoredText(label, loc=loc, prop=dict(size=fontsize), frameon=True, borderpad = 0)
ax.add_artist(at)
return None
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
fig, ax = plt.subplots(1, 2, figsize=[15, 7])
size_ticks = 22
size_labels = 25
# Shift x-axis so that AP starts at 0 ms (in the simulations, the stimulus fires at t=50 ms)
x = np.linspace(-50, 599, 650)
# Plot the fitted APs
ax[0].plot(x, data_to_fit[:650], label = 'Data to fit', color = 'k', linewidth = 5)
ax[0].plot(x, fitting_with_Ohara_ICs[:650], label = 'Fitting #1', linestyle = '--', linewidth = 3)
ax[0].plot(x, fitting_with_TT06_ICs[:650], label = 'Fitting #2', linestyle = '--', linewidth = 3)
ax[0].plot(x, fitting_with_TT06_ICs_gamma_0[:650], label = 'Fitting #3', linestyle = '--', linewidth = 3)
ax[0].legend(fontsize = 25)
ax[0].set_xlabel('Time (ms)', fontsize = size_labels)
ax[0].set_ylabel('Voltage (mV)', fontsize = size_labels)
ax[0].tick_params(axis = 'both', labelsize = size_ticks)
place_caption_label(ax[0], 'A', 'lower right')
# Add an inset to zoom into the short pacing periods
axins1 = ax[0].inset_axes(bounds = [0.7, 0.2, 0.3, 0.3])
x_inset = np.linspace(270, 299, 30)
axins1.plot(x_inset, data_to_fit[320:350], color = 'k', linewidth = 5)
axins1.plot(x_inset, fitting_with_Ohara_ICs[320:350], linestyle = '--', linewidth = 3)
axins1.plot(x_inset, fitting_with_TT06_ICs[320:350], linestyle = '--', linewidth = 3)
axins1.plot(x_inset, fitting_with_TT06_ICs_gamma_0[320:350], linestyle = '--', linewidth = 3)
# set up the inset ticks
axins1.set_xticks([270, 285, 300])
axins1.tick_params(axis = 'both', labelsize = 15)
# Plot the predicted APs with Kr block
ax[1].plot(x, validation_data[:650], label = 'Validation data', linestyle = '-', linewidth = 5, color = 'k')
ax[1].plot(x, fitting_with_Ohara_ICs_Kr_blocked[:650], label = 'Prediction #1', linestyle = '-', linewidth = 3)
ax[1].plot(x, fitting_with_TT06_ICs_Kr_blocked[:650], label = 'Prediction #2', linestyle = '-', linewidth = 3)
ax[1].plot(x, fitting_with_TT06_ICs_gamma_0_Kr_blocked[:650], label = 'Prediction #3', linestyle = '-', linewidth = 3)
ax[1].legend(fontsize = 25, loc = 'upper right')
ax[1].set_xlabel('Time (ms)', fontsize = size_labels)
ax[1].set_ylabel('Voltage (mV)', fontsize = size_labels)
ax[1].tick_params(axis = 'both', labelsize = size_ticks)
place_caption_label(ax[1], 'B', 'lower right')
# Add an inset to zoom into the short pacing periods
axins2 = ax[1].inset_axes(bounds = [0.7, 0.2, 0.3, 0.3])
x_inset = np.linspace(375, 424, 50)
axins2.plot(x_inset, validation_data[425:475], linestyle = '-', linewidth = 5, color = 'k')
axins2.plot(x_inset, fitting_with_Ohara_ICs_Kr_blocked[425:475], linestyle = '-', linewidth = 3)
axins2.plot(x_inset, fitting_with_TT06_ICs_Kr_blocked[425:475], linestyle = '-', linewidth = 3)
axins2.plot(x_inset, fitting_with_TT06_ICs_gamma_0_Kr_blocked[425:475], linestyle = '-', linewidth = 3)
# set up the inset ticks
axins2.set_xticks([375, 400, 425])
axins2.tick_params(axis = 'both', labelsize = 15)
# Save
plt.tight_layout()
plt.savefig('./Figures/Comparison of optimal APs.png', dpi = 300)
|
[
"matplotlib.pyplot.savefig",
"sabs_pkpd.cardiac.compute_APD",
"os.chdir",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"numpy.round",
"sabs_pkpd.load_model.load_simulation_from_mmt"
] |
[((250, 306), 'os.chdir', 'os.chdir', (['"""The/location/of/the/root/folder/of/this/repo"""'], {}), "('The/location/of/the/root/folder/of/this/repo')\n", (258, 306), False, 'import os\n'), ((397, 452), 'sabs_pkpd.load_model.load_simulation_from_mmt', 'sabs_pkpd.load_model.load_simulation_from_mmt', (['filename'], {}), '(filename)\n', (442, 452), False, 'import sabs_pkpd\n'), ((3196, 3221), 'numpy.linspace', 'np.linspace', (['(0)', '(999)', '(1000)'], {}), '(0, 999, 1000)\n', (3207, 3221), True, 'import numpy as np\n'), ((3388, 3413), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1])\n', (3396, 3413), True, 'import numpy as np\n'), ((4298, 4402), 'sabs_pkpd.cardiac.compute_APD', 'sabs_pkpd.cardiac.compute_APD', ([], {'AP': 'fitting_with_Ohara_ICs', 'time_points': 'time_points', 'upstroke_time': '(50)'}), '(AP=fitting_with_Ohara_ICs, time_points=\n time_points, upstroke_time=50)\n', (4327, 4402), False, 'import sabs_pkpd\n'), ((4614, 4728), 'sabs_pkpd.cardiac.compute_APD', 'sabs_pkpd.cardiac.compute_APD', ([], {'AP': 'fitting_with_Ohara_ICs_Kr_blocked', 'time_points': 'time_points', 'upstroke_time': '(50)'}), '(AP=fitting_with_Ohara_ICs_Kr_blocked,\n time_points=time_points, upstroke_time=50)\n', (4643, 4728), False, 'import sabs_pkpd\n'), ((5906, 6009), 'sabs_pkpd.cardiac.compute_APD', 'sabs_pkpd.cardiac.compute_APD', ([], {'AP': 'fitting_with_TT06_ICs', 'time_points': 'time_points', 'upstroke_time': '(50)'}), '(AP=fitting_with_TT06_ICs, time_points=\n time_points, upstroke_time=50)\n', (5935, 6009), False, 'import sabs_pkpd\n'), ((6220, 6333), 'sabs_pkpd.cardiac.compute_APD', 'sabs_pkpd.cardiac.compute_APD', ([], {'AP': 'fitting_with_TT06_ICs_Kr_blocked', 'time_points': 'time_points', 'upstroke_time': '(50)'}), '(AP=fitting_with_TT06_ICs_Kr_blocked,\n time_points=time_points, upstroke_time=50)\n', (6249, 6333), False, 'import sabs_pkpd\n'), ((6779, 6890), 'sabs_pkpd.cardiac.compute_APD', 'sabs_pkpd.cardiac.compute_APD', ([], {'AP': 'fitting_with_TT06_ICs_gamma_0', 'time_points': 'time_points', 'upstroke_time': '(50)'}), '(AP=fitting_with_TT06_ICs_gamma_0, time_points\n =time_points, upstroke_time=50)\n', (6808, 6890), False, 'import sabs_pkpd\n'), ((7110, 7231), 'sabs_pkpd.cardiac.compute_APD', 'sabs_pkpd.cardiac.compute_APD', ([], {'AP': 'fitting_with_TT06_ICs_gamma_0_Kr_blocked', 'time_points': 'time_points', 'upstroke_time': '(50)'}), '(AP=fitting_with_TT06_ICs_gamma_0_Kr_blocked,\n time_points=time_points, upstroke_time=50)\n', (7139, 7231), False, 'import sabs_pkpd\n'), ((7651, 7686), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '[15, 7]'}), '(1, 2, figsize=[15, 7])\n', (7663, 7686), True, 'import matplotlib.pyplot as plt\n'), ((7823, 7849), 'numpy.linspace', 'np.linspace', (['(-50)', '(599)', '(650)'], {}), '(-50, 599, 650)\n', (7834, 7849), True, 'import numpy as np\n'), ((8641, 8666), 'numpy.linspace', 'np.linspace', (['(270)', '(299)', '(30)'], {}), '(270, 299, 30)\n', (8652, 8666), True, 'import numpy as np\n'), ((10023, 10048), 'numpy.linspace', 'np.linspace', (['(375)', '(424)', '(50)'], {}), '(375, 424, 50)\n', (10034, 10048), True, 'import numpy as np\n'), ((10570, 10588), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10586, 10588), True, 'import matplotlib.pyplot as plt\n'), ((10590, 10653), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./Figures/Comparison of optimal APs.png"""'], {'dpi': '(300)'}), "('./Figures/Comparison of optimal APs.png', dpi=300)\n", (10601, 10653), True, 'import 
matplotlib.pyplot as plt\n'), ((3056, 3083), 'numpy.array', 'np.array', (["out['membrane.V']"], {}), "(out['membrane.V'])\n", (3064, 3083), True, 'import numpy as np\n'), ((2979, 3033), 'numpy.round', 'np.round', (["out['intracellular_ions.ki'][-1]"], {'decimals': '(2)'}), "(out['intracellular_ions.ki'][-1], decimals=2)\n", (2987, 3033), True, 'import numpy as np\n')]
|
import sys
import argparse
import numpy as np
import tensorflow as tf
from tensorflow import keras
class SampleModel(keras.Model):
def __init__(self, num_classes=10):
super(SampleModel, self).__init__(name='my_model')
self.num_classes = num_classes
# Define your layers here.
self.dense_1 = keras.layers.Dense(32, activation='relu')
self.dense_2 = keras.layers.Dense(num_classes, activation='sigmoid')
def call(self, inputs):
"""
        Define your forward pass here, using the layers you previously defined
        (in `__init__`).
"""
x = self.dense_1(inputs)
x = self.dense_2(x)
return x
def compute_output_shape(self, input_shape):
# You need to override this function if you want
# to use the subclassed model
# as part of a functional-style model.
# Otherwise, this method is optional.
shape = tf.TensorShape(input_shape).as_list()
shape[-1] = self.num_classes
return tf.TensorShape(shape)
def generate_toy_dataset(num_samples=1):
# Make toy data.
data = np.random.random((num_samples, 32))
labels = np.random.random((num_samples, 10))
# Instantiates a toy dataset instance.
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
dataset = dataset.repeat()
return(dataset)
def generate_toy_image(num_samples=1):
# Make toy data.
data = np.random.random((num_samples, 32))
labels = np.random.random((num_samples, 10))
# Instantiates a toy dataset instance.
dataset = tf.data.Dataset.from_tensor_slices((data, labels))
dataset = dataset.batch(32)
dataset = dataset.repeat()
return(dataset)
def main(_):
dataset = generate_toy_dataset(num_samples=1000)
val_dataset = generate_toy_dataset(num_samples=100)
if FLAGS.train_with_keras_fit:
# Instantiates the subclassed model.
sample_model = SampleModel(num_classes=10)
# The compile step specifies the training configuration.
sample_model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
sample_model.fit(dataset, epochs=100, steps_per_epoch=30,
validation_data=val_dataset,
validation_steps=3)
if FLAGS.train_with_estimator:
# Instantiates the subclassed model.
sample_model = SampleModel(num_classes=10)
# The compile step specifies the training configuration.
sample_model.compile(optimizer=tf.train.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
# Create an Estimator from the compiled Keras model. Note the initial
# model state of the keras model is preserved in the created Estimator.
sample_est = tf.keras.estimator.model_to_estimator(
keras_model=sample_model)
sample_est.train(input_fn=generate_toy_dataset, steps=2000)
if __name__ == '__main__':
# Instantiates an arg parser
parser = argparse.ArgumentParser()
# Establishes default arguments
parser.add_argument("--output_dir",
type=str,
default="C:\\path\\to\\output\\directory\\",
help="The complete desired output filepath.")
parser.add_argument("--train_with_estimator",
type=bool,
default=True,
help="")
parser.add_argument("--train_with_keras_fit",
type=bool,
default=True,
help="")
# Parses known arguments
FLAGS, unparsed = parser.parse_known_args()
# Runs the tensorflow app
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
[
"tensorflow.keras.estimator.model_to_estimator",
"argparse.ArgumentParser",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.random.random",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.keras.layers.Dense",
"tensorflow.TensorShape",
"tensorflow.app.run"
] |
[((1126, 1161), 'numpy.random.random', 'np.random.random', (['(num_samples, 32)'], {}), '((num_samples, 32))\n', (1142, 1161), True, 'import numpy as np\n'), ((1175, 1210), 'numpy.random.random', 'np.random.random', (['(num_samples, 10)'], {}), '((num_samples, 10))\n', (1191, 1210), True, 'import numpy as np\n'), ((1269, 1319), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(data, labels)'], {}), '((data, labels))\n', (1303, 1319), True, 'import tensorflow as tf\n'), ((1477, 1512), 'numpy.random.random', 'np.random.random', (['(num_samples, 32)'], {}), '((num_samples, 32))\n', (1493, 1512), True, 'import numpy as np\n'), ((1526, 1561), 'numpy.random.random', 'np.random.random', (['(num_samples, 10)'], {}), '((num_samples, 10))\n', (1542, 1561), True, 'import numpy as np\n'), ((1620, 1670), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(data, labels)'], {}), '((data, labels))\n', (1654, 1670), True, 'import tensorflow as tf\n'), ((3217, 3242), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3240, 3242), False, 'import argparse\n'), ((3920, 3972), 'tensorflow.app.run', 'tf.app.run', ([], {'main': 'main', 'argv': '([sys.argv[0]] + unparsed)'}), '(main=main, argv=[sys.argv[0]] + unparsed)\n', (3930, 3972), True, 'import tensorflow as tf\n'), ((334, 375), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (352, 375), False, 'from tensorflow import keras\n'), ((399, 452), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['num_classes'], {'activation': '"""sigmoid"""'}), "(num_classes, activation='sigmoid')\n", (417, 452), False, 'from tensorflow import keras\n'), ((1028, 1049), 'tensorflow.TensorShape', 'tf.TensorShape', (['shape'], {}), '(shape)\n', (1042, 1049), True, 'import tensorflow as tf\n'), ((2995, 3058), 'tensorflow.keras.estimator.model_to_estimator', 'tf.keras.estimator.model_to_estimator', ([], {'keras_model': 'sample_model'}), '(keras_model=sample_model)\n', (3032, 3058), True, 'import tensorflow as tf\n'), ((938, 965), 'tensorflow.TensorShape', 'tf.TensorShape', (['input_shape'], {}), '(input_shape)\n', (952, 965), True, 'import tensorflow as tf\n'), ((2117, 2149), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['(0.001)'], {}), '(0.001)\n', (2142, 2149), True, 'import tensorflow as tf\n'), ((2668, 2700), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['(0.001)'], {}), '(0.001)\n', (2693, 2700), True, 'import tensorflow as tf\n')]
|
import os
import threading
import time
from collections import deque
import numpy as np
from threading import Thread
from agents.dqn_agent import DqnAgent
from main import App
# Number of games to play
from utils.logger import DataLogger
n_episodes = 10000
save_period = 50 # Saves off every n episodes' model
batch_size = 32 # multiples of 2
state_size = 10
action_size = 5 # 7 if we want to move, not doing that for now
output_dir = 'models/'
class Handler:
def __init__(self):
self.lock = threading.Lock()
self.callback_triggered = False
self.next_state = None
self.reward = None
self.game_over = None
def callback(self, next_state, reward, game_over):
with self.lock:
# print("SET TRUE")
self.callback_triggered = True
self.next_state = next_state
self.reward = reward
self.game_over = game_over
def wait_for_callback(self,):
while True:
with self.lock:
if self.callback_triggered:
# print("Next State received!")
self.callback_triggered = False
break
time.sleep(0.0001)
return self.next_state, self.reward, self.game_over
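# (Added note; not part of the original code.) Handler.wait_for_callback polls the lock every
# 0.1 ms. An equivalent, poll-free sketch would use a threading.Event instead:
# self.ready = threading.Event()            # in __init__
# self.ready.set()                          # at the end of callback()
# self.ready.wait(); self.ready.clear()     # replacing the while-loop in wait_for_callback()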
# Setup our output dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Create a game environment
handler = Handler()
game = App(training_mode=True, ml_step_callback=handler.callback)
thread = Thread(target=game.on_execute)
thread.start()
# Create the agent
agent = DqnAgent(state_size, action_size, force_continue=True) # Set true to continue with low epsilon and loaded model
# Create a data logger
logger = DataLogger(
n_episodes,
save_period,
batch_size,
state_size,
action_size
)
# Let the game start up
time.sleep(5)
# Track some times
last_play_time = 0
last_train_time = 0
# Sliding window so we can check the winning rate, and see if its increasing
winners_window = []
window_size = int(n_episodes*0.1)
p1_win_ratio = 0
p2_win_ratio = 0
# Track winner count
winners = {}
# Play n_episodes count games
for e in range(n_episodes): # iterate over new episodes of the game
try:
# Reset the state of the game with a restart, wait for it to take
print("Resetting game state...")
game.queue_ml_action(-1) # -1 restarts, -2 quits
_ = handler.wait_for_callback()
state = np.reshape(game.get_game_state(), [1, state_size])
game_over = False
print("Reset. Starting game " + str(e))
time_start = time.time()
msg = "Game " + str(e + 1) + " of " + str(n_episodes) + ", LPT: " + \
str(last_play_time) + ", LTT: " + str(last_train_time) + ", epsilon: " + str(agent.get_epsilon())
game.show_message(msg)
print(msg)
for winner in winners:
print(winner + " has " + str(winners[winner]) + " wins so far.")
while not game_over:
# print("**********************************************")
# print("****************** NEW ROUND *****************")
# print("**********************************************")
# Make our agent act
action = agent.act(state)
# print("queue action: " + str(action))
game.queue_ml_action(action) # Sends the 'step' commanad
# Get the next state, etc from the action
# print("wait for next state")
next_state, reward, game_over = handler.wait_for_callback()
# print("handle next state")
# Remember the action
next_state = np.reshape(next_state, [1, state_size])
agent.remember(state, action, reward, next_state, game_over)
# Save off this round
#logger.add_step({
# "state": state,
# "action": action,
# "reward": reward,
# "next_state": next_state,
# "game_over": game_over
#})
# Save the state as next state
state = next_state
if game_over:
print("GAME OVER: " + game.get_winner().get_name() + " wins!")
if game.get_winner().get_name() not in winners:
winners[game.get_winner().get_name()] = 1
else:
winners[game.get_winner().get_name()] += 1
winners_window.append(game.get_winner().get_name())
print("episode: {}/{}, e: {:.2}" # print the episode's score and agent's epsilon
.format(e, n_episodes, agent.get_epsilon()))
game_end = time.time()
# Train the agent off the game we just played
if len(agent.get_memory()) > batch_size:
agent.replay(batch_size)
train_end = time.time()
last_play_time = (int((game_end-time_start) / 60 * 10000)) / 10000
last_train_time = (int((train_end-game_end) / 60 * 10000)) / 10000
print("Playing took: " + str(last_play_time) + " minutes.")
print("Training took: " + str(last_train_time) + " minutes.")
if len(winners_window) == window_size:
win_count_1 = winners_window.count(game.get_player_1().get_name())
win_count_2 = winners_window.count(game.get_player_2().get_name())
p1_win_ratio = win_count_1/window_size
p2_win_ratio = win_count_2/window_size
winners_window = []
print("Player 1 win ratio: " + str(p1_win_ratio))
print("Player 2 win ratio: " + str(p2_win_ratio))
logger.add_game({
"winner": "Player 1" if game.get_winner() == game.get_player_1() else "Player 2",
"play_time": last_play_time,
"train_time": last_train_time,
"epsilon": agent.get_epsilon(),
"player_1_health": game.get_player_1().get_health(),
"player_2_health": game.get_player_2().get_health(),
"p1_win_ratio": p1_win_ratio,
"p2_win_ratio": p2_win_ratio
})
# Save off every 50 episodes
if e % save_period == 0:
agent.save(output_dir + "weights_" + '{:04d}'.format(e + agent.restart_file_number_offset) + ".hdf5")
logger.write_object_to_file()
logger.add_any('winners', winners)
except KeyboardInterrupt:
break
# End game
print("Ending game...")
game.queue_ml_action(-2)
print("Ended.")
print("Writing out log file...")
logger.write_object_to_file()
print("Log written")
print("Showing win graphs...")
logger.show_graphs()
print("Graphs closed.")
|
[
"os.path.exists",
"numpy.reshape",
"os.makedirs",
"threading.Lock",
"time.sleep",
"utils.logger.DataLogger",
"main.App",
"threading.Thread",
"time.time",
"agents.dqn_agent.DqnAgent"
] |
[((1426, 1484), 'main.App', 'App', ([], {'training_mode': '(True)', 'ml_step_callback': 'handler.callback'}), '(training_mode=True, ml_step_callback=handler.callback)\n', (1429, 1484), False, 'from main import App\n'), ((1494, 1524), 'threading.Thread', 'Thread', ([], {'target': 'game.on_execute'}), '(target=game.on_execute)\n', (1500, 1524), False, 'from threading import Thread\n'), ((1568, 1622), 'agents.dqn_agent.DqnAgent', 'DqnAgent', (['state_size', 'action_size'], {'force_continue': '(True)'}), '(state_size, action_size, force_continue=True)\n', (1576, 1622), False, 'from agents.dqn_agent import DqnAgent\n'), ((1714, 1786), 'utils.logger.DataLogger', 'DataLogger', (['n_episodes', 'save_period', 'batch_size', 'state_size', 'action_size'], {}), '(n_episodes, save_period, batch_size, state_size, action_size)\n', (1724, 1786), False, 'from utils.logger import DataLogger\n'), ((1834, 1847), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1844, 1847), False, 'import time\n'), ((1314, 1340), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1328, 1340), False, 'import os\n'), ((1346, 1369), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1357, 1369), False, 'import os\n'), ((517, 533), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (531, 533), False, 'import threading\n'), ((2592, 2603), 'time.time', 'time.time', ([], {}), '()\n', (2601, 2603), False, 'import time\n'), ((4671, 4682), 'time.time', 'time.time', ([], {}), '()\n', (4680, 4682), False, 'import time\n'), ((4845, 4856), 'time.time', 'time.time', ([], {}), '()\n', (4854, 4856), False, 'import time\n'), ((1202, 1220), 'time.sleep', 'time.sleep', (['(0.0001)'], {}), '(0.0001)\n', (1212, 1220), False, 'import time\n'), ((3659, 3698), 'numpy.reshape', 'np.reshape', (['next_state', '[1, state_size]'], {}), '(next_state, [1, state_size])\n', (3669, 3698), True, 'import numpy as np\n')]
|
import numpy as np
from random import sample, seed
#import matplotlib.pyplot as plt
from sys import argv, stdout
#from scipy.stats import gumbel_r
from score_matrix import readScoreMatrix, getMatrix
from seqali import smithWaterman, smithFast, plotMat, plotTraceMat
from multiprocessing import Process, Manager
def scrambler_aligner(pn, ssd, N, sa, sb, ms, go, ge):
seed()
sscores = []
for i in range(N):
#print("Process {}, pass {} ".format(pn,i+1))
sa = "".join(sample(sa, len(sa)))
s, a, ma, ta = smithFast(
sa, sb, ms, gapO=go, gapE=ge)
sscores.append(s)
ssd[pn] = sscores
#seqB = "HEAGAWGHEE"
#seqA = "PAWHEAE"
# seqB = "GVTAH"
# seqA = "AVTLI"
seqB = "MVLSPADKTNVKAAWGKVGAHAGEYGAEALERMFLSFPTTKTYFPHFDLSHGSAQVKGHG"
seqA = "MVHLTPEEKSAVTALWGKVNVDEVGGEALGRLLVVYPWTQRFFESFGDLSTPDAVMGNPK"
#seqB = "MVLSPADKTNVKAAWGKVGAHAGEYG"
#seqA = "MVHLTPEEKSAVTALWGKVNVDEVGG"
gapOpen = -10
gapExtend = -1
#gapOpen = -8
#gapExtend = -8
matrix = "BLOSUM50"
if(len(argv) > 1):
N = int(argv[1])
else:
N = 100
# init score matrix
#matScore = np.zeros((26, 26), dtype=np.int8)
#readMat("blosum50.txt", matScore)
readScoreMatrix(matrix)
matScore = getMatrix()
# Calculate unscrambled aligment and score
s, a, ma, ta = smithWaterman(
seqA, seqB, matScore, gapO=gapOpen, gapE=gapExtend)
ua = a
uscore = s
print("Scoring matrix: ", matrix)
print("Unscrambled score:", uscore)
print("Unscrambled identity: {:.2%}".format(sum([ua[0][i] == ua[1][i] and
ua[0][i] != '-' for i in range(len(ua[0]))])/len(ua[0])))
print("Unscrambled alignment:")
print("SeqA - ", ua[0])
print("SeqB - ", ua[1])
print()
if N==0 :
exit(0)
print("Calculating distribution of scrambled alignment scores.")
proc_count = 4
procs = []
sscores_dict = Manager().dict()
for i in range(proc_count):
proc = Process(target=scrambler_aligner, args=(i, sscores_dict, N, seqA, seqB, matScore, gapOpen, gapExtend))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
#print(sscores_dict.values())
sscores = sum(sscores_dict.values(),[])
#print(sscores)
#exit(0)
N = len(sscores)  # with 4 worker processes this is 4 times the initial value
# Fit extreme value distribution to data
#miu, beta = gumbel_r.fit(sscores)
print("Length of sscores: ", len(sscores))
print("Calculed histogram for {} scramble scores".format(N))
print("Max scrambled score:", max(sscores))
print("Min scrambled score:", min(sscores))
print("Median of scrambled scores:", np.median(sscores))
print("Gumbel miu:", miu)
print("Gumbel beta:", beta)
print()
# print("Aligment matrix:")
# np.savetxt(sys.stdout, ma, fmt="%3d")
print("Saving data to","'smith_{}_{}_{}_{:3.1f}_{:3.1f}.npy'".format(
N, len(seqA), matrix, abs(gapOpen), abs(gapExtend)))
np.save("smith_{}_{}_{}_{:3.1f}_{:3.1f}".format(
N, len(seqA), matrix, abs(gapOpen), abs(gapExtend)),sscores)
|
[
"score_matrix.readScoreMatrix",
"numpy.median",
"score_matrix.getMatrix",
"multiprocessing.Process",
"seqali.smithFast",
"seqali.smithWaterman",
"random.seed",
"multiprocessing.Manager"
] |
[((1193, 1216), 'score_matrix.readScoreMatrix', 'readScoreMatrix', (['matrix'], {}), '(matrix)\n', (1208, 1216), False, 'from score_matrix import readScoreMatrix, getMatrix\n'), ((1229, 1240), 'score_matrix.getMatrix', 'getMatrix', ([], {}), '()\n', (1238, 1240), False, 'from score_matrix import readScoreMatrix, getMatrix\n'), ((1300, 1365), 'seqali.smithWaterman', 'smithWaterman', (['seqA', 'seqB', 'matScore'], {'gapO': 'gapOpen', 'gapE': 'gapExtend'}), '(seqA, seqB, matScore, gapO=gapOpen, gapE=gapExtend)\n', (1313, 1365), False, 'from seqali import smithWaterman, smithFast, plotMat, plotTraceMat\n'), ((371, 377), 'random.seed', 'seed', ([], {}), '()\n', (375, 377), False, 'from random import sample, seed\n'), ((1904, 2010), 'multiprocessing.Process', 'Process', ([], {'target': 'scrambler_aligner', 'args': '(i, sscores_dict, N, seqA, seqB, matScore, gapOpen, gapExtend)'}), '(target=scrambler_aligner, args=(i, sscores_dict, N, seqA, seqB,\n matScore, gapOpen, gapExtend))\n', (1911, 2010), False, 'from multiprocessing import Process, Manager\n'), ((2548, 2566), 'numpy.median', 'np.median', (['sscores'], {}), '(sscores)\n', (2557, 2566), True, 'import numpy as np\n'), ((537, 576), 'seqali.smithFast', 'smithFast', (['sa', 'sb', 'ms'], {'gapO': 'go', 'gapE': 'ge'}), '(sa, sb, ms, gapO=go, gapE=ge)\n', (546, 576), False, 'from seqali import smithWaterman, smithFast, plotMat, plotTraceMat\n'), ((1847, 1856), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (1854, 1856), False, 'from multiprocessing import Process, Manager\n')]
|
import numpy as np
def make_grid_edges(x, neighborhood=4, return_lists=False):
if neighborhood not in [4, 8]:
raise ValueError("neighborhood can only be '4' or '8', got %s" %
repr(neighborhood))
inds = np.arange(x.shape[0] * x.shape[1]).reshape(x.shape[:2])
inds = inds.astype(np.int64)
right = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]
down = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]
edges = [right, down]
if neighborhood == 8:
upright = np.c_[inds[1:, :-1].ravel(), inds[:-1, 1:].ravel()]
downright = np.c_[inds[:-1, :-1].ravel(), inds[1:, 1:].ravel()]
edges.extend([upright, downright])
if return_lists:
return edges
return np.vstack(edges)
def edge_list_to_features(edge_list):
edges = np.vstack(edge_list)
edge_features = np.zeros((edges.shape[0], 2))
edge_features[:len(edge_list[0]), 0] = 1
edge_features[len(edge_list[0]):, 1] = 1
return edge_features
def generate_binary_edges(length, window):
"""
[(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (0, 2), (1, 3), (2, 4), (3, 5)]
"""
edges = []
for w in range(1, window + 1):
for i in range(length - w):
edges.append((i, i + w))
return edges
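# (Usage example added for illustration; not part of the original module.) For a 2x3 grid
# with the default 4-neighbourhood, cells are numbered row-major (0..5), and
# make_grid_edges(np.zeros((2, 3))) stacks the horizontal and vertical edge pairs
# [0,1], [1,2], [3,4], [4,5], [0,3], [1,4], [2,5] into a single (7, 2) array.
# generate_binary_edges(6, 2) likewise yields the chain edges listed in its docstring.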
|
[
"numpy.zeros",
"numpy.vstack",
"numpy.arange"
] |
[((745, 761), 'numpy.vstack', 'np.vstack', (['edges'], {}), '(edges)\n', (754, 761), True, 'import numpy as np\n'), ((814, 834), 'numpy.vstack', 'np.vstack', (['edge_list'], {}), '(edge_list)\n', (823, 834), True, 'import numpy as np\n'), ((855, 884), 'numpy.zeros', 'np.zeros', (['(edges.shape[0], 2)'], {}), '((edges.shape[0], 2))\n', (863, 884), True, 'import numpy as np\n'), ((245, 279), 'numpy.arange', 'np.arange', (['(x.shape[0] * x.shape[1])'], {}), '(x.shape[0] * x.shape[1])\n', (254, 279), True, 'import numpy as np\n')]
|
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from onnx import helper as oh
from finn.core.datatype import DataType
from finn.transformation import Transformation
from finn.util.basic import get_by_name
from finn.custom_op.registry import getCustomOp
from finn.transformation.infer_datatypes import InferDataTypes
class AbsorbAddIntoMultiThreshold(Transformation):
"""Absorb preceding Add ops into MultiThreshold by updating the threshold
values. Only scalar/1D add vectors can be absorbed."""
def apply(self, model):
graph = model.graph
node_ind = 0
graph_modified = False
for n in graph.node:
node_ind += 1
if n.op_type == "Add":
consumer = model.find_consumer(n.output[0])
if consumer is not None and consumer.op_type == "MultiThreshold":
add_weight_name = n.input[1]
threshold_name = consumer.input[1]
A = model.get_initializer(add_weight_name)
T = model.get_initializer(threshold_name)
assert A is not None, "Initializer for add weights is not set."
assert T is not None, "Initializer for thresholds is not set."
start_name = n.input[0]
# we can only absorb 0d or 1d adds
is_scalar = A.ndim == 0 or all(x == 1 for x in A.shape)
actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
is_1d = actual_ndims == 1
if is_scalar or is_1d:
Tnew = T - A.reshape(-1, 1)
# Tnew = T - A.reshape(-1, T.shape[1])
# compute new thresholds and set initializer
model.set_initializer(threshold_name, Tnew)
# wire add input directly to MultiThreshold
consumer.input[0] = start_name
# remove the add node
graph.node.remove(n)
graph_modified = True
return (model, graph_modified)
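# (Explanatory note added; not part of the original FINN source.) The absorption above relies
# on the identity (x + a) >= t  <=>  x >= (t - a): adding a per-channel constant before a
# MultiThreshold is equivalent to shifting that channel's thresholds down by the same
# constant, which is exactly what Tnew = T - A.reshape(-1, 1) implements.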
class AbsorbMulIntoMultiThreshold(Transformation):
"""Absorb preceding Mul ops into MultiThreshold by updating the threshold
values. Only *positive* scalar/1D mul vectors can be absorbed."""
def apply(self, model):
graph = model.graph
node_ind = 0
graph_modified = False
for n in graph.node:
node_ind += 1
if n.op_type == "Mul":
mul_weight_name = n.input[1]
A = model.get_initializer(mul_weight_name)
assert A is not None, "Initializer for mul weights is not set."
is_signed = (A < 0).any()
is_scalar = A.ndim == 0 or all(x == 1 for x in A.shape)
actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
is_1d = actual_ndims == 1
consumer = model.find_consumer(n.output[0])
if consumer is not None and consumer.op_type == "MultiThreshold":
if not is_signed and (is_1d or is_scalar):
threshold_name = consumer.input[1]
T = model.get_initializer(threshold_name)
assert T is not None, "Initializer for thresholds is not set."
start_name = n.input[0]
# compute new thresholds and set initializer
Tnew = T / A.reshape(-1, 1)
# TODO: need to handle negative A values correctly; produce
# mul sign mask and merge into preceding matmul?
model.set_initializer(threshold_name, Tnew)
# wire add input directly to MultiThreshold
consumer.input[0] = start_name
# remove the mul node
graph.node.remove(n)
graph_modified = True
return (model, graph_modified)
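# (Explanatory note added; not part of the original FINN source.) For a positive scale a,
# (a * x) >= t  <=>  x >= t / a, so the Mul is folded into the thresholds as
# Tnew = T / A.reshape(-1, 1). Negative scales are excluded because they would flip the
# inequality, which a plain rescaling of the thresholds cannot express.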
class FactorOutMulSignMagnitude(Transformation):
"""Split multiply-by-constant nodes into two multiply-by-constant nodes,
where the first node is a bipolar vector (of signs) and the second is a
vector of magnitudes."""
def apply(self, model):
graph = model.graph
node_ind = 0
graph_modified = False
for n in graph.node:
node_ind += 1
if n.op_type == "Mul":
mul_weight_name = n.input[1]
A = model.get_initializer(mul_weight_name)
assert A is not None, "Initializer for mul weights is not set."
is_scalar = np.prod(A.shape) == 1
actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
is_1d = actual_ndims == 1
is_not_bipolar = (
model.get_tensor_datatype(mul_weight_name) != DataType.BIPOLAR
)
is_signed = (A < 0).any()
if is_signed and (is_scalar or is_1d) and is_not_bipolar:
start_name = n.input[0]
in_shape = model.get_tensor_shape(start_name)
middle_name = model.make_new_valueinfo_name()
model.set_tensor_shape(middle_name, in_shape)
sign_mul_param_name = model.make_new_valueinfo_name()
# create new mul node with sign(A) as the operand
sgn = np.sign(A)
model.set_initializer(sign_mul_param_name, sgn)
model.set_tensor_datatype(sign_mul_param_name, DataType.BIPOLAR)
# replace original mul weight by magnitudes
model.set_initializer(mul_weight_name, np.abs(A))
new_mul = oh.make_node(
"Mul", [start_name, sign_mul_param_name], [middle_name]
)
n.input[0] = middle_name
graph.node.insert(node_ind - 1, new_mul)
graph_modified = True
return (model, graph_modified)
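# (Explanatory note added; not part of the original FINN source.) FactorOutMulSignMagnitude
# rewrites Mul-by-A as two Muls using A = sign(A) * |A|: the inserted node multiplies by the
# BIPOLAR sign vector, the original node keeps abs(A), and later passes that only handle
# non-negative scales (such as AbsorbMulIntoMultiThreshold) can still absorb the magnitudes.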
class Absorb1BitMulIntoMatMul(Transformation):
"""Absorb bipolar or binary multiplications into the preciding matrix
multiply."""
def apply(self, model):
graph = model.graph
node_ind = 0
graph_modified = False
for n in graph.node:
node_ind += 1
if n.op_type == "MatMul":
matmul_weight_name = n.input[1]
W = model.get_initializer(matmul_weight_name)
Wdt = model.get_tensor_datatype(matmul_weight_name)
assert W is not None, "Initializer for matmul weights is not set."
consumer = model.find_consumer(n.output[0])
if consumer is not None and consumer.op_type == "Mul":
mul_weight_name = consumer.input[1]
A = model.get_initializer(mul_weight_name)
assert A is not None, "Initializer for mul weights is not set."
is_1bit = model.get_tensor_datatype(mul_weight_name).bitwidth() == 1
if is_1bit:
Wnew = A * W
assert (
Wnew.shape == W.shape
), """Shape of new weights is not
the same as the shape of the weight matrix before."""
check_fxn = np.vectorize(lambda x: Wdt.allowed(x))
# only absorb if permitted by W datatype
if check_fxn(Wnew).all():
model.set_initializer(matmul_weight_name, Wnew)
n.output[0] = consumer.output[0]
graph.node.remove(consumer)
graph_modified = True
return (model, graph_modified)
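# (Explanatory note added; not part of the original FINN source.) Scaling the MatMul output
# channelwise by A equals scaling the corresponding columns of W, i.e.
# (x @ W) * A == x @ (W * A) for a broadcastable A, so the 1-bit Mul is folded into the
# weights as Wnew = A * W, provided every entry of Wnew still fits W's datatype.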
class Absorb1BitMulIntoConv(Transformation):
"""Absorb bipolar or binary multiplications into the preciding convolution."""
def apply(self, model):
graph = model.graph
node_ind = 0
graph_modified = False
for n in graph.node:
node_ind += 1
if n.op_type == "Conv":
conv_weight_name = n.input[1]
W = model.get_initializer(conv_weight_name)
Wdt = model.get_tensor_datatype(conv_weight_name)
assert W is not None, "Initializer for conv weights is not set."
consumer = model.find_consumer(n.output[0])
if consumer is not None and consumer.op_type == "Mul":
mul_weight_name = consumer.input[1]
A = model.get_initializer(mul_weight_name)
assert A is not None, "Initializer for mul weights is not set."
is_1bit = model.get_tensor_datatype(mul_weight_name).bitwidth() == 1
is_scalar = np.prod(A.shape) == 1
actual_ndims = len(tuple(filter(lambda x: x > 1, A.shape)))
is_1d = actual_ndims == 1
if is_1bit and (is_1d or is_scalar):
# move the mul to the OFM position, since the mul is
# applied on the outputs channelwise or as scalar
Wnew = A.reshape(-1, 1, 1, 1) * W
assert (
Wnew.shape == W.shape
), """Shape of new weights is not
the same as the shape of the conv weights before."""
check_fxn = np.vectorize(lambda x: Wdt.allowed(x))
# only absorb if permitted by W datatype
if check_fxn(Wnew).all():
model.set_initializer(conv_weight_name, Wnew)
n.output[0] = consumer.output[0]
graph.node.remove(consumer)
graph_modified = True
return (model, graph_modified)
class AbsorbTransposeIntoMultiThreshold(Transformation):
"""Change (NHWCTranpose -> MultiThreshold -> NCHWTranspose) to (MultiThreshold)
with NHWC mode."""
def apply(self, model):
graph = model.graph
node_ind = 0
graph_modified = False
for n in graph.node:
node_ind += 1
if n.op_type == "Transpose":
perms = list(get_by_name(n.attribute, "perm").ints)
if perms == [0, 3, 1, 2]:
mt_cand = model.find_consumer(n.output[0])
if mt_cand.op_type == "MultiThreshold":
final_t_cand = model.find_consumer(mt_cand.output[0])
if final_t_cand.op_type == "Transpose":
perms = list(
get_by_name(final_t_cand.attribute, "perm").ints
)
if perms == [0, 2, 3, 1]:
mt = getCustomOp(mt_cand)
mt.set_nodeattr("data_layout", "NHWC")
# get rid of transpose nodes, wire MT directly
mt_cand.input[0] = n.input[0]
mt_cand.output[0] = final_t_cand.output[0]
graph.node.remove(n)
graph.node.remove(final_t_cand)
graph_modified = True
elif final_t_cand.op_type == "Reshape":
oshape = model.get_tensor_shape(final_t_cand.output[0])
if len(oshape) == 2:
# transition to FC part, can still use NHWC
mt = getCustomOp(mt_cand)
mt.set_nodeattr("data_layout", "NHWC")
# get rid of first transpose node
mt_cand.input[0] = n.input[0]
# fix output shape for MultiThreshold
mt_ishape = model.get_tensor_shape(mt_cand.input[0])
(b, h, w, c) = mt_ishape
assert (
h == 1 and w == 1
), """Untested spatial dim
in conv->fc transition, proceed with caution!"""
model.set_tensor_shape(mt_cand.output[0], mt_ishape)
graph.node.remove(n)
graph_modified = True
if graph_modified:
model = model.transform(InferDataTypes())
return (model, graph_modified)
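# Usage sketch (not part of the original module): FINN Transformation subclasses
# like the ones above are normally applied through ModelWrapper.transform(),
# which calls apply() repeatedly until graph_modified stays False. The import
# path and the "model.onnx" filename below are assumptions for illustration only.
#
# from finn.core.modelwrapper import ModelWrapper
# model = ModelWrapper("model.onnx")
# model = model.transform(Absorb1BitMulIntoMatMul())
# model = model.transform(Absorb1BitMulIntoConv())
# model = model.transform(AbsorbTransposeIntoMultiThreshold())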
|
[
"numpy.prod",
"finn.transformation.infer_datatypes.InferDataTypes",
"numpy.abs",
"onnx.helper.make_node",
"finn.util.basic.get_by_name",
"numpy.sign",
"finn.custom_op.registry.getCustomOp"
] |
[((14258, 14274), 'finn.transformation.infer_datatypes.InferDataTypes', 'InferDataTypes', ([], {}), '()\n', (14272, 14274), False, 'from finn.transformation.infer_datatypes import InferDataTypes\n'), ((6198, 6214), 'numpy.prod', 'np.prod', (['A.shape'], {}), '(A.shape)\n', (6205, 6214), True, 'import numpy as np\n'), ((7002, 7012), 'numpy.sign', 'np.sign', (['A'], {}), '(A)\n', (7009, 7012), True, 'import numpy as np\n'), ((7330, 7399), 'onnx.helper.make_node', 'oh.make_node', (['"""Mul"""', '[start_name, sign_mul_param_name]', '[middle_name]'], {}), "('Mul', [start_name, sign_mul_param_name], [middle_name])\n", (7342, 7399), True, 'from onnx import helper as oh\n'), ((7289, 7298), 'numpy.abs', 'np.abs', (['A'], {}), '(A)\n', (7295, 7298), True, 'import numpy as np\n'), ((10457, 10473), 'numpy.prod', 'np.prod', (['A.shape'], {}), '(A.shape)\n', (10464, 10473), True, 'import numpy as np\n'), ((11959, 11991), 'finn.util.basic.get_by_name', 'get_by_name', (['n.attribute', '"""perm"""'], {}), "(n.attribute, 'perm')\n", (11970, 11991), False, 'from finn.util.basic import get_by_name\n'), ((12549, 12569), 'finn.custom_op.registry.getCustomOp', 'getCustomOp', (['mt_cand'], {}), '(mt_cand)\n', (12560, 12569), False, 'from finn.custom_op.registry import getCustomOp\n'), ((12379, 12422), 'finn.util.basic.get_by_name', 'get_by_name', (['final_t_cand.attribute', '"""perm"""'], {}), "(final_t_cand.attribute, 'perm')\n", (12390, 12422), False, 'from finn.util.basic import get_by_name\n'), ((13337, 13357), 'finn.custom_op.registry.getCustomOp', 'getCustomOp', (['mt_cand'], {}), '(mt_cand)\n', (13348, 13357), False, 'from finn.custom_op.registry import getCustomOp\n')]
|
# coding: utf-8
#
# This code is part of qclib.
#
# Copyright (c) 2021, <NAME>
import numpy as np
from ..math import apply_statevec, apply_density, density_matrix
from .measure import measure_qubit, measure_qubit_rho
class DensityMatrix:
def __init__(self, mat):
self._data = np.asarray(mat)
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, item):
return self._data[item]
def __str__(self):
return str(self._data)
def copy(self):
return self.__class__(self._data.copy())
def apply(self, operator):
self._data = apply_density(self._data, operator)
def measure(self, qubit, eigvals=None, eigvecs=None):
res, self._data = measure_qubit_rho(self._data, qubit, eigvals, eigvecs)
return res
class StateVector:
def __init__(self, vec):
self._data = np.asarray(vec)
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, item):
return self._data[item]
def __str__(self):
return str(self._data)
def copy(self):
return self.__class__(self._data.copy())
def apply(self, operator):
self._data = apply_statevec(self._data, operator)
def measure(self, qubit, eigvals=None, eigvecs=None):
res, self._data = measure_qubit(self._data, qubit, eigvals, eigvecs)
return res
def to_density(self):
return DensityMatrix(density_matrix(self._data))
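# Usage sketch (illustrative, not part of the original module). It assumes
# `apply` expects a full-register operator matrix, as the apply_statevec /
# apply_density calls above suggest; the single-qubit state and NOT gate below
# are made up for the example.
#
# import numpy as np
# psi = StateVector(np.array([1.0, 0.0]))    # one qubit in |0>
# X = np.array([[0.0, 1.0], [1.0, 0.0]])      # NOT gate
# psi.apply(X)                                # state becomes |1>
# rho = psi.to_density()                      # equivalent DensityMatrix
# outcome = psi.measure(0)                    # collapse qubit 0, return result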
|
[
"numpy.asarray"
] |
[((292, 307), 'numpy.asarray', 'np.asarray', (['mat'], {}), '(mat)\n', (302, 307), True, 'import numpy as np\n'), ((931, 946), 'numpy.asarray', 'np.asarray', (['vec'], {}), '(vec)\n', (941, 946), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# #### Modeling the elemental stoichiometry of phytoplankton and surrounding surface waters in an upwelling or estuarine system
# >Steps to complete project:
# >1. Translate matlab physical model into python
# >2. Substitute Dynamic CFM into model for eco component
# >3. Analyze the results
#
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math as m
from math import pi
import time
import pylab as lab
# In[2]:
#create the time grid
sperd=60*60*24 #seconds per day
spery=365*sperd #seconds per year
nyr=1 #number of years to simulate. This can be changed by the user
T=spery*nyr #length of time in seconds to simulate
dt=spery/720 #time step
t=np.arange(0,T+dt,dt) #create a time array
Nt=T/dt+1 #number of time grid points
Nt=int(Nt)
# In[3]:
#create the spatial grid
Lx=5e5 #length in meters
dx=1e3 #grid spacing
Nx=(Lx/dx)+1 #number of grid points
Nx=int(Nx)
xx=np.arange(0,Lx+dx,dx) #create a space array
# In[4]:
#wind forcing
U0=0.1 #velocity (m/s) subject to change
# In[5]:
#initial conditions of nutrient variables NO3 (Nnut) and PO4 (Pnut)
Pnut=2*np.ones(Nx) #creating a ones array the size of the number of spatial grid points
Pnut_i=Pnut[0] #initial value of phosphorus at the coastal boundary
Rup_Po=10*Pnut_i/spery #baseline P uptake rate (will be changed with CFM-Phyto)
Nnut=Pnut*15 #assume NO3 concentration higher than PO4. Change based on field observations
Nnut_i=Pnut_i*15 #initial value of NO3 available at the coastal boundary
Rup_No=10*Nnut_i/spery #baseline N uptake rate (will be changed with CFM-Phyto)
# In[6]:
#initial condition of biomass variables- to be replaced with CFM later
Pbio=0.01*Pnut
Pbio_i=0.01*Pnut_i #phytoplankton P at coast (boundary condition)
Nbio=0.01*Nnut
Nbio_i=0.01*Nnut_i #phytoplankton N at coast (boundary condition)
print(np.size(Nbio))
# In[7]:
#initial biological parameters
Kp=0.1 #half-saturation constant for Pnut
Kn=1 #half-saturation constant for Nnut
mu= 1/sperd #growth rate per sec
phi=0.5 #fraction of uptake remineralized locally
Snp=16 #redfield N:P ratio of phytoplankton
m2=mu*0.2 #quadratic mortality
# In[8]:
#
Period=1 #period of oscillation in forcing (velocity) (yr)
w=(2*pi)/Period #frequency of oscillation
A0=0.5 #amplitude of oscillation
nn=0 #year counter
# In[9]:
it_Nx=np.arange(0,Nx-1,1)
it_Nt=np.arange(0,Nt+1,1)
f=[]
Ua=[]
for n in it_Nt:
#vary the circulation rates
for y in t:
f=A0*(m.sin(w*y/spery))
#fn.append(f)
U0_array=np.full_like(f,U0)
Ua=U0*f
U=U0+Ua
#calculate the biological rates-to be replaced by CFM
RgrowN=mu*Nbio*(Nnut/(Nnut+Kn))
RmortN=m2*Nbio**2
RbioN=RgrowN-RmortN
RnutN=-RgrowN+phi*RmortN
RbioP=RbioN/Snp
RnutP=RnutN/Snp
#update the distribution: Advection scheme
for i in it_Nx:
Pnut[i+1]=((dt/dx)*U*Pnut[i]+Pnut[i+1]+RnutP[i]*dt)/(1+dt/dx*U)
Nnut[i+1]=((dt/dx)*U*Nnut[i]+Nnut[i+1]+RnutN[i]*dt)/(1+dt/dx*U)
Pbio[i+1]=((dt/dx)*U*Pbio[i]+Pbio[i+1]+RbioP[i]*dt)/(1+dt/dx*U)
Nbio[i+1]=((dt/dx)*U*Nbio[i]+Nbio[i+1]+RbioN[i]*dt)/(1+dt/dx*U)
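# Each line above is a first-order upwind advection step written implicitly:
# rearranging (C_new[i+1] - C_old[i+1])/dt + U*(C_new[i+1] - C_new[i])/dx = R[i]
# for C_new[i+1] gives
#   C_new[i+1] = ((dt/dx)*U*C_new[i] + C_old[i+1] + R[i]*dt) / (1 + (dt/dx)*U),
# evaluated sweeping away from the coastal boundary (i = 0), so C_new[i] is
# already available when C_new[i+1] is computed.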
print((Pnut))
# In[33]:
#some plotting
ax=plt.figure(1)
plt.subplot(2,2,1)
x=np.arange(0,Lx+dx,dx)
x=x*1e-3
plt.plot(x,Pnut,marker='o',color='orange')
plt.xlabel('horizontal distance (km)')
plt.ylabel('PO4 (uM)')
plt.subplot(2,2,2)
plt.plot(x,Nnut,marker='o',color='green')
plt.xlabel('horizontal distance (km)')
plt.ylabel('NO3 (uM)')
plt.subplot(2,2,3)
plt.plot(x,Pbio,marker='o',color='red')
plt.xlabel('horizontal distance (km)')
plt.ylabel('Phyto P (uM)')
plt.subplot(2,2,4)
plt.plot(x,Nbio,marker='o',color='blue')
plt.xlabel('horizontal distance (km)')
plt.ylabel('Phyto N (uM)')
plt.tight_layout()
plt.savefig('Nutrient_Concentrations.png')
plt.show()
# In[ ]:
|
[
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.full_like",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.size",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"math.sin",
"matplotlib.pyplot.subplot",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((757, 781), 'numpy.arange', 'np.arange', (['(0)', '(T + dt)', 'dt'], {}), '(0, T + dt, dt)\n', (766, 781), True, 'import numpy as np\n'), ((1016, 1041), 'numpy.arange', 'np.arange', (['(0)', '(Lx + dx)', 'dx'], {}), '(0, Lx + dx, dx)\n', (1025, 1041), True, 'import numpy as np\n'), ((2479, 2502), 'numpy.arange', 'np.arange', (['(0)', '(Nx - 1)', '(1)'], {}), '(0, Nx - 1, 1)\n', (2488, 2502), True, 'import numpy as np\n'), ((2505, 2528), 'numpy.arange', 'np.arange', (['(0)', '(Nt + 1)', '(1)'], {}), '(0, Nt + 1, 1)\n', (2514, 2528), True, 'import numpy as np\n'), ((3347, 3360), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3357, 3360), True, 'import matplotlib.pyplot as plt\n'), ((3361, 3381), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (3372, 3381), True, 'import matplotlib.pyplot as plt\n'), ((3382, 3407), 'numpy.arange', 'np.arange', (['(0)', '(Lx + dx)', 'dx'], {}), '(0, Lx + dx, dx)\n', (3391, 3407), True, 'import numpy as np\n'), ((3413, 3458), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'Pnut'], {'marker': '"""o"""', 'color': '"""orange"""'}), "(x, Pnut, marker='o', color='orange')\n", (3421, 3458), True, 'import matplotlib.pyplot as plt\n'), ((3456, 3494), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""horizontal distance (km)"""'], {}), "('horizontal distance (km)')\n", (3466, 3494), True, 'import matplotlib.pyplot as plt\n'), ((3495, 3517), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PO4 (uM)"""'], {}), "('PO4 (uM)')\n", (3505, 3517), True, 'import matplotlib.pyplot as plt\n'), ((3518, 3538), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (3529, 3538), True, 'import matplotlib.pyplot as plt\n'), ((3537, 3581), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'Nnut'], {'marker': '"""o"""', 'color': '"""green"""'}), "(x, Nnut, marker='o', color='green')\n", (3545, 3581), True, 'import matplotlib.pyplot as plt\n'), ((3579, 3617), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""horizontal distance (km)"""'], {}), "('horizontal distance (km)')\n", (3589, 3617), True, 'import matplotlib.pyplot as plt\n'), ((3618, 3640), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""NO3 (uM)"""'], {}), "('NO3 (uM)')\n", (3628, 3640), True, 'import matplotlib.pyplot as plt\n'), ((3641, 3661), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (3652, 3661), True, 'import matplotlib.pyplot as plt\n'), ((3660, 3702), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'Pbio'], {'marker': '"""o"""', 'color': '"""red"""'}), "(x, Pbio, marker='o', color='red')\n", (3668, 3702), True, 'import matplotlib.pyplot as plt\n'), ((3700, 3738), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""horizontal distance (km)"""'], {}), "('horizontal distance (km)')\n", (3710, 3738), True, 'import matplotlib.pyplot as plt\n'), ((3739, 3765), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phyto P (uM)"""'], {}), "('Phyto P (uM)')\n", (3749, 3765), True, 'import matplotlib.pyplot as plt\n'), ((3766, 3786), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (3777, 3786), True, 'import matplotlib.pyplot as plt\n'), ((3785, 3828), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'Nbio'], {'marker': '"""o"""', 'color': '"""blue"""'}), "(x, Nbio, marker='o', color='blue')\n", (3793, 3828), True, 'import matplotlib.pyplot as plt\n'), ((3826, 3864), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""horizontal distance (km)"""'], {}), "('horizontal distance (km)')\n", 
(3836, 3864), True, 'import matplotlib.pyplot as plt\n'), ((3865, 3891), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phyto N (uM)"""'], {}), "('Phyto N (uM)')\n", (3875, 3891), True, 'import matplotlib.pyplot as plt\n'), ((3893, 3911), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3909, 3911), True, 'import matplotlib.pyplot as plt\n'), ((3912, 3954), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Nutrient_Concentrations.png"""'], {}), "('Nutrient_Concentrations.png')\n", (3923, 3954), True, 'import matplotlib.pyplot as plt\n'), ((3955, 3965), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3963, 3965), True, 'import matplotlib.pyplot as plt\n'), ((1221, 1232), 'numpy.ones', 'np.ones', (['Nx'], {}), '(Nx)\n', (1228, 1232), True, 'import numpy as np\n'), ((1969, 1982), 'numpy.size', 'np.size', (['Nbio'], {}), '(Nbio)\n', (1976, 1982), True, 'import numpy as np\n'), ((2671, 2690), 'numpy.full_like', 'np.full_like', (['f', 'U0'], {}), '(f, U0)\n', (2683, 2690), True, 'import numpy as np\n'), ((2614, 2634), 'math.sin', 'm.sin', (['(w * y / spery)'], {}), '(w * y / spery)\n', (2619, 2634), True, 'import math as m\n')]
|
"""Test weighted path counting methods."""
# pylint: disable=redefined-outer-name,too-few-public-methods
# pylint: disable=too-many-branches
import pytest
from pytest import approx
import numpy as np
import pandas as pd
from pathcensus.definitions import PathDefinitionsWeighted
from pathcensus import PathCensus
@pytest.fixture(scope="session")
def paths_edges(random_graph):
"""Fixture for generating path census data frames
for edges and node/global counts based on the `random_graph` fixture.
"""
_, S = random_graph
E = S.census("edges")
return E, S
@pytest.fixture(scope="session")
def paths_edges_nodes(paths_edges):
"""Get edge and node path/cycle counts."""
E, S = paths_edges
return E, S.census("nodes")
@pytest.fixture(scope="session")
def paths_edges_global(paths_edges):
"""Get edge and global path/cycle counts."""
E, S = paths_edges
return E, S.census("global")
@pytest.fixture(scope="session")
def graph_weights_one(random_graph):
"""Pair of :py:class:`pathcensus.PathCensus` objects for weighted and
unweighted versions of the same graph with all weights equal to ``1``.
"""
G, _ = random_graph
G.es["weight"] = np.ones((G.ecount(),))
P0 = PathCensus(G, weighted=False)
P1 = PathCensus(G, weighted=True)
return P0, P1
@pytest.fixture(scope="session")
def graph_weights_uniform(random_graph):
"""Pair of :py:class:`pathcensus.PathCensus` objects for weighted and
unweighted versions of the same graph with all weights set to a uniform
value other than ``1``.
"""
G, _ = random_graph
G.es["weight"] = 3*np.ones((G.ecount(),))
P0 = PathCensus(G, weighted=False)
P1 = PathCensus(G, weighted=True)
return P0, P1
class TestPathCounting:
"""Tests of different path counting methods.
All main path counting methods are defined for overall graph counts,
node counts and node-pair (edge) counts. The below tests check whether
the results of all different counting methods are consistent in the sense
that they give the same answers after proper summing.
"""
class TestAggregationConsistency:
"""Tests of aggregation consistency between edge, node
and global counts.
"""
paths = PathDefinitionsWeighted().get_column_names()
@pytest.mark.parametrize("path", paths)
def test_edges_to_nodes(self, path, paths_edges_nodes):
"""Check consistency between edge and node counts
of paths and cycles.
"""
E, N = paths_edges_nodes
m0 = N[path].dropna()
m1 = E[path].groupby(level="i").sum() \
.reindex(N.index) \
.fillna(0)
arules = PathDefinitionsWeighted().aggregation.get("nodes", {})
m1 /= arules.get(path, 1)
assert np.allclose(m0, m1)
@pytest.mark.parametrize("path", paths)
def test_edges_to_global(self, path, paths_edges_global):
"""Check consistency between edge and global counts
of paths and cycles.
"""
E, G = paths_edges_global
m0 = G[path].iloc[0]
m1 = E[path].sum()
arules = PathDefinitionsWeighted().aggregation.get("global", {})
m1 /= arules.get(path, 1)
assert m0 == approx(m1)
class TestCountingAgainstOtherImplementations:
"""Test weighted path counting against mean weighted local
clustering coefficient as defined by Barrat et al.
and implemented in :py:mod:`igraph`.
In general, weighted `t`-clustering should be equal to
the method by Barrat et al.
"""
@pytest.mark.parametrize("undefined", ["nan", "zero"])
def test_mean_local_clustering(self, random_graph, undefined):
G, P = random_graph
c0 = G.transitivity_avglocal_undirected(weights="weight", mode=undefined)
c1 = P.tclust(undefined=undefined).mean(skipna=False)
assert np.isnan([c0, c1]).all() or c0 == approx(c1)
class TestConsistencyBounds:
"""Test consistency in terms of bounds between open
and closed paths. In particular, closed paths (e.g. triangles)
cannot be more frequent than their open counterparts.
Moreover, relational coefficients (similarity and complementarity)
must lie between the min and max of their corresponding
clustering and closure coefficients.
"""
@pytest.mark.parametrize("mode", ["edges", "nodes", "global"])
def test_path_counts_consistency(self, random_graph, mode):
_, P = random_graph
C = P.census(mode)
tol = 1e-6
assert (C.values >= 0).all()
assert (C["twc"] <= C["tw"] + tol).all()
assert (C["thc"] <= C["th"] + tol).all()
assert (C["q0wc"] <= C["qw"] + tol).all()
assert (C["q0hc"] <= C["qh"] + tol).all()
@pytest.mark.parametrize("mode", ["edges", "nodes", "global"])
def test_similarity_coefs_consistency(self, random_graph, mode):
_, P = random_graph
C = P.coefs(mode).dropna()
vals = C.values
assert (vals >= -1e-6).all() and (vals <= 1+1e-6).all()
if mode == "nodes":
m0 = C[["tclust", "tclosure"]].min(axis=1)
m1 = C[["tclust", "tclosure"]].max(axis=1)
assert (C["sim"].between(m0, m1)).all()
@pytest.mark.parametrize("mode", ["edges", "nodes", "global"])
def test_complementarity_coefs_consistency(self, random_graph, mode):
_, P = random_graph
C = P.coefs(mode).dropna()
vals = C.values
assert (vals >= -1e-6).all() and (vals <= 1+1e-6).all()
if mode == "nodes":
m0 = C[["qclust", "qclosure"]].min(axis=1)
m1 = C[["qclust", "qclosure"]].max(axis=1)
assert (C["comp"].between(m0, m1)).all()
class TestConsistencyWithUnweightedMethods:
"""Test whether weighted counts with uniform weights
are consistent with the unweighted counts etc.
"""
@staticmethod
def to_unweighted(df):
"""Combine weighted counts so they have the same columns
as unweighted counts.
"""
return pd.DataFrame({
"t": (df["twc"] + df["thc"]) / 2,
"tw": df["tw"],
"th": df["th"],
"q0": (df["q0wc"] + df["q0hc"]) / 2,
"qw": df["qw"],
"qh": df["qh"]
})
@pytest.mark.parametrize("mode", ["edges", "nodes", "global"])
def test_path_counts_consistency(self, graph_weights_one, mode):
"""Test consistency of path counts."""
P0, P1 = graph_weights_one
assert P1.weighted
p0 = P0.census(mode)
p1 = self.to_unweighted(P1.census(mode))
assert np.allclose(p0.values, p1.values)
@pytest.mark.parametrize("mode", ["edges", "nodes", "global"])
def test_coefs_consistency(self, graph_weights_uniform, mode):
"""Test consistency of coefficients."""
P0, P1 = graph_weights_uniform
assert P1.weighted
c0 = P0.coefs(mode, undefined="zero")
c1 = P1.coefs(mode, undefined="zero")
assert np.allclose(c0.values, c1.values)
class TestSimpleMotifs:
"""Test agreement with counts expected for simple motifs
such as triangle, quadrangle and star.
"""
simcoefs = ("sim_g", "sim", "tclust", "tclosure")
compcoefs = ("comp_g", "comp", "qclust", "qclosure")
def approx_in(self, obj, vals, allow_nan=False, **kwds):
"""Auxiliary method for approximate testing if
values in ``obj`` are in ``vals``.
"""
x = obj.values
l = np.zeros_like(x, dtype=bool)
for val in vals:
if allow_nan:
l |= np.isnan(x) | np.isclose(x, val, **kwds)
else:
l |= np.isclose(x, val, **kwds)
return l.all()
def approx_between(self, obj, lo, hi, allow_nan=False, tol=1e-6):
"""Auxiliary method for approximate testing if
values in ``obj`` are between ``lo`` and ``hi``.
"""
x = obj.values
l = np.isnan(x) if allow_nan else np.zeros_like(x, dtype=bool)
return (l | (x >= lo-tol) | (x <= hi+tol)).all()
@pytest.mark.parametrize("undefined", ["nan", "zero"])
def test_simple_motifs_global(self, simple_motif, undefined):
"""Check values of global structural coefficients
in simple motifs.
"""
motif, P = simple_motif
kwds = dict(undefined=undefined)
sim = P.simcoefs("global", **kwds)
comp = P.compcoefs("global", **kwds)
if motif == "triangle":
assert self.approx_in(sim, [1])
assert self.approx_in(comp, [0], allow_nan=True)
elif motif == "quadrangle":
assert self.approx_in(sim, [0])
assert self.approx_in(comp, [1])
@pytest.mark.parametrize("undefined", ["nan", "zero"])
def test_simple_motifs_nodes(self, simple_motif, undefined):
"""Check values of node-wise structural coefficients
in simple motifs.
"""
motif, P = simple_motif
kwds = dict(undefined=undefined)
sim = P.simcoefs("nodes", **kwds)
comp = P.compcoefs("nodes", **kwds)
if motif == "triangle":
assert self.approx_in(sim, [1])
assert self.approx_in(comp, [0], allow_nan=True)
elif motif == "quadrangle":
assert self.approx_in(sim, [0])
assert self.approx_in(comp, [1])
@pytest.mark.parametrize("undefined", ["nan", "zero"])
def test_simple_motifs_edges(self, simple_motif, undefined):
"""Check values of edge-wise structural coefficients
in simple motifs.
"""
motif, P = simple_motif
kwds = dict(undefined=undefined)
sim = P.similarity("edges", **kwds)
comp = P.complementarity("edges", **kwds)
if motif == "triangle":
assert self.approx_in(sim, [1])
assert self.approx_in(comp, [0], allow_nan=True)
elif motif == "quadrangle":
assert self.approx_in(sim, [0])
assert self.approx_in(comp, [1])
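# Sketch of the relationship exercised by TestAggregationConsistency above
# (illustrative only; the example graph is an assumption, not part of the
# test suite):
#
# import igraph as ig
# from pathcensus import PathCensus
# g = ig.Graph.Famous("Zachary")
# P = PathCensus(g)
# E, N = P.census("edges"), P.census("nodes")
# # Summing an edge-level path column over i and dividing by the node-level
# # aggregation rule should reproduce the node-level column, as asserted in
# # test_edges_to_nodes.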
|
[
"pathcensus.PathCensus",
"pytest.approx",
"numpy.allclose",
"numpy.isclose",
"pandas.DataFrame",
"pathcensus.definitions.PathDefinitionsWeighted",
"pytest.mark.parametrize",
"numpy.isnan",
"pytest.fixture",
"numpy.zeros_like"
] |
[((316, 347), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (330, 347), False, 'import pytest\n'), ((575, 606), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (589, 606), False, 'import pytest\n'), ((746, 777), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (760, 777), False, 'import pytest\n'), ((922, 953), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (936, 953), False, 'import pytest\n'), ((1312, 1343), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1326, 1343), False, 'import pytest\n'), ((1224, 1253), 'pathcensus.PathCensus', 'PathCensus', (['G'], {'weighted': '(False)'}), '(G, weighted=False)\n', (1234, 1253), False, 'from pathcensus import PathCensus\n'), ((1263, 1291), 'pathcensus.PathCensus', 'PathCensus', (['G'], {'weighted': '(True)'}), '(G, weighted=True)\n', (1273, 1291), False, 'from pathcensus import PathCensus\n'), ((1644, 1673), 'pathcensus.PathCensus', 'PathCensus', (['G'], {'weighted': '(False)'}), '(G, weighted=False)\n', (1654, 1673), False, 'from pathcensus import PathCensus\n'), ((1683, 1711), 'pathcensus.PathCensus', 'PathCensus', (['G'], {'weighted': '(True)'}), '(G, weighted=True)\n', (1693, 1711), False, 'from pathcensus import PathCensus\n'), ((2307, 2345), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""path"""', 'paths'], {}), "('path', paths)\n", (2330, 2345), False, 'import pytest\n'), ((2871, 2909), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""path"""', 'paths'], {}), "('path', paths)\n", (2894, 2909), False, 'import pytest\n'), ((3688, 3741), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""undefined"""', "['nan', 'zero']"], {}), "('undefined', ['nan', 'zero'])\n", (3711, 3741), False, 'import pytest\n'), ((4499, 4560), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['edges', 'nodes', 'global']"], {}), "('mode', ['edges', 'nodes', 'global'])\n", (4522, 4560), False, 'import pytest\n'), ((4980, 5041), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['edges', 'nodes', 'global']"], {}), "('mode', ['edges', 'nodes', 'global'])\n", (5003, 5041), False, 'import pytest\n'), ((5498, 5559), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['edges', 'nodes', 'global']"], {}), "('mode', ['edges', 'nodes', 'global'])\n", (5521, 5559), False, 'import pytest\n'), ((6651, 6712), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['edges', 'nodes', 'global']"], {}), "('mode', ['edges', 'nodes', 'global'])\n", (6674, 6712), False, 'import pytest\n'), ((7056, 7117), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['edges', 'nodes', 'global']"], {}), "('mode', ['edges', 'nodes', 'global'])\n", (7079, 7117), False, 'import pytest\n'), ((8611, 8664), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""undefined"""', "['nan', 'zero']"], {}), "('undefined', ['nan', 'zero'])\n", (8634, 8664), False, 'import pytest\n'), ((9320, 9373), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""undefined"""', "['nan', 'zero']"], {}), "('undefined', ['nan', 'zero'])\n", (9343, 9373), False, 'import pytest\n'), ((10029, 10082), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""undefined"""', "['nan', 'zero']"], {}), "('undefined', ['nan', 'zero'])\n", (10052, 10082), False, 'import 
pytest\n'), ((2841, 2860), 'numpy.allclose', 'np.allclose', (['m0', 'm1'], {}), '(m0, m1)\n', (2852, 2860), True, 'import numpy as np\n'), ((6381, 6540), 'pandas.DataFrame', 'pd.DataFrame', (["{'t': (df['twc'] + df['thc']) / 2, 'tw': df['tw'], 'th': df['th'], 'q0': (\n df['q0wc'] + df['q0hc']) / 2, 'qw': df['qw'], 'qh': df['qh']}"], {}), "({'t': (df['twc'] + df['thc']) / 2, 'tw': df['tw'], 'th': df[\n 'th'], 'q0': (df['q0wc'] + df['q0hc']) / 2, 'qw': df['qw'], 'qh': df['qh']}\n )\n", (6393, 6540), True, 'import pandas as pd\n'), ((7012, 7045), 'numpy.allclose', 'np.allclose', (['p0.values', 'p1.values'], {}), '(p0.values, p1.values)\n', (7023, 7045), True, 'import numpy as np\n'), ((7434, 7467), 'numpy.allclose', 'np.allclose', (['c0.values', 'c1.values'], {}), '(c0.values, c1.values)\n', (7445, 7467), True, 'import numpy as np\n'), ((7973, 8001), 'numpy.zeros_like', 'np.zeros_like', (['x'], {'dtype': 'bool'}), '(x, dtype=bool)\n', (7986, 8001), True, 'import numpy as np\n'), ((2252, 2277), 'pathcensus.definitions.PathDefinitionsWeighted', 'PathDefinitionsWeighted', ([], {}), '()\n', (2275, 2277), False, 'from pathcensus.definitions import PathDefinitionsWeighted\n'), ((3332, 3342), 'pytest.approx', 'approx', (['m1'], {}), '(m1)\n', (3338, 3342), False, 'from pytest import approx\n'), ((8481, 8492), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (8489, 8492), True, 'import numpy as np\n'), ((8511, 8539), 'numpy.zeros_like', 'np.zeros_like', (['x'], {'dtype': 'bool'}), '(x, dtype=bool)\n', (8524, 8539), True, 'import numpy as np\n'), ((4050, 4060), 'pytest.approx', 'approx', (['c1'], {}), '(c1)\n', (4056, 4060), False, 'from pytest import approx\n'), ((8174, 8200), 'numpy.isclose', 'np.isclose', (['x', 'val'], {}), '(x, val, **kwds)\n', (8184, 8200), True, 'import numpy as np\n'), ((2729, 2754), 'pathcensus.definitions.PathDefinitionsWeighted', 'PathDefinitionsWeighted', ([], {}), '()\n', (2752, 2754), False, 'from pathcensus.definitions import PathDefinitionsWeighted\n'), ((3213, 3238), 'pathcensus.definitions.PathDefinitionsWeighted', 'PathDefinitionsWeighted', ([], {}), '()\n', (3236, 3238), False, 'from pathcensus.definitions import PathDefinitionsWeighted\n'), ((4016, 4034), 'numpy.isnan', 'np.isnan', (['[c0, c1]'], {}), '([c0, c1])\n', (4024, 4034), True, 'import numpy as np\n'), ((8086, 8097), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (8094, 8097), True, 'import numpy as np\n'), ((8100, 8126), 'numpy.isclose', 'np.isclose', (['x', 'val'], {}), '(x, val, **kwds)\n', (8110, 8126), True, 'import numpy as np\n')]
|
""" Custom Fairness Metrics
Note that ratio and difference computation is handled by AIF360's
sklearn.metrics module. As of the V 0.4.0 release, these are calculated as
[unprivileged/privileged] and [unprivileged - privileged], respectively
"""
from typing import Callable
from aif360.sklearn.metrics import difference, ratio
import numpy as np
import pandas as pd
from warnings import catch_warnings, filterwarnings
from .performance_metrics import (
false_positive_rate,
true_positive_rate,
true_negative_rate,
false_negative_rate,
precision,
)
def __manage_undefined_ratios(func: Callable):
""" Wraps ratio functions to return NaN values instead of 0.0 in cases
where the ratio is undefined
"""
def wrapper(*args, **kwargs):
funcname = getattr(func, "__name__", "an unknown function")
msg = (
"The ratio is ill-defined and being set to 0.0 because"
+ f" '{funcname}' for privileged samples is 0."
)
with catch_warnings(record=True) as w:
filterwarnings("ignore", message=msg)
res = func(*args, **kwargs)
if len(w) > 0:
return np.nan
else:
return res
return wrapper
@__manage_undefined_ratios
def ppv_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
""" Returns the between-group ratio of Postive Predictive Values
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
pa_name (str): name of the protected attribute
priv_grp (int, optional): value of the protected attribute for the privileged group. Defaults to 1.
Returns:
Number
"""
return ratio(precision, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp)
@__manage_undefined_ratios
def tpr_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
""" Returns the between-group ratio of True Positive Rates
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
pa_name (str): name of the protected attribute
priv_grp (int, optional): value of the protected attribute for the privileged group. Defaults to 1.
Returns:
Number
"""
return ratio(
true_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
)
@__manage_undefined_ratios
def fpr_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
""" Returns the between-group ratio of False Positive Rates
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
pa_name (str): name of the protected attribute
priv_grp (int, optional): value of the protected attribute for the privileged group. Defaults to 1.
Returns:
Number
"""
return ratio(
false_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
)
@__manage_undefined_ratios
def tnr_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
""" Returns the between-group ratio of True Negative Rates
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
pa_name (str): name of the protected attribute
priv_grp (int, optional): value of the protected attribute for the privileged group. Defaults to 1.
Returns:
Number
"""
return ratio(
true_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
)
@__manage_undefined_ratios
def fnr_ratio(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
""" Returns the between-group ratio of False Negative Rates
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
pa_name (str): name of the protected attribute
priv_grp (int, optional): value of the protected attribute for the privileged group. Defaults to 1.
Returns:
Number
"""
return ratio(
false_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
)
def ppv_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
""" Returns the between-group difference of Positive Predictive Values
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
pa_name (str): name of the protected attribute
priv_grp (int, optional): value of the protected attribute for the privileged group. Defaults to 1.
Returns:
Number
"""
return difference(precision, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp)
def tpr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
""" Returns the between-group difference of True Positive Rates
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
pa_name (str): name of the protected attribute
priv_grp (int, optional): value of the protected attribute for the privileged group. Defaults to 1.
Returns:
Number
"""
return difference(
true_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
)
def fpr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
""" Returns the between-group difference of False Positive Rates
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
pa_name (str): name of the protected attribute
priv_grp (int, optional): value of the protected attribute for the privileged group. Defaults to 1.
Returns:
Number
"""
return difference(
false_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
)
def tnr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
""" Returns the between-group difference of True Negative Rates
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
pa_name (str): name of the protected attribute
priv_grp (int, optional): value of the protected attribute for the privileged group. Defaults to 1.
Returns:
Number
"""
return difference(
true_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
)
def fnr_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
""" Returns the between-group difference of False Negative Rates
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
pa_name (str): name of the protected attribute
priv_grp (int, optional): value of the protected attribute for the privileged group. Defaults to 1.
Returns:
Number
"""
return difference(
false_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp
)
""" Combined Metrics """
def eq_odds_diff(y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1):
""" Returns the greatest discrepancy between the between-group FPR
difference and the between-group TPR difference
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
pa_name (str): name of the protected attribute
priv_grp (int, optional): value of the protected attribute for the privileged group. Defaults to 1.
Returns:
Number
"""
fprD = fpr_diff(y_true, y_pred, pa_name=pa_name, priv_grp=priv_grp)
tprD = tpr_diff(y_true, y_pred, pa_name=pa_name, priv_grp=priv_grp)
if abs(fprD) > abs(tprD):
return fprD
else:
return tprD
def eq_odds_ratio(
y_true: pd.Series, y_pred: pd.Series, pa_name: str, priv_grp: int = 1
):
""" Returns the greatest discrepancy between the between-group FPR
ratio and the between-group TPR ratio
Args:
y_true (pd.Series): true target values
y_pred (pd.Series): predicted target values
pa_name (str): name of the protected attribute
priv_grp (int, optional): value of the protected attribute for the privileged group. Defaults to 1.
"""
fprR = fpr_ratio(y_true, y_pred, pa_name=pa_name, priv_grp=priv_grp)
tprR = tpr_ratio(y_true, y_pred, pa_name=pa_name, priv_grp=priv_grp)
if np.isnan(fprR) or np.isnan(tprR):
return np.nan
elif round(abs(fprR - 1), 6) > round(abs(tprR - 1), 6):
return fprR
else:
return tprR
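# Usage sketch (illustrative only). aif360's sklearn-style metrics expect the
# protected attribute to live in the index of the series, with `prot_attr`
# naming that index level; the toy series below are invented for the example.
#
# import pandas as pd
# idx = pd.Index([0, 0, 1, 1], name="group")
# y_true = pd.Series([1, 0, 1, 0], index=idx)
# y_pred = pd.Series([1, 0, 0, 0], index=idx)
# tpr_ratio(y_true, y_pred, pa_name="group", priv_grp=1)  # unprivileged / privileged
# tpr_diff(y_true, y_pred, pa_name="group", priv_grp=1)   # unprivileged - privileged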
|
[
"aif360.sklearn.metrics.ratio",
"aif360.sklearn.metrics.difference",
"warnings.catch_warnings",
"numpy.isnan",
"warnings.filterwarnings"
] |
[((1702, 1774), 'aif360.sklearn.metrics.ratio', 'ratio', (['precision', 'y_true', 'y_pred'], {'prot_attr': 'pa_name', 'priv_group': 'priv_grp'}), '(precision, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp)\n', (1707, 1774), False, 'from aif360.sklearn.metrics import difference, ratio\n'), ((2221, 2307), 'aif360.sklearn.metrics.ratio', 'ratio', (['true_positive_rate', 'y_true', 'y_pred'], {'prot_attr': 'pa_name', 'priv_group': 'priv_grp'}), '(true_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=\n priv_grp)\n', (2226, 2307), False, 'from aif360.sklearn.metrics import difference, ratio\n'), ((2764, 2851), 'aif360.sklearn.metrics.ratio', 'ratio', (['false_positive_rate', 'y_true', 'y_pred'], {'prot_attr': 'pa_name', 'priv_group': 'priv_grp'}), '(false_positive_rate, y_true, y_pred, prot_attr=pa_name, priv_group=\n priv_grp)\n', (2769, 2851), False, 'from aif360.sklearn.metrics import difference, ratio\n'), ((3307, 3393), 'aif360.sklearn.metrics.ratio', 'ratio', (['true_negative_rate', 'y_true', 'y_pred'], {'prot_attr': 'pa_name', 'priv_group': 'priv_grp'}), '(true_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=\n priv_grp)\n', (3312, 3393), False, 'from aif360.sklearn.metrics import difference, ratio\n'), ((3850, 3937), 'aif360.sklearn.metrics.ratio', 'ratio', (['false_negative_rate', 'y_true', 'y_pred'], {'prot_attr': 'pa_name', 'priv_group': 'priv_grp'}), '(false_negative_rate, y_true, y_pred, prot_attr=pa_name, priv_group=\n priv_grp)\n', (3855, 3937), False, 'from aif360.sklearn.metrics import difference, ratio\n'), ((4377, 4454), 'aif360.sklearn.metrics.difference', 'difference', (['precision', 'y_true', 'y_pred'], {'prot_attr': 'pa_name', 'priv_group': 'priv_grp'}), '(precision, y_true, y_pred, prot_attr=pa_name, priv_group=priv_grp)\n', (4387, 4454), False, 'from aif360.sklearn.metrics import difference, ratio\n'), ((4878, 4968), 'aif360.sklearn.metrics.difference', 'difference', (['true_positive_rate', 'y_true', 'y_pred'], {'prot_attr': 'pa_name', 'priv_group': 'priv_grp'}), '(true_positive_rate, y_true, y_pred, prot_attr=pa_name,\n priv_group=priv_grp)\n', (4888, 4968), False, 'from aif360.sklearn.metrics import difference, ratio\n'), ((5403, 5494), 'aif360.sklearn.metrics.difference', 'difference', (['false_positive_rate', 'y_true', 'y_pred'], {'prot_attr': 'pa_name', 'priv_group': 'priv_grp'}), '(false_positive_rate, y_true, y_pred, prot_attr=pa_name,\n priv_group=priv_grp)\n', (5413, 5494), False, 'from aif360.sklearn.metrics import difference, ratio\n'), ((5928, 6018), 'aif360.sklearn.metrics.difference', 'difference', (['true_negative_rate', 'y_true', 'y_pred'], {'prot_attr': 'pa_name', 'priv_group': 'priv_grp'}), '(true_negative_rate, y_true, y_pred, prot_attr=pa_name,\n priv_group=priv_grp)\n', (5938, 6018), False, 'from aif360.sklearn.metrics import difference, ratio\n'), ((6453, 6544), 'aif360.sklearn.metrics.difference', 'difference', (['false_negative_rate', 'y_true', 'y_pred'], {'prot_attr': 'pa_name', 'priv_group': 'priv_grp'}), '(false_negative_rate, y_true, y_pred, prot_attr=pa_name,\n priv_group=priv_grp)\n', (6463, 6544), False, 'from aif360.sklearn.metrics import difference, ratio\n'), ((7819, 7833), 'numpy.isnan', 'np.isnan', (['fprR'], {}), '(fprR)\n', (7827, 7833), True, 'import numpy as np\n'), ((7837, 7851), 'numpy.isnan', 'np.isnan', (['tprR'], {}), '(tprR)\n', (7845, 7851), True, 'import numpy as np\n'), ((1020, 1047), 'warnings.catch_warnings', 'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (1034, 1047), 
False, 'from warnings import catch_warnings, filterwarnings\n'), ((1066, 1103), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {'message': 'msg'}), "('ignore', message=msg)\n", (1080, 1103), False, 'from warnings import catch_warnings, filterwarnings\n')]
|
#***************************************************#
# This file is part of PFNET. #
# #
# Copyright (c) 2015, <NAME>. #
# #
# PFNET is released under the BSD 2-clause license. #
#***************************************************#
# Optimization Problems - Constraints
import sys
sys.path.append('.')
import pfnet
net = pfnet.Parser(sys.argv[1]).parse(sys.argv[1])
net.set_flags('bus',
'variable',
'any',
['voltage magnitude','voltage angle'])
print(net.num_vars == 2*net.num_buses)
constr = pfnet.Constraint('AC power balance',net)
print(constr.name == 'AC power balance')
x = net.get_var_values()
constr.analyze()
print(constr.num_extra_vars)
constr.eval(x + 0.01)
constr.eval(x)
import numpy as np
f = constr.f
print(type(f), f.shape)
print(np.linalg.norm(f,np.inf))
bus = net.get_bus(5)
Hi = constr.get_H_single(bus.dP_index)
print(type(Hi), Hi.shape, Hi.nnz)
coefficients = np.random.randn(f.size)
constr.combine_H(coefficients)
H = constr.H_combined
print(type(H), H.shape, H.nnz)
|
[
"pfnet.Parser",
"pfnet.Constraint",
"numpy.linalg.norm",
"sys.path.append",
"numpy.random.randn"
] |
[((413, 433), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (428, 433), False, 'import sys\n'), ((671, 712), 'pfnet.Constraint', 'pfnet.Constraint', (['"""AC power balance"""', 'net'], {}), "('AC power balance', net)\n", (687, 712), False, 'import pfnet\n'), ((1071, 1094), 'numpy.random.randn', 'np.random.randn', (['f.size'], {}), '(f.size)\n', (1086, 1094), True, 'import numpy as np\n'), ((932, 957), 'numpy.linalg.norm', 'np.linalg.norm', (['f', 'np.inf'], {}), '(f, np.inf)\n', (946, 957), True, 'import numpy as np\n'), ((454, 479), 'pfnet.Parser', 'pfnet.Parser', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (466, 479), False, 'import pfnet\n')]
|
import os
import pytest
import pandas as pd
import numpy as np
from shclassify.utils import (inverse_logit,
choose_from_multinomial_probs,
choose_from_binary_probs)
def test_inverse_logit():
assert inverse_logit(0) == 0.5
def test_choose_from_multinomial_probs():
n_obs = 3
classes = ['a', 'b', 'c']
df = pd.DataFrame(
np.random.uniform(size=(n_obs,len(classes))), columns=classes
)
classes = choose_from_multinomial_probs(df)
assert type(classes) is pd.DataFrame
assert classes.shape == (n_obs, 1)
assert classes.columns == ['class']
def in_classes(x, classes=classes):
x in classes
assert classes['class'].apply(in_classes).all()
def test_choose_from_multinomial_probs_with_bad_input():
n_obs = 3
classes = ['a']
df = pd.DataFrame(
np.random.uniform(size=(n_obs,len(classes))), columns=classes
)
with pytest.raises(ValueError) as e:
choose_from_multinomial_probs(df)
assert 'Data frame must have more than 1 column' in str(e.value)
def test_choose_from_binary_probs():
n_obs = 3
df = pd.DataFrame(
np.random.uniform(size=(n_obs,1))
)
classes = choose_from_binary_probs(df, 'true', 'false')
assert type(classes) is pd.DataFrame
assert classes.shape == (n_obs, 1)
assert classes.applymap(lambda x: x in ['true', 'false']).all()[0]
assert classes.columns == ['class']
def test_choose_from_binary_probs_with_bad_shape():
n_obs = 3
classes = ['a', 'b']
df = pd.DataFrame(
np.random.uniform(size=(n_obs,len(classes))), columns=classes
)
with pytest.raises(ValueError) as e:
choose_from_binary_probs(df, 'true', 'false')
assert 'Data frame must have 1 column' == str(e.value)
def test_choose_from_binary_probs_with_bad_args():
n_obs = 3
df = pd.DataFrame(
np.random.uniform(size=(n_obs,1))
)
with pytest.raises(ValueError) as e:
classes = choose_from_binary_probs(df, 'true', 'true')
assert 'Class names for true and false results must differ' == str(e.value)
with pytest.raises(ValueError) as e:
classes = choose_from_binary_probs(df, 'true', 'false', threshold=50)
assert 'Threshold must be between 0 and 1' == str(e.value)
|
[
"shclassify.utils.choose_from_multinomial_probs",
"shclassify.utils.inverse_logit",
"pytest.raises",
"numpy.random.uniform",
"shclassify.utils.choose_from_binary_probs"
] |
[((489, 522), 'shclassify.utils.choose_from_multinomial_probs', 'choose_from_multinomial_probs', (['df'], {}), '(df)\n', (518, 522), False, 'from shclassify.utils import inverse_logit, choose_from_multinomial_probs, choose_from_binary_probs\n'), ((1242, 1287), 'shclassify.utils.choose_from_binary_probs', 'choose_from_binary_probs', (['df', '"""true"""', '"""false"""'], {}), "(df, 'true', 'false')\n", (1266, 1287), False, 'from shclassify.utils import inverse_logit, choose_from_multinomial_probs, choose_from_binary_probs\n'), ((264, 280), 'shclassify.utils.inverse_logit', 'inverse_logit', (['(0)'], {}), '(0)\n', (277, 280), False, 'from shclassify.utils import inverse_logit, choose_from_multinomial_probs, choose_from_binary_probs\n'), ((960, 985), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (973, 985), False, 'import pytest\n'), ((1000, 1033), 'shclassify.utils.choose_from_multinomial_probs', 'choose_from_multinomial_probs', (['df'], {}), '(df)\n', (1029, 1033), False, 'from shclassify.utils import inverse_logit, choose_from_multinomial_probs, choose_from_binary_probs\n'), ((1187, 1221), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n_obs, 1)'}), '(size=(n_obs, 1))\n', (1204, 1221), True, 'import numpy as np\n'), ((1681, 1706), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1694, 1706), False, 'import pytest\n'), ((1721, 1766), 'shclassify.utils.choose_from_binary_probs', 'choose_from_binary_probs', (['df', '"""true"""', '"""false"""'], {}), "(df, 'true', 'false')\n", (1745, 1766), False, 'from shclassify.utils import inverse_logit, choose_from_multinomial_probs, choose_from_binary_probs\n'), ((1924, 1958), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n_obs, 1)'}), '(size=(n_obs, 1))\n', (1941, 1958), True, 'import numpy as np\n'), ((1974, 1999), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1987, 1999), False, 'import pytest\n'), ((2024, 2068), 'shclassify.utils.choose_from_binary_probs', 'choose_from_binary_probs', (['df', '"""true"""', '"""true"""'], {}), "(df, 'true', 'true')\n", (2048, 2068), False, 'from shclassify.utils import inverse_logit, choose_from_multinomial_probs, choose_from_binary_probs\n'), ((2160, 2185), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2173, 2185), False, 'import pytest\n'), ((2210, 2269), 'shclassify.utils.choose_from_binary_probs', 'choose_from_binary_probs', (['df', '"""true"""', '"""false"""'], {'threshold': '(50)'}), "(df, 'true', 'false', threshold=50)\n", (2234, 2269), False, 'from shclassify.utils import inverse_logit, choose_from_multinomial_probs, choose_from_binary_probs\n')]
|
import numpy as np
from scipy import interpolate, signal
from scipy.special import gamma
import ndmath
import warnings
import pkg_resources
class PlaningBoat():
"""Prismatic planing craft
Attributes:
speed (float): Speed (m/s). It is an input to :class:`PlaningBoat`.
weight (float): Weight (N). It is an input to :class:`PlaningBoat`.
beam (float): Beam (m). It is an input to :class:`PlaningBoat`.
lcg (float): Longitudinal center of gravity, measured from the stern (m). It is an input to :class:`PlaningBoat`.
vcg (float): Vertical center of gravity, measured from the keel (m). It is an input to :class:`PlaningBoat`.
r_g (float): Radius of gyration (m). It is an input to :class:`PlaningBoat`.
beta (float): Deadrise (deg). It is an input to :class:`PlaningBoat`.
epsilon (float): Thrust angle w.r.t. keel, CCW with body-fixed origin at 9 o'clock (deg). It is an input to :class:`PlaningBoat`.
vT (float): Thrust vertical distance, measured from keel, and positive up (m). It is an input to :class:`PlaningBoat`.
lT (float): Thrust horizontal distance, measured from stern, and positive forward (m). It is an input to :class:`PlaningBoat`.
length (float): Vessel LOA for seaway behavior estimates (m). Defaults to None. It is an input to :class:`PlaningBoat`.
H_sig (float): Significant wave height in an irregular sea state (m). Defaults to None. It is an input to :class:`PlaningBoat`.
ahr (float): Average hull roughness (m). Defaults to 150*10**-6. It is an input to :class:`PlaningBoat`.
Lf (float): Flap chord (m). Defaults to 0. It is an input to :class:`PlaningBoat`.
sigma (float): Flap span-beam ratio (dimensionless). Defaults to 0. It is an input to :class:`PlaningBoat`.
delta (float): Flap deflection (deg). Defaults to 0. It is an input to :class:`PlaningBoat`.
l_air (float): Distance from stern to center of air pressure (m). Defaults to 0. It is an input to :class:`PlaningBoat`.
h_air (float): Height from keel to top of square which bounds the air-drag-inducing area (m). Defaults to 0. It is an input to :class:`PlaningBoat`.
b_air (float): Transverse width of square which bounds the air-drag-inducing area (m). Defaults to 0. It is an input to :class:`PlaningBoat`.
C_shape (float): Area coefficient for air-drag-inducing area (dimensionless). C_shape = 1 means the air drag reference area is h_air*b_air. Defaults to 0. It is an input to :class:`PlaningBoat`.
C_D (float): Air drag coefficient (dimensionless). Defaults to 0.7. It is an input to :class:`PlaningBoat`.
rho (float): Water density (kg/m^3). Defaults to 1025.87. It is an input to :class:`PlaningBoat`.
nu (float): Water kinematic viscosity (m^2/s). Defaults to 1.19*10**-6. It is an input to :class:`PlaningBoat`.
rho_air (float): Air density (kg/m^3). Defaults to 1.225. It is an input to :class:`PlaningBoat`.
g (float): Gravitational acceleration (m/s^2). Defaults to 9.8066. It is an input to :class:`PlaningBoat`.
z_wl (float): Vertical distance of center of gravity to the calm water line (m). Defaults to 0. It is an input to :class:`PlaningBoat`, but modified when running :meth:`get_steady_trim`.
tau (float): Trim angle (deg). Defaults to 5. It is an input to :class:`PlaningBoat`, but modified when running :meth:`get_steady_trim`.
eta_3 (float): Additional heave (m). Initialized to 0.
eta_5 (float): Additional trim (deg). Initialized to 0.
wetted_lengths_type (int): 1 = Use Faltinsen 2005 wave rise approximation, 2 = Use Savitsky's '64 approach, 3 = Use Savitsky's '76 approach. Defaults to 1. It is an input to :class:`PlaningBoat`.
z_max_type (int): 1 = Uses 3rd order polynomial fit, 2 = Uses cubic interpolation from table. This is only used if wetted_lengths_type == 1. Defaults to 1. It is an input to :class:`PlaningBoat`.
L_K (float): Keel wetted length (m). It is updated when running :meth:`get_geo_lengths`.
L_C (float): Chine wetted length (m). It is updated when running :meth:`get_geo_lengths`.
lambda_W (float): Mean wetted-length to beam ratio, (L_K+L_C)/(2*beam) (dimensionless). It is updated when running :meth:`get_geo_lengths`.
x_s (float): Distance from keel/water-line intersection to start of wetted chine (m). It is updated when running :meth:`get_geo_lengths`.
z_max (float): Maximum pressure coordinate coefficient, z_max/Ut (dimensionless). It is updated when running :meth:`get_geo_lengths`.
hydrodynamic_force ((3,) ndarray): Hydrodynamic force (N, N, N*m). [F_x, F_z, M_cg] with x, y, rot directions in intertial coordinates. It is updated when running :meth:`get_forces`.
skin_friction ((3,) ndarray): Skin friction force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
air_resistance ((3,) ndarray): Air resistance force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
flap_force ((3,) ndarray): Flap resultant force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
thrust_force ((3,) ndarray): Thrust resultant force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
net_force ((3,) ndarray): Net force (N, N, N*m). [F_x, F_z, M_cg]. It is updated when running :meth:`get_forces`.
mass_matrix ((2, 2) ndarray): Mass coefficients matrix. [[A_33 (kg), A_35 (kg*m/rad)], [A_53 (kg*m), A_55 (kg*m^2/rad)]]. It is updated when running :meth:`get_eom_matrices`.
damping_matrix ((2, 2) ndarray): Damping coefficients matrix. [[B_33 (kg/s), B_35 (kg*m/(s*rad))], [B_53 (kg*m/s), B_55 (kg*m**2/(s*rad))]]. It is updated when running :meth:`get_eom_matrices`.
restoring_matrix ((2, 2) ndarray): Restoring coefficients matrix. [[C_33 (N/m), C_35 (N/rad)], [C_53 (N), C_55 (N*m/rad)]]. It is updated when running :meth:`get_eom_matrices`.
porpoising (list): [[eigenvalue result (bool), est. pitch settling time (s)], [Savitsky chart result (bool), critical trim angle (deg)]]. It is updated when running :meth:`check_porpoising`.
seaway_drag_type (int): 1 = Use Savitsky's '76 approximation, 2 = Use Fridsma's '71 designs charts. Defaults to 1. It is an input to :class:`PlaningBoat`.
avg_impact_acc ((2,) ndarray): Average impact acceleration at center of gravity and bow (g's). [n_cg, n_bow]. It is updated when running :meth:`get_seaway_behavior`.
R_AW (float): Added resistance in waves (N). It is updated when running :meth:`get_seaway_behavior`.
"""
def __init__(self, speed, weight, beam, lcg, vcg, r_g, beta, epsilon, vT, lT, length=None, H_sig=None, ahr=150e-6, Lf=0, sigma=0, delta=0, l_air=0, h_air=0, b_air=0, C_shape=0, C_D=0.7, z_wl=0, tau=5, rho=1025.87, nu=1.19e-6, rho_air=1.225, g=9.8066, wetted_lengths_type=1, z_max_type=1, seaway_drag_type=1):
"""Initialize attributes for PlaningBoat
Args:
speed (float): Speed (m/s).
weight (float): Weight (N).
beam (float): Beam (m).
lcg (float): Longitudinal center of gravity, measured from the stern (m).
vcg (float): Vertical center of gravity, measured from the keel (m).
r_g (float): Radius of gyration (m).
beta (float): Deadrise (deg).
epsilon (float): Thrust angle w.r.t. keel, CCW with body-fixed origin at 9 o'clock (deg).
vT (float): Thrust vertical distance, measured from keel, and positive up (m).
lT (float): Thrust horizontal distance, measured from stern, and positive forward (m).
length (float, optional): Vessel LOA for seaway behavior estimates (m). Defaults to None.
H_sig (float, optional): Significant wave height in an irregular sea state (m). Defaults to None.
ahr (float, optional): Average hull roughness (m). Defaults to 150*10**-6.
Lf (float, optional): Flap chord (m). Defaults to 0.
sigma (float, optional): Flap span-beam ratio (dimensionless). Defaults to 0.
delta (float, optional): Flap deflection (deg). Defaults to 0.
l_air (float, optional): Distance from stern to center of air pressure (m). Defaults to 0.
h_air (float, optional): Height from keel to top of square which bounds the air-drag-inducing area (m). Defaults to 0.
b_air (float, optional): Transverse width of square which bounds the air-drag-inducing area (m). Defaults to 0.
C_shape (float, optional): Area coefficient for air-drag-inducing area (dimensionless). C_shape = 1 means the air drag reference area is h_air*b_air. Defaults to 0.
C_D (float, optional): Air drag coefficient (dimensionless). Defaults to 0.7.
z_wl (float, optional): Vertical distance of center of gravity to the calm water line (m). Defaults to 0.
tau (float, optional): Trim angle (deg). Defaults to 5.
rho (float, optional): Water density (kg/m^3). Defaults to 1025.87.
nu (float, optional): Water kinematic viscosity (m^2/s). Defaults to 1.19*10**-6.
rho_air (float, optional): Air density (kg/m^3). Defaults to 1.225.
g (float, optional): Gravitational acceleration (m/s^2). Defaults to 9.8066.
wetted_lengths_type (int, optional): 1 = Use Faltinsen 2005 wave rise approximation, 2 = Use Savitsky's '64 approach, 3 = Use Savitsky's '76 approach. Defaults to 1.
z_max_type (int, optional): 1 = Uses 3rd order polynomial fit, 2 = Uses cubic interpolation from table. This is only used if wetted_lengths_type == 1. Defaults to 1.
seaway_drag_type (int, optional): 1 = Use Savitsky's '76 approximation, 2 = Use Fridsma's '71 designs charts. Defaults to 1.
"""
self.speed = speed
self.weight = weight
self.beam = beam
self.lcg = lcg
self.vcg = vcg
self.r_g = r_g
self.beta = beta
self.epsilon = epsilon
self.vT = vT
self.lT = lT
self.length = length
self.H_sig = H_sig
self.ahr = ahr
self.Lf = Lf
self.sigma = sigma
self.delta = delta
self.l_air = l_air
self.h_air = h_air
self.b_air= b_air
self.C_shape = C_shape
self.z_wl = z_wl
self.tau = tau
self.eta_3 = 0
self.eta_5 = 0
self.rho = rho
self.nu = nu
self.rho_air = rho_air
self.C_D = C_D
self.g = g
self.gravity_force = np.array([0, -self.weight, 0])
self.wetted_lengths_type = wetted_lengths_type
self.z_max_type = z_max_type
self.seaway_drag_type = seaway_drag_type
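# Construction sketch (values are invented for illustration; units follow the
# docstring: N, m, m/s, deg). get_steady_trim() and print_description() are the
# methods referenced in the attribute docstrings above.
#
# boat = PlaningBoat(speed=13.0, weight=8.3e5, beam=7.3, lcg=10.7, vcg=0.65,
#                    r_g=4.8, beta=15, epsilon=4, vT=0.65, lT=10.7)
# boat.get_steady_trim()       # solves for z_wl and tau at equilibrium
# boat.print_description()    # summary table of forces, trim, and power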
def print_description(self, sigFigs=7, runAllFunctions=True):
"""Returns a formatted description of the vessel.
Args:
sigFigs (int, optional): Number of significant figures to display. Defaults to 7.
runAllFunctions (bool, optional): Runs all functions with default values before printing results. Defaults to True.
"""
if runAllFunctions:
self.get_geo_lengths()
self.get_forces(runGeoLengths=False)
self.get_eom_matrices(runGeoLengths=False)
self.get_seaway_behavior()
self.check_porpoising()
volume = self.weight/(self.g*self.rho)
table = [
['---VESSEL---'],
['Speed', self.speed, 'm/s'],
['V_k', self.speed*1.944, 'knot'],
['Fn (beam)', self.speed/np.sqrt(self.g*self.beam), ''],
['Fn (volume)', self.speed/np.sqrt(self.g*(self.weight/(self.g*self.rho))**(1/3)), ''],
[''],
['Weight', self.weight, 'N'],
['Mass', self.weight/self.g, 'kg'],
['Volume', self.weight/(self.g*self.rho), 'm\u00B3'],
['Beam', self.beam, 'm'],
['LCG', self.lcg, 'm from stern'],
['VCG', self.vcg, 'm from keel'],
['R_g', self.r_g, 'm'],
['Deadrise', self.beta, 'deg'], #'\N{greek small letter beta}'
[''],
['LOA', self.length, 'm'],
['AHR', self.ahr, 'm, average hull roughness'],
[''],
['---ATTITUDE---'],
['z_wl', self.z_wl, 'm, vertical distance of center of gravity to the calm water line'],
['tau', self.tau, 'deg, trim angle'],
['\u03B7\u2083', self.eta_3, 'deg, additional heave'],
['\u03B7\u2085', self.eta_5, 'deg, additional trim'],
['Transom draft', self.L_K*np.sin((self.tau+self.eta_5)*np.pi/180), 'm, draft of keel at transom'],
[''],
['---PROPULSION---'],
['Thrust angle', self.epsilon, 'deg w.r.t. keel (CCW with body-fixed origin at 9 o\'clock)'],
['LCT', self.lT, 'm from stern, positive forward'],
['VCT', self.vT, 'm from keel, positive up'],
[''],
['---FLAP---'],
['Chord', self.Lf, 'm'],
['Span/Beam', self.sigma, ''],
['Angle', self.delta, 'deg w.r.t. keel (CCW with body-fixed origin at 9 o\'clock)'],
[''],
['---AIR DRAG---'],
['l_air', self.l_air, 'm, distance from stern to center of air pressure'],
['h_air', self.h_air, 'm, height from keel to top of square which bounds the air-drag-inducing shape'],
['b_air', self.b_air, 'm, transverse width of square which bounds the air-drag-inducing shape'],
['C_shape', self.C_shape, 'area coefficient for air-drag-inducing shape. C_shape = 1 means the air drag reference area is h_air*b_air'],
['C_D', self.C_D, 'air drag coefficient'],
[''],
['---ENVIRONMENT---'],
['\u03C1', self.rho, 'kg/m\u00B3, water density'],
['\u03BD', self.nu, 'm\u00B2/s, water kinematic viscosity'],
['\u03C1_air', self.rho_air, 'kg/m\u00B3, air density'],
['g', self.g, 'm/s\u00B2, gravitational acceleration'],
[''],
['---WETTED LENGTH OPTIONS---'],
['wetted_lengths_type', self.wetted_lengths_type, '(1 = Use Faltinsen 2005 wave rise approximation, 2 = Use Savitsky\'s \'64 approach, 3 = Use Savitsky\'s \'76 approach)'],
['z_max_type', self.z_max_type, '(1 = Uses 3rd order polynomial fit (faster, recommended), 2 = Use cubic interpolation)'],
[''],
['---WETTED LENGTHS---'],
['L_K', self.L_K, 'm, keel wetted length'],
['L_C', self.L_C, 'm, chine wetted length'],
['\u03BB', self.lambda_W, 'mean wetted-length to beam ratio (L_K+L_C)/(2*beam)'],
['x_s', self.x_s, 'm, distance from keel/water-line intersection to start of wetted chine'],
['z_max', self.z_max, 'maximum pressure coordinate coefficient (z_max/Ut)'],
[''],
['---FORCES [F_x (N, +aft), F_z (N, +up), M_cg (N*m, +pitch up)]---'],
['Hydrodynamic Force', self.hydrodynamic_force, ''],
['Skin Friction', self.skin_friction, ''],
['Air Resistance', self.air_resistance, ''],
['Flap Force', self.flap_force, ''],
['Net Force', self.net_force, ''],
['Resultant Thrust', self.thrust_force, ''],
[''],
['---THRUST & POWER---'],
['Thrust Magnitude', np.sqrt(self.thrust_force[0]**2+self.thrust_force[1]**2), 'N'],
['Effective Thrust', -self.thrust_force[0], 'N'],
['Eff. Power', -self.thrust_force[0]*self.speed/1000, 'kW'],
['Eff. Horsepower', -self.thrust_force[0]*self.speed/1000/0.7457, 'hp'],
[''],
['---EOM MATRICES---'],
['Mass matrix, [kg, kg*m/rad; kg*m, kg*m\u00B2/rad]', self.mass_matrix, ''],
['Damping matrix, [kg/s, kg*m/(s*rad); kg*m/s, kg*m\u00B2/(s*rad)]', self.damping_matrix, ''],
['Restoring matrix, [N/m, N/rad; N, N*m/rad]', self.restoring_matrix, ''],
[''],
['---PORPOISING---'],
['[[Eigenvalue check result, Est. pitch settling time (s)],\n [Savitsky chart result, Critical trim angle (deg)]]', np.array(self.porpoising), ''],
[''],
['---BEHAVIOR IN WAVES---'],
['H_sig', self.H_sig, 'm, significant wave height'],
['R_AW', self.R_AW, 'N, added resistance in waves'],
['Average impact acceleration [n_cg, n_bow] (g\'s)', self.avg_impact_acc, ''],
]
cLens=[16,0,0] #Min spacing for columns
for row in table:
if len(row)==3:
if row[1] is None:
print('{desc:<{cL0}} {val:<{cL1}} {unit:<{cL2}}'.format(desc=row[0], val='None', unit=row[2], cL0=cLens[0], cL1=cLens[1], cL2=cLens[2]))
elif isinstance(row[1], (list,np.ndarray)):
print(row[0]+' =')
with np.printoptions(formatter={'float': f'{{:.{sigFigs}g}}'.format}):
print(row[1])
print(row[2])
else:
print('{desc:<{cL0}} {val:<{cL1}.{sNum}g} {unit:<{cL2}}'.format(desc=row[0], val=row[1], unit=row[2], cL0=cLens[0], cL1=cLens[1], cL2=cLens[2], sNum=sigFigs))
else:
print(row[0])
def get_geo_lengths(self):
"""This function outputs the geometric lengths.
Adds/updates the following attributes:
- :attr:`L_K`
- :attr:`L_C`
- :attr:`lambda_W`
- :attr:`x_s`
- :attr:`z_max`
"""
b = self.beam
lcg = self.lcg
vcg = self.vcg
z_wl = self.z_wl
tau = self.tau
beta = self.beta
eta_3 = self.eta_3
eta_5 = self.eta_5
pi = np.pi
wetted_lengths_type = self.wetted_lengths_type
z_max_type = self.z_max_type
#Keel wetted length, Eq. 9.50 of Faltinsen 2005, page 367
L_K = lcg + vcg / np.tan(pi/180*(tau + eta_5)) - (z_wl + eta_3) / np.sin(pi/180*(tau + eta_5))
if L_K < 0:
L_K = 0
if wetted_lengths_type == 1:
#z_max/Vt coefficient, Table 8.3 of Faltinsen 2005, page 303---------------
beta_table = [4, 7.5, 10, 15, 20, 25, 30, 40]
z_max_table = [0.5695, 0.5623, 0.5556, 0.5361, 0.5087, 0.4709, 0.4243, 0.2866]
#Extrapolation warning
if beta < beta_table[0] or beta > beta_table[-1]:
warnings.warn('Deadrise ({0:.3f}) outside the interpolation range of 4-40 deg (Table 8.3 of Faltinsen 2005). Extrapolated values might be inaccurate.'.format(beta), stacklevel=2)
if z_max_type == 1:
z_max = np.polyval([-2.100644618790201e-006, -6.815747611588763e-005, -1.130563334939335e-003, 5.754510457848798e-001], beta)
elif z_max_type == 2:
z_max_func = interpolate.interp1d(beta_table, z_max_table, kind='cubic', fill_value='extrapolate') #Interpolation of the table
z_max = z_max_func(beta)
#--------------------------------------------------------------------------
#Distance from keel/water-line intersection to start of wetted chine (Eq. 9.10 of Faltinsen)
x_s = 0.5 * b * np.tan(pi/180*beta) / ((1 + z_max) * (pi/180)*(tau + eta_5))
if x_s < 0:
x_s = 0
#Chine wetted length, Eq. 9.51 of Faltinsen 2005
L_C = L_K - x_s
if L_C < 0:
L_C = 0
x_s = L_K
warnings.warn('Vessel operating with dry chines (L_C = 0).', stacklevel=2)
#Mean wetted length-to-beam ratio
lambda_W = (L_K + L_C) / (2 * b)
elif wetted_lengths_type == 2:
#Eq. 3 of Savitsky '64
x_s = b/pi*np.tan(pi/180*beta)/np.tan(pi/180*(tau + eta_5))
#Chine wetted length
L_C = L_K - x_s
if L_C < 0:
L_C = 0
x_s = L_K
warnings.warn('Vessel operating with dry chines (L_C = 0).', stacklevel=2)
#Mean wetted length-to-beam ratio
lambda_W = (L_K + L_C)/(2*b)
#z_max/Vt coefficient (Eq. 9.10 of Faltinsen 2005 rearranged)
z_max = 0.5 * b * np.tan(pi/180*beta) / (x_s * (pi/180)*(tau + eta_5)) - 1
elif wetted_lengths_type == 3:
#Eq. 12 of Savitsky '76
w = (0.57 + beta/1000)*(np.tan(pi/180*beta)/(2*np.tan(pi/180*(tau+eta_5)))-beta/167)
lambda_K = L_K/b
#Eq. 14 of Savitsky '76
lambda_C = (lambda_K-w)-0.2*np.exp(-(lambda_K-w)/0.3)
if lambda_C < 0:
lambda_C = 0
L_C = lambda_C*b
#Mean wetted length-to-beam ratio, Eq. 15 of Savitsky '76
lambda_W = (lambda_K + lambda_C)/2+0.03
x_s = L_K-L_C
#z_max/Vt coefficient (Eq. 9.10 of Faltinsen 2005 rearranged)
z_max = 0.5 * b * np.tan(pi/180*beta) / (x_s * (pi/180)*(tau + eta_5)) - 1
if self.length is not None:
if L_K > self.length:
warnings.warn('The estimated wetted keel length ({0:.3f}) is larger than the vessel length ({1:.3f}).'.format(L_K, self.length), stacklevel=2)
#Update values
self.L_K = L_K
self.L_C = L_C
self.lambda_W = lambda_W
self.x_s = x_s
self.z_max = z_max
def get_forces(self, runGeoLengths=True):
"""This function calls all the force functions to update the respective object attributes.
Adds/updates the following attributes:
- :attr:`hydrodynamic_force`
- :attr:`skin_friction`
- :attr:`air_resistance`
- :attr:`flap_force`
- :attr:`thrust_force`
- :attr:`net_force`
Args:
runGeoLengths (boolean, optional): Calculate the wetted lengths before calculating the forces. Defaults to True.
Methods:
get_hydrodynamic_force(): This function follows Savitsky 1964 and Faltinsen 2005 in calculating the vessel's hydrodynamic forces and moment.
get_skin_friction(): This function outputs the frictional force of the vessel using ITTC 1957 and the Bowden and Davison 1974 roughness coefficient.
get_air_resistance(): This function estimates the air drag. It assumes a square shape projected area with a shape coefficient.
get_flap_force(): This function outputs the flap forces w.r.t. global coordinates (Savitsky & Brown 1976). Horz: Positive Aft, Vert: Positive Up, Moment: Positive CCW.
sum_forces(): This function gets the sum of forces and moments, and consequently the required net thrust. The coordinates are positive aft, positive up, and positive counterclockwise.
"""
if runGeoLengths:
self.get_geo_lengths() #Calculated wetted lengths in get_forces()
g = self.g
rho_air = self.rho_air
C_D = self.C_D
rho = self.rho
nu = self.nu
AHR = self.ahr
W = self.weight
epsilon = self.epsilon
vT = self.vT
lT = self.lT
U = self.speed
b = self.beam
lcg = self.lcg
vcg = self.vcg
Lf = self.Lf
sigma = self.sigma
delta = self.delta
beam = self.beam
l_air = self.l_air
h_air = self.h_air
b_air = self.b_air
C_shape = self.C_shape
z_wl = self.z_wl
tau = self.tau
beta = self.beta
eta_3 = self.eta_3
eta_5 = self.eta_5
L_K = self.L_K
L_C = self.L_C
lambda_W = self.lambda_W
x_s = self.x_s
z_max = self.z_max
pi = np.pi
def get_hydrodynamic_force():
"""This function follows Savitsky 1964 and Faltinsen 2005 in calculating the vessel's hydrodynamic forces and moment.
"""
#Beam Froude number
Fn_B = U/np.sqrt(g*b)
#Warnings
if Fn_B < 0.6 or Fn_B > 13:
warnings.warn('Beam Froude number = {0:.3f}, outside of range of applicability (0.60 <= U/sqrt(g*b) <= 13.00) for planing lift equation. Results are extrapolations.'.format(Fn_B), stacklevel=2)
if lambda_W > 4:
warnings.warn('Mean wetted length-beam ratio = {0:.3f}, outside of range of applicability (lambda <= 4) for planing lift equation. Results are extrapolations.'.format(lambda_W), stacklevel=2)
if tau < 2 or tau > 15:
warnings.warn('Vessel trim = {0:.3f}, outside of range of applicability (2 deg <= tau <= 15 deg) for planing lift equation. Results are extrapolations.'.format(tau), stacklevel=2)
#0-Deadrise lift coefficient
C_L0 = (tau + eta_5)**1.1 * (0.012 * lambda_W**0.5 + 0.0055 * lambda_W**2.5 / Fn_B**2)
#Lift coefficient with deadrise, C_Lbeta
C_Lbeta = C_L0 - 0.0065 * beta * C_L0**0.6
#Vertical force (lift)
F_z = C_Lbeta * 0.5 * rho * U**2 * b**2
#Horizontal force
F_x = F_z*np.tan(pi/180*(tau + eta_5))
#Lift's Normal force w.r.t. keel
F_N = F_z / np.cos(pi/180*(tau + eta_5))
#Longitudinal position of the center of pressure, l_p (Eq. 4.41, Doctors 1985)
l_p = lambda_W * b * (0.75 - 1 / (5.21 * (Fn_B / lambda_W)**2 + 2.39)) #Limits for this is (0.60 < Fn_B < 13.0, lambda < 4.0)
#Moment about CG (Axis consistent with Fig. 9.24 of Faltinsen (P. 366)
M_cg = - F_N * (lcg - l_p)
#Update values
self.hydrodynamic_force = np.array([F_x, F_z, M_cg])
def get_skin_friction():
"""This function outputs the frictional force of the vessel using ITTC 1957 and the Bowden and Davison 1974 roughness coefficient.
"""
#Surface area of the dry-chine region
S1 = x_s * b / (2 * np.cos(pi/180*beta))
if L_K < x_s:
S1 = S1 * (L_K / x_s)**2
#Surface area of the wetted-chine region
S2 = b * L_C / np.cos(pi/180*beta)
#Total surface area
S = S1 + S2
if S == 0:
F_x = 0
F_z = 0
M_cg = 0
else:
#Mean bottom fluid velocity, Savitsky 1964 - Hadler's empirical formula
V_m = U * np.sqrt(1 - (0.012 * tau**1.1 * np.sqrt(lambda_W) - 0.0065 * beta * (0.012 * np.sqrt(lambda_W) * tau**1.1)**0.6) / (lambda_W * np.cos(tau * pi/180)))
#Reynolds number (with bottom fluid velocity)
Rn = V_m * lambda_W * b / nu
#'Friction coefficient' ITTC 1957
C_f = 0.075/(np.log10(Rn) - 2)**2
#Additional 'friction coefficient' due to skin friction, Bowden and Davison (1974)
deltaC_f = (44*((AHR/(lambda_W*b))**(1/3) - 10*Rn**(-1/3)) + 0.125)/10**3
#Frictional force
R_f = 0.5 * rho * (C_f + deltaC_f) * S * U**2
#Geometric vertical distance from keel
l_f = (b / 4 * np.tan(pi/180*beta) * S2 + b / 6 * np.tan(pi/180*beta) * S1) / (S1 + S2)
#Horizontal force
F_x = R_f * np.cos(pi/180*(tau + eta_5))
#Vertical force
F_z = - R_f * np.sin(pi/180*(tau + eta_5))
#Moment about CG (Axis consistent with Fig. 9.24 of Faltinsen (P. 366))
M_cg = R_f * (l_f - vcg)
#Update values
self.skin_friction = np.array([F_x, F_z, M_cg])
def get_air_resistance():
"""This function estimates the air drag. It assumes a square shape projected area with a shape coefficient.
"""
if C_shape == 0 or b_air == 0:
self.air_resistance = np.array([0, 0, 0])
return
#Vertical distance from calm water line to keel at LOA
a_dist = np.sin(pi/180*(tau + eta_5))*(l_air-L_K)
#Vertical distance from keel to horizontal line level with boat's height
b_dist = np.cos(pi/180*(tau + eta_5))*h_air
#Vertical distance from CG to center of square (moment arm, positive is CG above)
momArm = z_wl - (a_dist + b_dist)/2
#Square projected area
Area = (a_dist+b_dist)*b_air*C_shape
if Area < 0:
Area = 0
#Horizontal force (Positive aft)
F_x = 0.5*rho_air*C_D*Area*U**2
#Vertical force (Positive up)
F_z = 0
#Moment (positve CCW)
M_cg = -F_x*momArm
#Update values
self.air_resistance = np.array([F_x, F_z, M_cg])
def get_flap_force():
"""This function outputs the flap forces w.r.t. global coordinates (Savitsky & Brown 1976). Horz: Positive Aft, Vert: Positive Up, Moment: Positive CCW.
"""
if Lf == 0:
self.flap_force = np.array([0, 0, 0])
return
#Warnings
if Lf > 0.10*(L_K + L_C)/2 or Lf < 0:
warnings.warn('Flap chord = {0:.3f} outside of bounds (0-10% of mean wetted length) for flap forces estimates with Savitsky & Brown 1976'.format(Lf), stacklevel=2)
if delta < 0 or delta > 15:
warnings.warn('Flap deflection angle = {0:.3f} out of bounds (0-15 deg) for flap forces estimates with Savitsky & Brown 1976'.format(delta), stacklevel=2)
Fn_B = U/np.sqrt(g*b)
if Fn_B < 2 or Fn_B > 7:
warnings.warn('Beam-based Froude number Fn_B = {0:.3f} out of bounds (2-7) for flap forces estimates with Savitsky & Brown 1976'.format(Fn_B), stacklevel=2)
F_z = 0.046*(Lf*3.28084)*delta*sigma*(b*3.28084)*(rho/515.379)/2*(U*3.28084)**2*4.44822
F_x = 0.0052*F_z*(tau+eta_5+delta)
l_flap = 0.6*b+Lf*(1-sigma)
M_cg = -F_z*(lcg-l_flap)
#Update values
self.flap_force = np.array([F_x, F_z, M_cg])
def sum_forces():
"""This function gets the sum of forces and moments, and consequently the required net thrust. The coordinates are positive aft, positive up, and positive counterclockwise.
"""
#Call all force functions-------
get_hydrodynamic_force()
get_skin_friction()
get_air_resistance()
get_flap_force()
#-------------------------------
forcesMatrix = np.column_stack((self.gravity_force, self.hydrodynamic_force, self.skin_friction, self.air_resistance, self.flap_force)) #Forces and moments
F_sum = np.sum(forcesMatrix, axis=1) #F[0] is x-dir, F[1] is z-dir, and F[2] is moment
#Required thrust and resultant forces
T = F_sum[0]/np.cos(pi/180*(epsilon+tau+eta_5)) #Magnitude
T_z = T*np.sin(pi/180*(epsilon+tau+eta_5)) #Vertical
T_cg = T*np.cos(pi/180*epsilon)*(vcg - vT) - T*np.sin(pi/180*epsilon)*(lcg - lT) #Moment about cg
#Update resultant thrust values
self.thrust_force = np.array([-F_sum[0], T_z, T_cg])
#Include resultant thrust forces in sum
F_sum[1] = F_sum[1]+T_z
F_sum[2] = F_sum[2]+T_cg
#Update values
self.net_force = F_sum
#Call functions
sum_forces()
def get_steady_trim(self, x0=[0, 3], tauLims=[0.5, 35], tolF=10**-6, maxiter=50):
"""This function finds and sets the equilibrium point when the vessel is steadily running in calm water.
Updates the following attributes:
- :attr:`z_wl`
- :attr:`tau`
Args:
x0 (list of float): Initial guess for equilibrium point [z_wl (m), tau (deg)]. Defaults to [0, 3].
tauLims (list of float): Limits for equilibrium trim search. Defaults to [0.5, 35].
tolF (float): Tolerance for convergence to zero. Defaults to 10**-6.
maxiter (float): Maximum iterations. Defaults to 50.
"""
def _boatForces(x):
self.z_wl = x[0]/10 #the division is for normalization of the variables
self.tau = x[1]
self.get_forces()
return self.net_force[1:3]
def _boatForcesPrime(x):
return ndmath.complexGrad(_boatForces, x)
def _L_K(x):
# self.z_wl = x[0]/10
# self.tau = x[1]
# self.get_geo_lengths() #No need to call, because ndmath's nDimNewton always calls the obj function before calling this "constraint"
return [-self.L_K]
xlims = np.array([[-np.Inf, np.Inf], tauLims])
warnings.filterwarnings("ignore", category=UserWarning)
[self.z_wl, self.tau] = ndmath.nDimNewton(_boatForces, x0, _boatForcesPrime, tolF, maxiter, xlims, hehcon=_L_K)/[10, 1]
warnings.filterwarnings("default", category=UserWarning)
def get_eom_matrices(self, runGeoLengths=True):
"""This function returns the mass, damping, and stiffness matrices following Faltinsen 2005.
Adds/updates the following parameters:
- :attr:`mass_matrix`
- :attr:`damping_matrix`
- :attr:`restoring_matrix`
Args:
runGeoLengths (boolean, optional): Calculate the wetted lengths before calculating the EOM matrices. Defaults to True.
Methods:
get_mass_matrix(): This function returns the added mass coefficients following Sec. 9.4.1 of Faltinsen 2005, including weight and moment of inertia.
get_damping_matrix(): This function returns the damping coefficients following Sec. 9.4.1 of Faltinsen 2005.
get_restoring_matrix(diffType=1, step=10**-6.6): This function returns the restoring coefficients following the approach in Sec. 9.4.1 of Faltinsen 2005.
"""
if runGeoLengths:
self.get_geo_lengths() #Calculated wetted lengths in get_eom_matrices()
W = self.weight
U = self.speed
rho = self.rho
b = self.beam
lcg = self.lcg
tau = self.tau
beta = self.beta
g = self.g
r_g = self.r_g
eta_5 = self.eta_5
L_K = self.L_K
L_C = self.L_C
lambda_W = self.lambda_W
x_s = self.x_s
z_max = self.z_max
pi = np.pi
def get_mass_matrix():
"""This function returns the added mass coefficients following Sec. 9.4.1 of Faltinsen 2005, including weight and moment of inertia
"""
#Distance of CG from keel-WL intersection
x_G = L_K - lcg
#K constant (Eq. 9.63 of Faltinsen 2005)
K = (pi / np.sin(pi/180*beta) * gamma(1.5 - beta/180) / (gamma(1 - beta/180)**2 * gamma(0.5 + beta/180)) - 1) / np.tan(pi/180*beta)
kappa = (1 + z_max) * (pi/180)*(tau + eta_5) #User defined constant
#Based on Faltinsen's
A1_33 = rho * kappa**2 * K * x_s**3 / 3
A1_35 = A1_33 * (x_G - x_s * 3/4)
A1_53 = A1_35
A1_55 = A1_33 * (x_G**2 - 3/2 * x_G * x_s + 3/5 * x_s**2)
#Contribution from wet-chine region
if L_C > 0:
C_1 = 2 * np.tan(pi/180*beta)**2 / pi * K
A2_33 = (rho * b**3) * C_1 * pi / 8 * L_C / b
A2_35 = (rho * b**4) * (- C_1 * pi / 16 * ((L_K / b)**2 - (x_s / b)**2) + x_G / b * A2_33 / (rho * b**3))
A2_53 = A2_35
A2_55 = (rho * b**5) * (C_1 * pi / 24 * ((L_K / b)**3 - (x_s / b)**3) - C_1 / 8 * pi * (x_G / b) * ((L_K / b)**2 - (x_s / b)**2) + (x_G / b)**2 * A2_33 / (rho * b**3))
else:
A2_33 = 0
A2_35 = 0
A2_53 = 0
A2_55 = 0
#Total added mass & update values
A_33 = A1_33 + A2_33 + W/g # kg, A_33
A_35 = A1_35 + A2_35 # kg*m/rad, A_35
A_53 = A1_53 + A2_53 # kg*m, A_53
A_55 = A1_55 + A2_55 + W/g*r_g**2 # kg*m^2/rad, A_55
self.mass_matrix = np.array([[A_33, A_35], [A_53, A_55]])
def get_damping_matrix():
"""This function returns the damping coefficients following Sec. 9.4.1 of Faltinsen 2005
"""
#Heave-heave added mass (need to subtract W/g since it was added)
A_33 = self.mass_matrix[0,0] - W/g
if L_C > 0:
d = 0.5 * b * np.tan(pi/180*beta)
else:
d = (1 + z_max) * (pi/180)*(tau + eta_5) * L_K
#K constant (Eq. 9.63 of Faltinsen 2005, P. 369)
K = (pi / np.sin(pi/180*beta) * gamma(1.5 - beta/180) / (gamma(1 - beta/180)**2 * gamma(0.5 + beta/180)) - 1) / np.tan(pi/180*beta)
#2D Added mass coefficient in heave
a_33 = rho * d**2 * K
#Infinite Fn lift coefficient
C_L0 = (tau + eta_5)**1.1 * 0.012 * lambda_W**0.5
#Derivative w.r.t. tau (rad) of inf. Fn C_L0
dC_L0 = (180 / pi)**1.1 * 0.0132 * (pi/180*(tau + eta_5))**0.1 * lambda_W**0.5
#Derivative w.r.t. tau (rad) of inf. Fn C_Lbeta
dC_Lbeta = dC_L0 * (1 - 0.0039 * beta * C_L0**-0.4)
#Damping coefficients & update values
B_33 = rho / 2 * U * b**2 * dC_Lbeta # kg/s, B_33, Savitsky based
B_35 = - U * (A_33 + lcg * a_33) # kg*m/(s*rad), B_35, Infinite frequency based
B_53 = B_33 * (0.75 * lambda_W * b - lcg) # kg*m/s, B_53, Savitsky based
B_55 = U * lcg**2 * a_33 # kg*m**2/(s*rad), B_55, Infinite frequency based
self.damping_matrix = np.array([[B_33, B_35], [B_53, B_55]])
def get_restoring_matrix(diffType=1, step=10**-6.6):
"""This function returns the restoring coefficients following the approach in Sec. 9.4.1 of Faltinsen 2005
Args:
diffType (int, optional): 1 (recommended) = Complex step method, 2 = Forward step difference. Defaults to 1.
step (float, optional): Step size if using diffType == 2. Defaults to 10**-6.6.
"""
def _func(eta):
self.eta_3 = eta[0]
self.eta_5 = eta[1]
self.get_forces()
return self.net_force[1:3]
temp_eta_3 = self.eta_3
temp_eta_5 = self.eta_5
if diffType == 1:
C_full = -ndmath.complexGrad(_func, [temp_eta_3, temp_eta_5])
elif diffType == 2:
C_full = -ndmath.finiteGrad(_func, [temp_eta_3, temp_eta_5], 10**-6.6)
#Reset values
self.eta_3 = temp_eta_3
self.eta_5 = temp_eta_5
self.get_forces()
#Conversion deg to rad (degree in denominator)
C_full[0,1] = C_full[0,1] / (pi/180) # N/rad, C_35
C_full[1,1] = C_full[1,1] / (pi/180) # N*m/rad, C_55
#Update values
self.restoring_matrix = C_full
#Call functions
get_mass_matrix()
get_damping_matrix()
get_restoring_matrix()
def check_porpoising(self, stepEstimateType=1):
"""This function checks for porpoising.
Adds/updates the following parameters:
- :attr:`porpoising` (list):
Args:
stepEstimateType (int, optional): Pitch step response settling time estimate type, 1 = -3/np.real(eigVals[0]), 2 = Time-domain simulation estimate. Defaults to 1.
"""
#Eigenvalue analysis
try:
self.mass_matrix
except AttributeError:
warnings.warn('No Equation Of Motion (EOM) matrices found. Running get_eom_matrices().', stacklevel=2)
self.get_eom_matrices()
M = self.mass_matrix
C = self.damping_matrix
K = self.restoring_matrix
nDim = len(M)
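#Assemble the state-space matrix [[0, I], [-M^-1*K, -M^-1*C]]; porpoising then shows up below as an eigenvalue with a non-negative real part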
A_ss = np.concatenate((np.concatenate((np.zeros((nDim,nDim)), np.identity(nDim)), axis=1), np.concatenate((-np.linalg.solve(M,K), -np.linalg.solve(M,C)), axis=1))) #State-space representation
eigVals = np.linalg.eigvals(A_ss)
eig_porpoise = any(eigVal >= 0 for eigVal in eigVals)
if stepEstimateType == 1:
settling_time = -3/np.real(eigVals[0])
elif stepEstimateType == 2:
B_ss = np.array([[1],[0],[0],[0]]) #Pitch only
C_ss = np.array([[1,0,0,0]]) #Pitch only
D_ss = np.array([[0]])
system = (A_ss,B_ss,C_ss,D_ss)
t, y = signal.step(system)
settling_time = (t[next(len(y)-i for i in range(2,len(y)-1) if abs(y[-i]/y[-1])>1.02)]-t[0])
#Savitsky '64 chart method
C_L = self.weight/(1/2*self.rho*self.speed**2*self.beam**2)
x = np.sqrt(C_L/2)
#Warnings
if x > 0.3 or x < 0.13:
warnings.warn('Lift Coefficient = {0:.3f} outside of bounds (0.0338-0.18) for porpoising estimates with Savitsky 1964. Results are extrapolations.'.format(C_L), stacklevel=2)
if self.beta > 20:
warnings.warn('Deadrise = {0:.3f} outside of bounds (0-20 deg) for porpoising estimates with Savitsky 1964. Results are extrapolations.'.format(self.beta), stacklevel=2)
tau_crit_0 = -376.37*x**3 + 329.74*x**2 - 38.485*x + 1.3415
tau_crit_10 = -356.05*x**3 + 314.36*x**2 - 41.674*x + 3.5786
tau_crit_20 = -254.51*x**3 + 239.65*x**2 - 23.936*x + 3.0195
tau_crit_func = interpolate.interp1d([0, 10, 20], [tau_crit_0, tau_crit_10, tau_crit_20], kind='quadratic', fill_value='extrapolate')
tau_crit = tau_crit_func(self.beta)
if self.tau > tau_crit:
chart_porpoise = True
else:
chart_porpoise = False
#Update values
self.porpoising = [[eig_porpoise, settling_time], [chart_porpoise, float(tau_crit)]]
def get_seaway_behavior(self):
"""This function calculates the seaway behavior as stated in Savitsky & Brown '76.
Adds/updates the following parameters:
- :attr:`avg_impact_acc`
- :attr:`R_AW`
"""
if self.H_sig is None:
self.H_sig = self.beam*0.5 #Arbitrary wave height if no user-defined wave height
warnings.warn('Significant wave height has not been specified. Using beam*0.5 = {0:.3f} m.'.format(self.H_sig), stacklevel=2)
if self.length is None:
self.length = self.beam*3
warnings.warn('Vessel length has not been specified. Using beam*3 = {0:.3f} m.'.format(self.length), stacklevel=2)
H_sig = self.H_sig
W = self.weight
beta = self.beta
tau = self.tau
pi = np.pi
Delta_LT = W/9964 #Displacement in long tons (one long ton weighs approximately 9964 N)
Delta = Delta_LT*2240 #Displacement in lbf
L = self.length*3.281 #Length in ft
b = self.beam*3.281 #Beam in ft
Vk = self.speed*1.944 #Speed in knots
Vk_L = Vk/np.sqrt(L) #Vk/sqrt(L)
H_sig = H_sig*3.281 #Significant wave height in ft
w = self.rho*self.g/(4.448*35.315) #Specific weight in lbf/ft^3
C_Delta = Delta/(w*b**3) #Static beam-loading coefficient
if self.seaway_drag_type == 1: #Savitsky '76
#Check that variables are inside range of applicability (P. 395 of Savitsky & Brown '76)
P1 = Delta_LT/(0.01*L)**3
P2 = L/b
P5 = H_sig/b
P6 = Vk_L
if P1 < 100 or P1 > 250:
warnings.warn('Vessel displacement coefficient = {0:.3f}, outside of range of applicability (100 <= Delta_LT/(0.01*L)^3 <= 250, with units LT/ft^3). Results are extrapolations.'.format(P1), stacklevel=2)
if P2 < 3 or P2 > 5:
warnings.warn('Vessel length/beam = {0:.3f}, outside of range of applicability (3 <= L/b <= 5). Results are extrapolations.'.format(P2), stacklevel=2)
if tau < 3 or tau > 7:
warnings.warn('Vessel trim = {0:.3f}, outside of range of applicability (3 deg <= tau <= 7 deg). Results are extrapolations.'.format(tau), stacklevel=2)
if beta < 10 or beta > 30:
warnings.warn('Vessel deadrise = {0:.3f}, outside of range of applicability (10 deg <= beta <= 30 deg). Results are extrapolations.'.format(beta), stacklevel=2)
if P5 < 0.2 or P5 > 0.7:
warnings.warn('Significant wave height / beam = {0:.3f}, outside of range of applicability (0.2 <= H_sig/b <= 0.7). Results are extrapolations.'.format(P5), stacklevel=2)
if P6 < 2 or P6 > 6:
warnings.warn('Speed coefficient = {0:.3f}, outside of range of applicability (2 <= Vk/sqrt(L) <= 6, with units knots/ft^0.5). Results are extrapolations.'.format(P6), stacklevel=2)
R_AW_2 = (w*b**3)*66*10**-6*(H_sig/b+0.5)*(L/b)**3/C_Delta+0.0043*(tau-4) #Added resistance at Vk/sqrt(L) = 2
R_AW_4 = (Delta)*(0.3*H_sig/b)/(1+2*H_sig/b)*(1.76-tau/6-2*np.tan(beta*pi/180)**3) #Vk/sqrt(L) = 4
R_AW_6 = (w*b**3)*(0.158*H_sig/b)/(1+(H_sig/b)*(0.12*beta-21*C_Delta*(5.6-L/b)+7.5*(6-L/b))) #Vk/sqrt(L) = 6
R_AWs = np.array([R_AW_2, R_AW_4, R_AW_6])
R_AWs_interp = interpolate.interp1d([2,4,6], R_AWs, kind='quadratic', fill_value='extrapolate')
R_AW = R_AWs_interp([Vk_L])[0]
elif self.seaway_drag_type == 2: #Fridsma '71 design charts
#Check that variables are inside range of applicability (P. R-1495 of Fridsma '71)
if C_Delta < 0.3 or C_Delta > 0.9:
warnings.warn('C_Delta = {0:.3f}, outside of range of applicability (0.3 <= C_Delta <= 0.9). Results are extrapolations'.format(C_Delta), stacklevel=2)
if L/b < 3 or L/b > 6:
warnings.warn('L/b = {0:.3f}, outside of range of applicability (3 <= L/b <= 6). Results are extrapolations'.format(L/b), stacklevel=2)
if C_Delta/(L/b) < 0.06 or C_Delta/(L/b) > 0.18:
warnings.warn('C_Delta/(L/b) = {0:.3f}, outside of range of applicability (0.06 <= C_Delta/(L/b) <= 0.18). Results are extrapolations'.format(C_Delta/(L/b)), stacklevel=2)
if tau < 3 or tau > 7:
warnings.warn('tau = {0:.3f}, outside of range of applicability (3 <= tau <= 7). Results are extrapolations'.format(tau), stacklevel=2)
if beta < 10 or beta > 30:
warnings.warn('beta = {0:.3f}, outside of range of applicability (10 <= beta <= 30). Results are extrapolations'.format(beta), stacklevel=2)
if H_sig/b > 0.8:
warnings.warn('H_sig/b = {0:.3f}, outside of range of applicability (H_sig/b <= 0.8). Results are extrapolations'.format(H_sig/b), stacklevel=2)
if Vk_L > 6:
warnings.warn('Vk_L = {0:.3f}, outside of range of applicability (Vk_L <= 6). Results are extrapolations'.format(Vk_L), stacklevel=2)
#Get data tables (required for when package is distributed)
Raw2_tab = pkg_resources.resource_filename(__name__, 'tables/Raw_0.2.csv')
Raw4_tab = pkg_resources.resource_filename(__name__, 'tables/Raw_0.4.csv')
Raw6_tab = pkg_resources.resource_filename(__name__, 'tables/Raw_0.6.csv')
V2_tab = pkg_resources.resource_filename(__name__, 'tables/V_0.2.csv')
V4_tab = pkg_resources.resource_filename(__name__, 'tables/V_0.4.csv')
RawV2_tab = pkg_resources.resource_filename(__name__, 'tables/Raw_V_0.2.csv')
RawV4_tab = pkg_resources.resource_filename(__name__, 'tables/Raw_V_0.4.csv')
RawV6_tab = pkg_resources.resource_filename(__name__, 'tables/Raw_V_0.6.csv')
#Read values from extracted chart points
arr_Raw2 = np.genfromtxt(Raw2_tab, delimiter=',', skip_header=1)
arr_Raw4 = np.genfromtxt(Raw4_tab, delimiter=',', skip_header=1)
arr_Raw6 = np.genfromtxt(Raw6_tab, delimiter=',', skip_header=1)
arr_V2 = np.genfromtxt(V2_tab, delimiter=',', skip_header=1)
arr_V4 = np.genfromtxt(V4_tab, delimiter=',', skip_header=1)
arr_Raw_V2 = np.genfromtxt(RawV2_tab, delimiter=',', skip_header=1)
arr_Raw_V4 = np.genfromtxt(RawV4_tab, delimiter=',', skip_header=1)
arr_Raw_V6 = np.genfromtxt(RawV6_tab, delimiter=',', skip_header=1)
#Create interpolation functions
interp1Type = 'linear'
interp2Type = 'linear'
Raw2m_interp = interpolate.interp2d(arr_Raw2[:, 1], arr_Raw2[:, 0], arr_Raw2[:, 2], kind=interp2Type)
Raw4m_interp = interpolate.interp2d(arr_Raw4[:, 1], arr_Raw4[:, 0], arr_Raw4[:, 2], kind=interp2Type)
Raw6m_interp = interpolate.interp2d(arr_Raw6[:, 1], arr_Raw6[:, 0], arr_Raw6[:, 2], kind=interp2Type)
V2m_interp = interpolate.interp2d(arr_V2[:, 1], arr_V2[:, 0], arr_V2[:, 2], kind=interp2Type)
V4m_interp = interpolate.interp2d(arr_V4[:, 1], arr_V4[:, 0], arr_V4[:, 2], kind=interp2Type)
V6m_interp = V4m_interp
RawRaw2m_interp = interpolate.interp1d(arr_Raw_V2[:, 0], arr_Raw_V2[:, 1], kind=interp1Type, fill_value='extrapolate')
RawRaw4m_interp = interpolate.interp1d(arr_Raw_V4[:, 0], arr_Raw_V4[:, 1], kind=interp1Type, fill_value='extrapolate')
RawRaw6m_interp = interpolate.interp1d(arr_Raw_V6[:, 0], arr_Raw_V6[:, 1], kind=interp1Type, fill_value='extrapolate')
#Get values following procedure shown in Fridsma 1971 paper
VLm = [V2m_interp(beta, tau)[0], V4m_interp(beta, tau)[0], V6m_interp(beta, tau)[0]]
Rwbm = [Raw2m_interp(beta, tau)[0], Raw4m_interp(beta, tau)[0], Raw6m_interp(beta, tau)[0]]
VVm = Vk_L/VLm
RRm = [RawRaw2m_interp(VVm[0]), RawRaw4m_interp(VVm[1]), RawRaw6m_interp(VVm[2])]
Rwb = np.multiply(RRm, Rwbm)
E1 = lambda H_sig: 1 + ((L/b)**2/25 - 1)/(1 + 0.895*(H_sig/b - 0.6)) #V/sqrt(L) = 2
E2 = lambda H_sig: 1 + 10*H_sig/b*(C_Delta/(L/b) - 0.12) #V/sqrt(L) = 4
E3 = lambda H_sig: 1 + 2*H_sig/b*(0.9*(C_Delta-0.6)-0.7*(C_Delta-0.6)**2) #V/sqrt(L) = 6
E_interp = lambda H_sig: interpolate.interp1d([2, 4, 6], [E1(H_sig), E2(H_sig), E3(H_sig)], kind=interp1Type, fill_value='extrapolate')
E = [E_interp(0.2*b)(Vk_L), E_interp(0.4*b)(Vk_L), E_interp(0.6*b)(Vk_L)]
Rwb_final = np.multiply(Rwb,E)
Rwb_final_interp = interpolate.interp1d([0.2, 0.4, 0.6], Rwb_final, kind=interp1Type, fill_value='extrapolate')
R_AW = Rwb_final_interp(H_sig/b)*w*b**3
warnings.warn('Average impact acceleration based on the Fridsma charts is currently not implemented. Using Savitsky & Brown approximation.', stacklevel=2)
n_cg = 0.0104*(H_sig/b+0.084)*tau/4*(5/3-beta/30)*(Vk_L)**2*L/b/C_Delta #g, at CG
n_bow = n_cg*(1+3.8*(L/b-2.25)/(Vk_L)) #g, at bow
avg_impact_acc = np.array([n_cg, n_bow])
#Update values
self.avg_impact_acc = avg_impact_acc
self.R_AW = R_AW*4.448 #lbf to N conversion
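#Illustrative usage sketch (not part of the original source). "PlaningVessel"
#below is a placeholder name, since the class statement sits above this excerpt;
#the keyword arguments follow the constructor docstring:
#
# boat = PlaningVessel(speed=15, weight=8e4, beam=2.5, lcg=3.0, vcg=0.8,
#                      r_g=2.0, beta=15, epsilon=0, vT=0.5, lT=0.0)
# boat.get_steady_trim()      #solve for the equilibrium z_wl and tau
# boat.print_description()    #tabulated forces, EOM matrices, porpoising check
# boat.get_seaway_behavior()  #added resistance and impact accelerations in waves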
|
[
"numpy.log10",
"numpy.sqrt",
"ndmath.complexGrad",
"numpy.column_stack",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.sin",
"numpy.genfromtxt",
"scipy.interpolate.interp2d",
"ndmath.nDimNewton",
"scipy.signal.step",
"numpy.multiply",
"numpy.exp",
"numpy.real",
"numpy.polyval",
"warnings.warn",
"numpy.identity",
"numpy.linalg.eigvals",
"scipy.special.gamma",
"numpy.cos",
"warnings.filterwarnings",
"numpy.linalg.solve",
"numpy.tan",
"ndmath.finiteGrad",
"pkg_resources.resource_filename",
"numpy.sum",
"numpy.zeros",
"numpy.printoptions"
] |
[((10728, 10758), 'numpy.array', 'np.array', (['[0, -self.weight, 0]'], {}), '([0, -self.weight, 0])\n', (10736, 10758), True, 'import numpy as np\n'), ((33506, 33544), 'numpy.array', 'np.array', (['[[-np.Inf, np.Inf], tauLims]'], {}), '([[-np.Inf, np.Inf], tauLims])\n', (33514, 33544), True, 'import numpy as np\n'), ((33553, 33608), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (33576, 33608), False, 'import warnings\n'), ((33745, 33801), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""default"""'], {'category': 'UserWarning'}), "('default', category=UserWarning)\n", (33768, 33801), False, 'import warnings\n'), ((41166, 41189), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['A_ss'], {}), '(A_ss)\n', (41183, 41189), True, 'import numpy as np\n'), ((41855, 41871), 'numpy.sqrt', 'np.sqrt', (['(C_L / 2)'], {}), '(C_L / 2)\n', (41862, 41871), True, 'import numpy as np\n'), ((42549, 42670), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['[0, 10, 20]', '[tau_crit_0, tau_crit_10, tau_crit_20]'], {'kind': '"""quadratic"""', 'fill_value': '"""extrapolate"""'}), "([0, 10, 20], [tau_crit_0, tau_crit_10, tau_crit_20],\n kind='quadratic', fill_value='extrapolate')\n", (42569, 42670), False, 'from scipy import interpolate, signal\n'), ((52146, 52169), 'numpy.array', 'np.array', (['[n_cg, n_bow]'], {}), '([n_cg, n_bow])\n', (52154, 52169), True, 'import numpy as np\n'), ((26165, 26191), 'numpy.array', 'np.array', (['[F_x, F_z, M_cg]'], {}), '([F_x, F_z, M_cg])\n', (26173, 26191), True, 'import numpy as np\n'), ((28154, 28180), 'numpy.array', 'np.array', (['[F_x, F_z, M_cg]'], {}), '([F_x, F_z, M_cg])\n', (28162, 28180), True, 'import numpy as np\n'), ((29400, 29426), 'numpy.array', 'np.array', (['[F_x, F_x, M_cg]'], {}), '([F_x, F_x, M_cg])\n', (29408, 29426), True, 'import numpy as np\n'), ((30778, 30804), 'numpy.array', 'np.array', (['[F_x, F_z, M_cg]'], {}), '([F_x, F_z, M_cg])\n', (30786, 30804), True, 'import numpy as np\n'), ((31302, 31427), 'numpy.column_stack', 'np.column_stack', (['(self.gravity_force, self.hydrodynamic_force, self.skin_friction, self.\n air_resistance, self.flap_force)'], {}), '((self.gravity_force, self.hydrodynamic_force, self.\n skin_friction, self.air_resistance, self.flap_force))\n', (31317, 31427), True, 'import numpy as np\n'), ((31463, 31491), 'numpy.sum', 'np.sum', (['forcesMatrix'], {'axis': '(1)'}), '(forcesMatrix, axis=1)\n', (31469, 31491), True, 'import numpy as np\n'), ((31931, 31963), 'numpy.array', 'np.array', (['[-F_sum[0], T_z, T_cg]'], {}), '([-F_sum[0], T_z, T_cg])\n', (31939, 31963), True, 'import numpy as np\n'), ((33182, 33216), 'ndmath.complexGrad', 'ndmath.complexGrad', (['_boatForces', 'x'], {}), '(_boatForces, x)\n', (33200, 33216), False, 'import ndmath\n'), ((33641, 33732), 'ndmath.nDimNewton', 'ndmath.nDimNewton', (['_boatForces', 'x0', '_boatForcesPrime', 'tolF', 'maxiter', 'xlims'], {'hehcon': '_L_K'}), '(_boatForces, x0, _boatForcesPrime, tolF, maxiter, xlims,\n hehcon=_L_K)\n', (33658, 33732), False, 'import ndmath\n'), ((37017, 37055), 'numpy.array', 'np.array', (['[[A_33, A_35], [A_53, A_55]]'], {}), '([[A_33, A_35], [A_53, A_55]])\n', (37025, 37055), True, 'import numpy as np\n'), ((38597, 38635), 'numpy.array', 'np.array', (['[[B_33, B_35], [B_53, B_55]]'], {}), '([[B_33, B_35], [B_53, B_55]])\n', (38605, 38635), True, 'import numpy as np\n'), ((44076, 44086), 'numpy.sqrt', 'np.sqrt', (['L'], {}), '(L)\n', (44083, 44086), 
True, 'import numpy as np\n'), ((46298, 46332), 'numpy.array', 'np.array', (['[R_AW_2, R_AW_4, R_AW_6]'], {}), '([R_AW_2, R_AW_4, R_AW_6])\n', (46306, 46332), True, 'import numpy as np\n'), ((46373, 46460), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['[2, 4, 6]', 'R_AWs'], {'kind': '"""quadratic"""', 'fill_value': '"""extrapolate"""'}), "([2, 4, 6], R_AWs, kind='quadratic', fill_value=\n 'extrapolate')\n", (46393, 46460), False, 'from scipy import interpolate, signal\n'), ((15647, 15709), 'numpy.sqrt', 'np.sqrt', (['(self.thrust_force[0] ** 2 + self.thrust_force[1] ** 2)'], {}), '(self.thrust_force[0] ** 2 + self.thrust_force[1] ** 2)\n', (15654, 15709), True, 'import numpy as np\n'), ((16448, 16473), 'numpy.array', 'np.array', (['self.porpoising'], {}), '(self.porpoising)\n', (16456, 16473), True, 'import numpy as np\n'), ((18328, 18360), 'numpy.sin', 'np.sin', (['(pi / 180 * (tau + eta_5))'], {}), '(pi / 180 * (tau + eta_5))\n', (18334, 18360), True, 'import numpy as np\n'), ((19030, 19144), 'numpy.polyval', 'np.polyval', (['[-2.100644618790201e-06, -6.815747611588763e-05, -0.001130563334939335, \n 0.5754510457848798]', 'beta'], {}), '([-2.100644618790201e-06, -6.815747611588763e-05, -\n 0.001130563334939335, 0.5754510457848798], beta)\n', (19040, 19144), True, 'import numpy as np\n'), ((19877, 19951), 'warnings.warn', 'warnings.warn', (['"""Vessel operating with dry chines (L_C = 0)."""'], {'stacklevel': '(2)'}), "('Vessel operating with dry chines (L_C = 0).', stacklevel=2)\n", (19890, 19951), False, 'import warnings\n'), ((24448, 24462), 'numpy.sqrt', 'np.sqrt', (['(g * b)'], {}), '(g * b)\n', (24455, 24462), True, 'import numpy as np\n'), ((25606, 25638), 'numpy.tan', 'np.tan', (['(pi / 180 * (tau + eta_5))'], {}), '(pi / 180 * (tau + eta_5))\n', (25612, 25638), True, 'import numpy as np\n'), ((25705, 25737), 'numpy.cos', 'np.cos', (['(pi / 180 * (tau + eta_5))'], {}), '(pi / 180 * (tau + eta_5))\n', (25711, 25737), True, 'import numpy as np\n'), ((26649, 26672), 'numpy.cos', 'np.cos', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (26655, 26672), True, 'import numpy as np\n'), ((28449, 28468), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (28457, 28468), True, 'import numpy as np\n'), ((28581, 28613), 'numpy.sin', 'np.sin', (['(pi / 180 * (tau + eta_5))'], {}), '(pi / 180 * (tau + eta_5))\n', (28587, 28613), True, 'import numpy as np\n'), ((28741, 28773), 'numpy.cos', 'np.cos', (['(pi / 180 * (tau + eta_5))'], {}), '(pi / 180 * (tau + eta_5))\n', (28747, 28773), True, 'import numpy as np\n'), ((29705, 29724), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (29713, 29724), True, 'import numpy as np\n'), ((30245, 30259), 'numpy.sqrt', 'np.sqrt', (['(g * b)'], {}), '(g * b)\n', (30252, 30259), True, 'import numpy as np\n'), ((31618, 31660), 'numpy.cos', 'np.cos', (['(pi / 180 * (epsilon + tau + eta_5))'], {}), '(pi / 180 * (epsilon + tau + eta_5))\n', (31624, 31660), True, 'import numpy as np\n'), ((31685, 31727), 'numpy.sin', 'np.sin', (['(pi / 180 * (epsilon + tau + eta_5))'], {}), '(pi / 180 * (epsilon + tau + eta_5))\n', (31691, 31727), True, 'import numpy as np\n'), ((35746, 35769), 'numpy.tan', 'np.tan', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (35752, 35769), True, 'import numpy as np\n'), ((37688, 37711), 'numpy.tan', 'np.tan', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (37694, 37711), True, 'import numpy as np\n'), ((40660, 40771), 'warnings.warn', 'warnings.warn', (['"""No Equation Of Motion (EOM) 
matrices found. Running get_eom_matrices()."""'], {'stacklevel': '(2)'}), "(\n 'No Equation Of Motion (EOM) matrices found. Running get_eom_matrices().',\n stacklevel=2)\n", (40673, 40771), False, 'import warnings\n'), ((41327, 41346), 'numpy.real', 'np.real', (['eigVals[0]'], {}), '(eigVals[0])\n', (41334, 41346), True, 'import numpy as np\n'), ((41403, 41433), 'numpy.array', 'np.array', (['[[1], [0], [0], [0]]'], {}), '([[1], [0], [0], [0]])\n', (41411, 41433), True, 'import numpy as np\n'), ((41462, 41486), 'numpy.array', 'np.array', (['[[1, 0, 0, 0]]'], {}), '([[1, 0, 0, 0]])\n', (41470, 41486), True, 'import numpy as np\n'), ((41515, 41530), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (41523, 41530), True, 'import numpy as np\n'), ((41606, 41625), 'scipy.signal.step', 'signal.step', (['system'], {}), '(system)\n', (41617, 41625), False, 'from scipy import interpolate, signal\n'), ((48157, 48221), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""tables\\\\Raw_0.2.csv"""'], {}), "(__name__, 'tables\\\\Raw_0.2.csv')\n", (48188, 48221), False, 'import pkg_resources\n'), ((48244, 48308), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""tables\\\\Raw_0.4.csv"""'], {}), "(__name__, 'tables\\\\Raw_0.4.csv')\n", (48275, 48308), False, 'import pkg_resources\n'), ((48331, 48395), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""tables\\\\Raw_0.6.csv"""'], {}), "(__name__, 'tables\\\\Raw_0.6.csv')\n", (48362, 48395), False, 'import pkg_resources\n'), ((48417, 48479), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""tables\\\\V_0.2.csv"""'], {}), "(__name__, 'tables\\\\V_0.2.csv')\n", (48448, 48479), False, 'import pkg_resources\n'), ((48500, 48562), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""tables\\\\V_0.4.csv"""'], {}), "(__name__, 'tables\\\\V_0.4.csv')\n", (48531, 48562), False, 'import pkg_resources\n'), ((48599, 48665), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""tables\\\\Raw_V_0.2.csv"""'], {}), "(__name__, 'tables\\\\Raw_V_0.2.csv')\n", (48630, 48665), False, 'import pkg_resources\n'), ((48689, 48755), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""tables\\\\Raw_V_0.4.csv"""'], {}), "(__name__, 'tables\\\\Raw_V_0.4.csv')\n", (48720, 48755), False, 'import pkg_resources\n'), ((48779, 48845), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['__name__', '"""tables\\\\Raw_V_0.6.csv"""'], {}), "(__name__, 'tables\\\\Raw_V_0.6.csv')\n", (48810, 48845), False, 'import pkg_resources\n'), ((48922, 48975), 'numpy.genfromtxt', 'np.genfromtxt', (['Raw2_tab'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(Raw2_tab, delimiter=',', skip_header=1)\n", (48935, 48975), True, 'import numpy as np\n'), ((48999, 49052), 'numpy.genfromtxt', 'np.genfromtxt', (['Raw4_tab'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(Raw4_tab, delimiter=',', skip_header=1)\n", (49012, 49052), True, 'import numpy as np\n'), ((49076, 49129), 'numpy.genfromtxt', 'np.genfromtxt', (['Raw6_tab'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(Raw6_tab, delimiter=',', skip_header=1)\n", (49089, 49129), True, 'import numpy as np\n'), ((49152, 49203), 'numpy.genfromtxt', 'np.genfromtxt', (['V2_tab'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(V2_tab, delimiter=',', skip_header=1)\n", (49165, 
49203), True, 'import numpy as np\n'), ((49225, 49276), 'numpy.genfromtxt', 'np.genfromtxt', (['V4_tab'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(V4_tab, delimiter=',', skip_header=1)\n", (49238, 49276), True, 'import numpy as np\n'), ((49303, 49357), 'numpy.genfromtxt', 'np.genfromtxt', (['RawV2_tab'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(RawV2_tab, delimiter=',', skip_header=1)\n", (49316, 49357), True, 'import numpy as np\n'), ((49383, 49437), 'numpy.genfromtxt', 'np.genfromtxt', (['RawV4_tab'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(RawV4_tab, delimiter=',', skip_header=1)\n", (49396, 49437), True, 'import numpy as np\n'), ((49463, 49517), 'numpy.genfromtxt', 'np.genfromtxt', (['RawV6_tab'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(RawV6_tab, delimiter=',', skip_header=1)\n", (49476, 49517), True, 'import numpy as np\n'), ((49673, 49764), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['arr_Raw2[:, 1]', 'arr_Raw2[:, 0]', 'arr_Raw2[:, 2]'], {'kind': 'interp2Type'}), '(arr_Raw2[:, 1], arr_Raw2[:, 0], arr_Raw2[:, 2], kind=\n interp2Type)\n', (49693, 49764), False, 'from scipy import interpolate, signal\n'), ((49787, 49878), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['arr_Raw4[:, 1]', 'arr_Raw4[:, 0]', 'arr_Raw4[:, 2]'], {'kind': 'interp2Type'}), '(arr_Raw4[:, 1], arr_Raw4[:, 0], arr_Raw4[:, 2], kind=\n interp2Type)\n', (49807, 49878), False, 'from scipy import interpolate, signal\n'), ((49901, 49992), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['arr_Raw6[:, 1]', 'arr_Raw6[:, 0]', 'arr_Raw6[:, 2]'], {'kind': 'interp2Type'}), '(arr_Raw6[:, 1], arr_Raw6[:, 0], arr_Raw6[:, 2], kind=\n interp2Type)\n', (49921, 49992), False, 'from scipy import interpolate, signal\n'), ((50014, 50099), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['arr_V2[:, 1]', 'arr_V2[:, 0]', 'arr_V2[:, 2]'], {'kind': 'interp2Type'}), '(arr_V2[:, 1], arr_V2[:, 0], arr_V2[:, 2], kind=interp2Type\n )\n', (50034, 50099), False, 'from scipy import interpolate, signal\n'), ((50120, 50205), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['arr_V4[:, 1]', 'arr_V4[:, 0]', 'arr_V4[:, 2]'], {'kind': 'interp2Type'}), '(arr_V4[:, 1], arr_V4[:, 0], arr_V4[:, 2], kind=interp2Type\n )\n', (50140, 50205), False, 'from scipy import interpolate, signal\n'), ((50268, 50372), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['arr_Raw_V2[:, 0]', 'arr_Raw_V2[:, 1]'], {'kind': 'interp1Type', 'fill_value': '"""extrapolate"""'}), "(arr_Raw_V2[:, 0], arr_Raw_V2[:, 1], kind=interp1Type,\n fill_value='extrapolate')\n", (50288, 50372), False, 'from scipy import interpolate, signal\n'), ((50399, 50503), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['arr_Raw_V4[:, 0]', 'arr_Raw_V4[:, 1]'], {'kind': 'interp1Type', 'fill_value': '"""extrapolate"""'}), "(arr_Raw_V4[:, 0], arr_Raw_V4[:, 1], kind=interp1Type,\n fill_value='extrapolate')\n", (50419, 50503), False, 'from scipy import interpolate, signal\n'), ((50530, 50634), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['arr_Raw_V6[:, 0]', 'arr_Raw_V6[:, 1]'], {'kind': 'interp1Type', 'fill_value': '"""extrapolate"""'}), "(arr_Raw_V6[:, 0], arr_Raw_V6[:, 1], kind=interp1Type,\n fill_value='extrapolate')\n", (50550, 50634), False, 'from scipy import interpolate, signal\n'), ((51044, 51066), 'numpy.multiply', 'np.multiply', (['RRm', 'Rwbm'], {}), '(RRm, Rwbm)\n', (51055, 51066), True, 'import numpy as np\n'), ((51608, 51627), 'numpy.multiply', 'np.multiply', (['Rwb', 'E'], {}), '(Rwb, E)\n', 
(51619, 51627), True, 'import numpy as np\n'), ((51658, 51754), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['[0.2, 0.4, 0.6]', 'Rwb_final'], {'kind': 'interp1Type', 'fill_value': '"""extrapolate"""'}), "([0.2, 0.4, 0.6], Rwb_final, kind=interp1Type,\n fill_value='extrapolate')\n", (51678, 51754), False, 'from scipy import interpolate, signal\n'), ((51817, 51981), 'warnings.warn', 'warnings.warn', (['"""Average impact acceleration based on the Fridsma charts is currently not implemented. Using Savitsky & Brown approximation."""'], {'stacklevel': '(2)'}), "(\n 'Average impact acceleration based on the Fridsma charts is currently not implemented. Using Savitsky & Brown approximation.'\n , stacklevel=2)\n", (51830, 51981), False, 'import warnings\n'), ((11781, 11808), 'numpy.sqrt', 'np.sqrt', (['(self.g * self.beam)'], {}), '(self.g * self.beam)\n', (11788, 11808), True, 'import numpy as np\n'), ((11852, 11916), 'numpy.sqrt', 'np.sqrt', (['(self.g * (self.weight / (self.g * self.rho)) ** (1 / 3))'], {}), '(self.g * (self.weight / (self.g * self.rho)) ** (1 / 3))\n', (11859, 11916), True, 'import numpy as np\n'), ((12819, 12864), 'numpy.sin', 'np.sin', (['((self.tau + self.eta_5) * np.pi / 180)'], {}), '((self.tau + self.eta_5) * np.pi / 180)\n', (12825, 12864), True, 'import numpy as np\n'), ((18280, 18312), 'numpy.tan', 'np.tan', (['(pi / 180 * (tau + eta_5))'], {}), '(pi / 180 * (tau + eta_5))\n', (18286, 18312), True, 'import numpy as np\n'), ((19211, 19301), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['beta_table', 'z_max_table'], {'kind': '"""cubic"""', 'fill_value': '"""extrapolate"""'}), "(beta_table, z_max_table, kind='cubic', fill_value=\n 'extrapolate')\n", (19231, 19301), False, 'from scipy import interpolate, signal\n'), ((19588, 19611), 'numpy.tan', 'np.tan', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (19594, 19611), True, 'import numpy as np\n'), ((20174, 20206), 'numpy.tan', 'np.tan', (['(pi / 180 * (tau + eta_5))'], {}), '(pi / 180 * (tau + eta_5))\n', (20180, 20206), True, 'import numpy as np\n'), ((20367, 20441), 'warnings.warn', 'warnings.warn', (['"""Vessel operating with dry chines (L_C = 0)."""'], {'stacklevel': '(2)'}), "('Vessel operating with dry chines (L_C = 0).', stacklevel=2)\n", (20380, 20441), False, 'import warnings\n'), ((26479, 26502), 'numpy.cos', 'np.cos', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (26485, 26502), True, 'import numpy as np\n'), ((27826, 27858), 'numpy.cos', 'np.cos', (['(pi / 180 * (tau + eta_5))'], {}), '(pi / 180 * (tau + eta_5))\n', (27832, 27858), True, 'import numpy as np\n'), ((27918, 27950), 'numpy.sin', 'np.sin', (['(pi / 180 * (tau + eta_5))'], {}), '(pi / 180 * (tau + eta_5))\n', (27924, 27950), True, 'import numpy as np\n'), ((37401, 37424), 'numpy.tan', 'np.tan', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (37407, 37424), True, 'import numpy as np\n'), ((39426, 39477), 'ndmath.complexGrad', 'ndmath.complexGrad', (['_func', '[temp_eta_3, temp_eta_5]'], {}), '(_func, [temp_eta_3, temp_eta_5])\n', (39444, 39477), False, 'import ndmath\n'), ((20154, 20177), 'numpy.tan', 'np.tan', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (20160, 20177), True, 'import numpy as np\n'), ((31752, 31778), 'numpy.cos', 'np.cos', (['(pi / 180 * epsilon)'], {}), '(pi / 180 * epsilon)\n', (31758, 31778), True, 'import numpy as np\n'), ((31790, 31816), 'numpy.sin', 'np.sin', (['(pi / 180 * epsilon)'], {}), '(pi / 180 * epsilon)\n', (31796, 31816), True, 'import numpy as np\n'), ((39536, 39598), 
'ndmath.finiteGrad', 'ndmath.finiteGrad', (['_func', '[temp_eta_3, temp_eta_5]', '(10 ** -6.6)'], {}), '(_func, [temp_eta_3, temp_eta_5], 10 ** -6.6)\n', (39553, 39598), False, 'import ndmath\n'), ((40985, 41007), 'numpy.zeros', 'np.zeros', (['(nDim, nDim)'], {}), '((nDim, nDim))\n', (40993, 41007), True, 'import numpy as np\n'), ((41008, 41025), 'numpy.identity', 'np.identity', (['nDim'], {}), '(nDim)\n', (41019, 41025), True, 'import numpy as np\n'), ((17203, 17267), 'numpy.printoptions', 'np.printoptions', ([], {'formatter': "{'float': f'{{:.{sigFigs}g}}'.format}"}), "(formatter={'float': f'{{:.{sigFigs}g}}'.format})\n", (17218, 17267), True, 'import numpy as np\n'), ((20646, 20669), 'numpy.tan', 'np.tan', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (20652, 20669), True, 'import numpy as np\n'), ((20992, 21021), 'numpy.exp', 'np.exp', (['(-(lambda_K - w) / 0.3)'], {}), '(-(lambda_K - w) / 0.3)\n', (20998, 21021), True, 'import numpy as np\n'), ((27295, 27307), 'numpy.log10', 'np.log10', (['Rn'], {}), '(Rn)\n', (27303, 27307), True, 'import numpy as np\n'), ((35666, 35689), 'scipy.special.gamma', 'gamma', (['(1.5 - beta / 180)'], {}), '(1.5 - beta / 180)\n', (35671, 35689), False, 'from scipy.special import gamma\n'), ((35716, 35739), 'scipy.special.gamma', 'gamma', (['(0.5 + beta / 180)'], {}), '(0.5 + beta / 180)\n', (35721, 35739), False, 'from scipy.special import gamma\n'), ((37608, 37631), 'scipy.special.gamma', 'gamma', (['(1.5 - beta / 180)'], {}), '(1.5 - beta / 180)\n', (37613, 37631), False, 'from scipy.special import gamma\n'), ((37658, 37681), 'scipy.special.gamma', 'gamma', (['(0.5 + beta / 180)'], {}), '(0.5 + beta / 180)\n', (37663, 37681), False, 'from scipy.special import gamma\n'), ((41054, 41075), 'numpy.linalg.solve', 'np.linalg.solve', (['M', 'K'], {}), '(M, K)\n', (41069, 41075), True, 'import numpy as np\n'), ((41077, 41098), 'numpy.linalg.solve', 'np.linalg.solve', (['M', 'C'], {}), '(M, C)\n', (41092, 41098), True, 'import numpy as np\n'), ((46117, 46140), 'numpy.tan', 'np.tan', (['(beta * pi / 180)'], {}), '(beta * pi / 180)\n', (46123, 46140), True, 'import numpy as np\n'), ((20823, 20846), 'numpy.tan', 'np.tan', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (20829, 20846), True, 'import numpy as np\n'), ((21359, 21382), 'numpy.tan', 'np.tan', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (21365, 21382), True, 'import numpy as np\n'), ((27690, 27713), 'numpy.tan', 'np.tan', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (27696, 27713), True, 'import numpy as np\n'), ((27725, 27748), 'numpy.tan', 'np.tan', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (27731, 27748), True, 'import numpy as np\n'), ((35644, 35667), 'numpy.sin', 'np.sin', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (35650, 35667), True, 'import numpy as np\n'), ((35691, 35712), 'scipy.special.gamma', 'gamma', (['(1 - beta / 180)'], {}), '(1 - beta / 180)\n', (35696, 35712), False, 'from scipy.special import gamma\n'), ((36175, 36198), 'numpy.tan', 'np.tan', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (36181, 36198), True, 'import numpy as np\n'), ((37586, 37609), 'numpy.sin', 'np.sin', (['(pi / 180 * beta)'], {}), '(pi / 180 * beta)\n', (37592, 37609), True, 'import numpy as np\n'), ((37633, 37654), 'scipy.special.gamma', 'gamma', (['(1 - beta / 180)'], {}), '(1 - beta / 180)\n', (37638, 37654), False, 'from scipy.special import gamma\n'), ((20846, 20878), 'numpy.tan', 'np.tan', (['(pi / 180 * (tau + eta_5))'], {}), '(pi / 180 * (tau + eta_5))\n', 
(20852, 20878), True, 'import numpy as np\n'), ((27084, 27106), 'numpy.cos', 'np.cos', (['(tau * pi / 180)'], {}), '(tau * pi / 180)\n', (27090, 27106), True, 'import numpy as np\n'), ((26989, 27006), 'numpy.sqrt', 'np.sqrt', (['lambda_W'], {}), '(lambda_W)\n', (26996, 27006), True, 'import numpy as np\n'), ((27034, 27051), 'numpy.sqrt', 'np.sqrt', (['lambda_W'], {}), '(lambda_W)\n', (27041, 27051), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 09:54:17 2015
@author: jmilli
"""
import numpy as np
from scipy.interpolate import interp1d
def create2dMap(values,inputRadii=None,maxRadius=None):
"""
This function takes a 1D radial distribution in input and builds a 2D map
"""
nbValues=len(values)
if inputRadii is None:
inputRadii=np.arange(0,nbValues)
maxRadius=nbValues
else:
if maxRadius is None:
raise ValueError('You must provide a maximum radius')
imageAxis = np.arange(-maxRadius/2,maxRadius/2)
x,y = np.meshgrid(imageAxis,imageAxis)
distmap = abs(x+1j*y)
# map2d = np.ndarray(distmap.shape)
radiusOK = np.isfinite(values)
func = interp1d(inputRadii[radiusOK],values[radiusOK],kind='cubic',
bounds_error=False,fill_value=np.nan)
map2d = func(distmap)
return map2d,distmap
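#Illustrative example (not part of the original module): build a 2D map from a
#synthetic 1D radial profile. Radii outside the sampled range would be filled
#with NaN, since interp1d is called with bounds_error=False and fill_value=np.nan.
if __name__ == '__main__':
    radii = np.arange(0, 100)
    profile = np.exp(-radii / 20.0)  # synthetic, monotonically decaying profile
    map2d, distmap = create2dMap(profile, inputRadii=radii, maxRadius=100)
    print(map2d.shape, np.nanmin(map2d), np.nanmax(map2d))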
|
[
"numpy.meshgrid",
"numpy.isfinite",
"numpy.arange",
"scipy.interpolate.interp1d"
] |
[((533, 573), 'numpy.arange', 'np.arange', (['(-maxRadius / 2)', '(maxRadius / 2)'], {}), '(-maxRadius / 2, maxRadius / 2)\n', (542, 573), True, 'import numpy as np\n'), ((579, 612), 'numpy.meshgrid', 'np.meshgrid', (['imageAxis', 'imageAxis'], {}), '(imageAxis, imageAxis)\n', (590, 612), True, 'import numpy as np\n'), ((692, 711), 'numpy.isfinite', 'np.isfinite', (['values'], {}), '(values)\n', (703, 711), True, 'import numpy as np\n'), ((723, 829), 'scipy.interpolate.interp1d', 'interp1d', (['inputRadii[radiusOK]', 'values[radiusOK]'], {'kind': '"""cubic"""', 'bounds_error': '(False)', 'fill_value': 'np.nan'}), "(inputRadii[radiusOK], values[radiusOK], kind='cubic', bounds_error\n =False, fill_value=np.nan)\n", (731, 829), False, 'from scipy.interpolate import interp1d\n'), ((364, 386), 'numpy.arange', 'np.arange', (['(0)', 'nbValues'], {}), '(0, nbValues)\n', (373, 386), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
# Modified by <NAME> on 20 April 2020
import argparse
import datetime
import glob
import os
import random
import sys
import time
from PIL import Image
from PIL.PngImagePlugin import PngInfo
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import math
from dotmap import DotMap
try:
import pygame
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
try:
import queue
except ImportError:
import Queue as queue
from agents.navigation.agent import Agent, AgentState
from agents.navigation.local_planner import LocalPlanner
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.tools.misc import is_within_distance_ahead, compute_magnitude_angle
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
def is_within_distance(target_location, current_location, orientation, max_distance, d_angle_th_up, d_angle_th_low=0):
"""
Check if a target object is within a certain distance from a reference object.
A vehicle in front would be something around 0 deg, while one behind around 180 deg.
:param target_location: location of the target object
:param current_location: location of the reference object
:param orientation: orientation of the reference object
:param max_distance: maximum allowed distance
    :param d_angle_th_up: upper threshold for angle
    :param d_angle_th_low: lower threshold for angle (optional, default is 0)
:return: True if target object is within max_distance ahead of the reference object
"""
target_vector = np.array([target_location.x - current_location.x, target_location.y - current_location.y])
norm_target = np.linalg.norm(target_vector)
# If the vector is too short, we can simply stop here
if norm_target < 0.001:
return True
if norm_target > max_distance:
return False
forward_vector = np.array(
[math.cos(math.radians(orientation)), math.sin(math.radians(orientation))])
d_angle = math.degrees(math.acos(np.clip(np.dot(forward_vector, target_vector) / norm_target, -1., 1.)))
return d_angle_th_low < d_angle < d_angle_th_up
def compute_distance(location_1, location_2):
"""
Euclidean distance between 3D points
:param location_1, location_2: 3D points
"""
x = location_2.x - location_1.x
y = location_2.y - location_1.y
z = location_2.z - location_1.z
norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps
return norm
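# Illustrative usage of the two helpers above (comments only, not part of the
# original script): they only need objects exposing .x/.y/.z attributes, so a
# types.SimpleNamespace could stand in for carla.Location. Values are assumptions.
#   ego = SimpleNamespace(x=0.0, y=0.0, z=0.0)
#   target = SimpleNamespace(x=5.0, y=1.0, z=0.0)
#   is_within_distance(target, ego, orientation=0.0, max_distance=10.0,
#                      d_angle_th_up=90.0)   # True: target is ~11 deg off the forward axis
#   compute_distance(target, ego)            # ~5.10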
class CarlaSyncMode(object):
"""
Context manager to synchronize output from different sensors. Synchronous
mode is enabled as long as we are inside this context
with CarlaSyncMode(world, sensors) as sync_mode:
while True:
data = sync_mode.tick(timeout=1.0)
"""
def __init__(self, world, *sensors, **kwargs):
self.world = world
self.sensors = sensors
self.frame = None
self.delta_seconds = 1.0 / kwargs.get('fps', 20)
self._queues = []
self._settings = None
self.start()
def start(self):
self._settings = self.world.get_settings()
self.frame = self.world.apply_settings(carla.WorldSettings(
no_rendering_mode=False,
synchronous_mode=True,
fixed_delta_seconds=self.delta_seconds))
def make_queue(register_event):
q = queue.Queue()
register_event(q.put)
self._queues.append(q)
make_queue(self.world.on_tick)
for sensor in self.sensors:
make_queue(sensor.listen)
def tick(self, timeout):
self.frame = self.world.tick()
data = [self._retrieve_data(q, timeout) for q in self._queues]
assert all(x.frame == self.frame for x in data)
return data
def __exit__(self, *args, **kwargs):
self.world.apply_settings(self._settings)
def _retrieve_data(self, sensor_queue, timeout):
while True:
data = sensor_queue.get(timeout=timeout)
if data.frame == self.frame:
return data
def draw_image(surface, image, blend=False):
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
image_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
if blend:
image_surface.set_alpha(100)
surface.blit(image_surface, (0, 0))
def get_font():
fonts = [x for x in pygame.font.get_fonts()]
default_font = 'ubuntumono'
font = default_font if default_font in fonts else fonts[0]
font = pygame.font.match_font(font)
return pygame.font.Font(font, 14)
def should_quit():
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
elif event.type == pygame.KEYUP:
if event.key == pygame.K_ESCAPE:
return True
return False
def clamp(value, minimum=0.0, maximum=100.0):
return max(minimum, min(value, maximum))
class Sun(object):
def __init__(self, azimuth, altitude):
self.azimuth = azimuth
self.altitude = altitude
self._t = 0.0
def tick(self, delta_seconds):
self._t += 0.008 * delta_seconds
self._t %= 2.0 * math.pi
self.azimuth += 0.25 * delta_seconds
self.azimuth %= 360.0
min_alt, max_alt = [20, 90]
self.altitude = 0.5 * (max_alt + min_alt) + 0.5 * (max_alt - min_alt) * math.cos(self._t)
def __str__(self):
return 'Sun(alt: %.2f, azm: %.2f)' % (self.altitude, self.azimuth)
class Storm(object):
def __init__(self, precipitation):
self._t = precipitation if precipitation > 0.0 else -50.0
self._increasing = True
self.clouds = 0.0
self.rain = 0.0
self.wetness = 0.0
self.puddles = 0.0
self.wind = 0.0
self.fog = 0.0
def tick(self, delta_seconds):
delta = (1.3 if self._increasing else -1.3) * delta_seconds
self._t = clamp(delta + self._t, -250.0, 100.0)
self.clouds = clamp(self._t + 40.0, 0.0, 90.0)
self.clouds = clamp(self._t + 40.0, 0.0, 60.0)
self.rain = clamp(self._t, 0.0, 80.0)
delay = -10.0 if self._increasing else 90.0
self.puddles = clamp(self._t + delay, 0.0, 85.0)
self.wetness = clamp(self._t * 5, 0.0, 100.0)
self.wind = 5.0 if self.clouds <= 20 else 90 if self.clouds >= 70 else 40
self.fog = clamp(self._t - 10, 0.0, 30.0)
if self._t == -250.0:
self._increasing = True
if self._t == 100.0:
self._increasing = False
def __str__(self):
return 'Storm(clouds=%d%%, rain=%d%%, wind=%d%%)' % (self.clouds, self.rain, self.wind)
class Weather(object):
def __init__(self, world, changing_weather_speed):
self.world = world
self.reset()
self.weather = world.get_weather()
self.changing_weather_speed = changing_weather_speed
self._sun = Sun(self.weather.sun_azimuth_angle, self.weather.sun_altitude_angle)
self._storm = Storm(self.weather.precipitation)
def reset(self):
weather_params = carla.WeatherParameters(sun_altitude_angle=90.)
self.world.set_weather(weather_params)
def tick(self):
self._sun.tick(self.changing_weather_speed)
self._storm.tick(self.changing_weather_speed)
self.weather.cloudiness = self._storm.clouds
self.weather.precipitation = self._storm.rain
self.weather.precipitation_deposits = self._storm.puddles
self.weather.wind_intensity = self._storm.wind
self.weather.fog_density = self._storm.fog
self.weather.wetness = self._storm.wetness
self.weather.sun_azimuth_angle = self._sun.azimuth
self.weather.sun_altitude_angle = self._sun.altitude
self.world.set_weather(self.weather)
def __str__(self):
return '%s %s' % (self._sun, self._storm)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--vision_size', type=int, default=84)
parser.add_argument('--vision_fov', type=int, default=90)
parser.add_argument('--weather', default=False, action='store_true')
    parser.add_argument('--frame_skip', type=int, default=1)
    parser.add_argument('--steps', type=int, default=100000)
    parser.add_argument('--multiagent', default=False, action='store_true')
parser.add_argument('--lane', type=int, default=0)
parser.add_argument('--lights', default=False, action='store_true')
args = parser.parse_args()
return args
class LocalPlannerModified(LocalPlanner):
def __del__(self):
pass # otherwise it deletes our vehicle object
def run_step(self):
return super().run_step(debug=False) # otherwise by default shows waypoints, that interfere with our camera
class RoamingAgent(Agent):
"""
RoamingAgent implements a basic agent that navigates scenes making random
choices when facing an intersection.
This agent respects traffic lights and other vehicles.
NOTE: need to re-create after each env reset
"""
def __init__(self, env):
"""
:param vehicle: actor to apply to local planner logic onto
"""
vehicle = env.vehicle
follow_traffic_lights = env.follow_traffic_lights
super(RoamingAgent, self).__init__(vehicle)
self._proximity_threshold = 10.0 # meters
self._state = AgentState.NAVIGATING
self._local_planner = LocalPlannerModified(self._vehicle)
self._follow_traffic_lights = follow_traffic_lights
def compute_action(self):
action, traffic_light = self.run_step()
throttle = action.throttle
brake = action.brake
steer = action.steer
#print('tbsl:', throttle, brake, steer, traffic_light)
if brake == 0.0:
return np.array([throttle, steer])
else:
return np.array([-brake, steer])
def run_step(self):
"""
Execute one step of navigation.
:return: carla.VehicleControl
"""
# is there an obstacle in front of us?
hazard_detected = False
# retrieve relevant elements for safe navigation, i.e.: traffic lights and other vehicles
actor_list = self._world.get_actors()
vehicle_list = actor_list.filter("*vehicle*")
lights_list = actor_list.filter("*traffic_light*")
# check possible obstacles
vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
if vehicle_state:
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# check for the state of the traffic lights
traffic_light_color = self._is_light_red(lights_list)
if traffic_light_color == 'RED' and self._follow_traffic_lights:
self._state = AgentState.BLOCKED_RED_LIGHT
hazard_detected = True
if hazard_detected:
control = self.emergency_stop()
else:
self._state = AgentState.NAVIGATING
# standard local planner behavior
control = self._local_planner.run_step()
#print ('Action chosen: ', control)
return control, traffic_light_color
# override case class
def _is_light_red_europe_style(self, lights_list):
"""
This method is specialized to check European style traffic lights.
Only suitable for Towns 03 -- 07.
"""
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
traffic_light_color = "NONE" # default, if no traffic lights are seen
for traffic_light in lights_list:
object_waypoint = self._map.get_waypoint(traffic_light.get_location())
if object_waypoint.road_id != ego_vehicle_waypoint.road_id or \
object_waypoint.lane_id != ego_vehicle_waypoint.lane_id:
continue
if is_within_distance_ahead(traffic_light.get_transform(),
self._vehicle.get_transform(),
self._proximity_threshold):
if traffic_light.state == carla.TrafficLightState.Red:
return "RED"
elif traffic_light.state == carla.TrafficLightState.Yellow:
traffic_light_color = "YELLOW"
elif traffic_light.state == carla.TrafficLightState.Green:
                    if traffic_light_color != "YELLOW": # (more severe)
traffic_light_color = "GREEN"
else:
import pdb; pdb.set_trace()
# investigate https://carla.readthedocs.io/en/latest/python_api/#carlatrafficlightstate
return traffic_light_color
# override case class
def _is_light_red_us_style(self, lights_list, debug=False):
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
traffic_light_color = "NONE" # default, if no traffic lights are seen
if ego_vehicle_waypoint.is_junction:
# It is too late. Do not block the intersection! Keep going!
return "JUNCTION"
if self._local_planner.target_waypoint is not None:
if self._local_planner.target_waypoint.is_junction:
min_angle = 180.0
sel_magnitude = 0.0
sel_traffic_light = None
for traffic_light in lights_list:
loc = traffic_light.get_location()
magnitude, angle = compute_magnitude_angle(loc,
ego_vehicle_location,
self._vehicle.get_transform().rotation.yaw)
if magnitude < 60.0 and angle < min(25.0, min_angle):
sel_magnitude = magnitude
sel_traffic_light = traffic_light
min_angle = angle
if sel_traffic_light is not None:
if debug:
print('=== Magnitude = {} | Angle = {} | ID = {}'.format(
sel_magnitude, min_angle, sel_traffic_light.id))
if self._last_traffic_light is None:
self._last_traffic_light = sel_traffic_light
if self._last_traffic_light.state == carla.TrafficLightState.Red:
return "RED"
elif self._last_traffic_light.state == carla.TrafficLightState.Yellow:
traffic_light_color = "YELLOW"
elif self._last_traffic_light.state == carla.TrafficLightState.Green:
                        if traffic_light_color != "YELLOW": # (more severe)
traffic_light_color = "GREEN"
else:
import pdb; pdb.set_trace()
# investigate https://carla.readthedocs.io/en/latest/python_api/#carlatrafficlightstate
else:
self._last_traffic_light = None
return traffic_light_color
if __name__ == '__main__':
# example call:
# ./PythonAPI/util/config.py --map Town01 --delta-seconds 0.05
# python PythonAPI/carla/agents/navigation/data_collection_agent.py --vision_size 256 --vision_fov 90 --steps 10000 --weather --lights
args = parse_args()
env = CarlaEnv(args)
try:
done = False
while not done:
action, traffic_light_color = env.compute_action()
next_obs, reward, done, info = env.step(action, traffic_light_color)
print ('Reward: ', reward, 'Done: ', done, 'Location: ', env.vehicle.get_location())
if done:
# env.reset_init()
# env.reset()
done = False
finally:
env.finish()
|
[
"numpy.dtype",
"pygame.font.get_fonts",
"numpy.reshape",
"argparse.ArgumentParser",
"pygame.event.get",
"carla.WeatherParameters",
"pygame.font.match_font",
"math.radians",
"math.cos",
"numpy.array",
"numpy.dot",
"pdb.set_trace",
"numpy.linalg.norm",
"numpy.finfo",
"pygame.font.Font",
"Queue.Queue",
"glob.glob",
"carla.WorldSettings"
] |
[((2198, 2292), 'numpy.array', 'np.array', (['[target_location.x - current_location.x, target_location.y - current_location.y\n ]'], {}), '([target_location.x - current_location.x, target_location.y -\n current_location.y])\n', (2206, 2292), True, 'import numpy as np\n'), ((2307, 2336), 'numpy.linalg.norm', 'np.linalg.norm', (['target_vector'], {}), '(target_vector)\n', (2321, 2336), True, 'import numpy as np\n'), ((4855, 4904), 'numpy.reshape', 'np.reshape', (['array', '(image.height, image.width, 4)'], {}), '(array, (image.height, image.width, 4))\n', (4865, 4904), True, 'import numpy as np\n'), ((5299, 5327), 'pygame.font.match_font', 'pygame.font.match_font', (['font'], {}), '(font)\n', (5321, 5327), False, 'import pygame\n'), ((5339, 5365), 'pygame.font.Font', 'pygame.font.Font', (['font', '(14)'], {}), '(font, 14)\n', (5355, 5365), False, 'import pygame\n'), ((5404, 5422), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (5420, 5422), False, 'import pygame\n'), ((8700, 8725), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8723, 8725), False, 'import argparse\n'), ((3052, 3077), 'numpy.linalg.norm', 'np.linalg.norm', (['[x, y, z]'], {}), '([x, y, z])\n', (3066, 3077), True, 'import numpy as np\n'), ((7876, 7924), 'carla.WeatherParameters', 'carla.WeatherParameters', ([], {'sun_altitude_angle': '(90.0)'}), '(sun_altitude_angle=90.0)\n', (7899, 7924), False, 'import carla\n'), ((462, 620), 'glob.glob', 'glob.glob', (["('../carla/dist/carla-*%d.%d-%s.egg' % (sys.version_info.major, sys.\n version_info.minor, 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))"], {}), "('../carla/dist/carla-*%d.%d-%s.egg' % (sys.version_info.major,\n sys.version_info.minor, 'win-amd64' if os.name == 'nt' else 'linux-x86_64')\n )\n", (471, 620), False, 'import glob\n'), ((3080, 3095), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3088, 3095), True, 'import numpy as np\n'), ((3825, 3936), 'carla.WorldSettings', 'carla.WorldSettings', ([], {'no_rendering_mode': '(False)', 'synchronous_mode': '(True)', 'fixed_delta_seconds': 'self.delta_seconds'}), '(no_rendering_mode=False, synchronous_mode=True,\n fixed_delta_seconds=self.delta_seconds)\n', (3844, 3936), False, 'import carla\n'), ((4028, 4041), 'Queue.Queue', 'queue.Queue', ([], {}), '()\n', (4039, 4041), True, 'import Queue as queue\n'), ((4824, 4841), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (4832, 4841), True, 'import numpy as np\n'), ((5168, 5191), 'pygame.font.get_fonts', 'pygame.font.get_fonts', ([], {}), '()\n', (5189, 5191), False, 'import pygame\n'), ((10600, 10627), 'numpy.array', 'np.array', (['[throttle, steer]'], {}), '([throttle, steer])\n', (10608, 10627), True, 'import numpy as np\n'), ((10661, 10686), 'numpy.array', 'np.array', (['[-brake, steer]'], {}), '([-brake, steer])\n', (10669, 10686), True, 'import numpy as np\n'), ((2551, 2576), 'math.radians', 'math.radians', (['orientation'], {}), '(orientation)\n', (2563, 2576), False, 'import math\n'), ((2588, 2613), 'math.radians', 'math.radians', (['orientation'], {}), '(orientation)\n', (2600, 2613), False, 'import math\n'), ((6161, 6178), 'math.cos', 'math.cos', (['self._t'], {}), '(self._t)\n', (6169, 6178), False, 'import math\n'), ((2662, 2699), 'numpy.dot', 'np.dot', (['forward_vector', 'target_vector'], {}), '(forward_vector, target_vector)\n', (2668, 2699), True, 'import numpy as np\n'), ((13423, 13438), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (13436, 13438), False, 'import pdb\n'), ((15792, 15807), 
'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (15805, 15807), False, 'import pdb\n')]
|
import warnings
from collections.abc import Iterable
from collections import OrderedDict
import torch
import numpy as np
from torch.utils.data import Dataset
from deep_staple.utils.torch_utils import interpolate_sample, augmentNoise, spatial_augment, torch_manual_seeded, ensure_dense
from deep_staple.utils.common_utils import LabelDisturbanceMode
class HybridIdLoader(Dataset):
def __init__(self,
data_load_function,
ensure_labeled_pairs=True, use_additional_data=False, resample=True,
size:tuple=(96,96,60), normalize:bool=True,
max_load_3d_num=None, crop_3d_w_dim_range=None, modified_3d_label_override=None,
prevent_disturbance=False,
use_2d_normal_to=None, crop_2d_slices_gt_num_threshold=None, pre_interpolation_factor=2.,
fixed_weight_file = None, fixed_weight_min_quantile=None, fixed_weight_min_value=None
):
self.label_tags = []
self.use_2d_normal_to = use_2d_normal_to
self.crop_2d_slices_gt_num_threshold = crop_2d_slices_gt_num_threshold
self.prevent_disturbance = prevent_disturbance
self.do_augment = False
self.use_modified = False
self.disturbed_idxs = []
self.augment_at_collate = False
self.pre_interpolation_factor = pre_interpolation_factor
self.extract_3d_id = lambda _:_
self.extract_short_3d_id = lambda _:_
self.img_paths = {}
self.label_paths = {}
self.img_data_3d = {}
self.label_data_3d = {}
self.modified_label_data_3d = {}
# Load base 3D data
(self.img_paths, self.label_paths,
self.img_data_3d, self.label_data_3d,
self.modified_label_data_3d,
self.extract_3d_id, self.extract_short_3d_id) = data_load_function()
# Retrieve slices and plugin modified data
self.img_data_2d = {}
self.label_data_2d = {}
self.modified_label_data_2d = {}
# Postprocessing of 3d volumes
print("Postprocessing 3D volumes")
orig_3d_num = len(self.label_data_3d.keys())
if ensure_labeled_pairs:
labelled_keys = set(self.label_data_3d.keys())
unlabelled_imgs = set(self.img_data_3d.keys()) - labelled_keys
unlabelled_modified_labels = set([self.extract_3d_id(key) for key in self.modified_label_data_3d.keys()]) - labelled_keys
for del_key in unlabelled_imgs:
del self.img_data_3d[del_key]
for del_key in unlabelled_modified_labels:
del self.modified_label_data_3d[del_key]
if max_load_3d_num:
for del_key in sorted(list(self.img_data_3d.keys()))[max_load_3d_num:]:
del self.img_data_3d[del_key]
for del_key in sorted(list(self.label_data_3d.keys()))[max_load_3d_num:]:
del self.label_data_3d[del_key]
for del_key in sorted(list(self.modified_label_data_3d.keys()))[max_load_3d_num:]:
del self.modified_label_data_3d[del_key]
postprocessed_3d_num = len(self.label_data_3d.keys())
print(f"Removed {orig_3d_num - postprocessed_3d_num} 3D images in postprocessing")
#check for consistency
print(f"Equal image and label numbers: {set(self.img_data_3d)==set(self.label_data_3d)==set(self.modified_label_data_3d)} ({len(self.img_data_3d)})")
img_stack = torch.stack(list(self.img_data_3d.values()), dim=0)
img_mean, img_std = img_stack.mean(), img_stack.std()
label_stack = torch.stack(list(self.label_data_3d.values()), dim=0)
print("Image shape: {}, mean.: {:.2f}, std.: {:.2f}".format(img_stack.shape, img_mean, img_std))
print("Label shape: {}, max.: {}".format(label_stack.shape,torch.max(label_stack)))
if use_2d_normal_to:
if use_2d_normal_to == "D":
slice_dim = -3
if use_2d_normal_to == "H":
slice_dim = -2
if use_2d_normal_to == "W":
slice_dim = -1
for _3d_id, image in self.img_data_3d.items():
for idx, img_slc in [(slice_idx, image.select(slice_dim, slice_idx)) \
for slice_idx in range(image.shape[slice_dim])]:
# Set data view for id like "003rW100"
self.img_data_2d[f"{_3d_id}{use_2d_normal_to}{idx:03d}"] = img_slc
for _3d_id, label in self.label_data_3d.items():
for idx, lbl_slc in [(slice_idx, label.select(slice_dim, slice_idx)) \
for slice_idx in range(label.shape[slice_dim])]:
# Set data view for id like "003rW100"
self.label_data_2d[f"{_3d_id}{use_2d_normal_to}{idx:03d}"] = lbl_slc
for _3d_id, label in self.modified_label_data_3d.items():
for idx, lbl_slc in [(slice_idx, label.select(slice_dim, slice_idx)) \
for slice_idx in range(label.shape[slice_dim])]:
# Set data view for id like "003rW100"
self.modified_label_data_2d[f"{_3d_id}{use_2d_normal_to}{idx:03d}"] = lbl_slc
# Postprocessing of 2d slices
print("Postprocessing 2D slices")
orig_2d_num = len(self.label_data_2d.keys())
        if self.crop_2d_slices_gt_num_threshold is not None and self.crop_2d_slices_gt_num_threshold > 0:
for key, label in list(self.label_data_2d.items()):
uniq_vals = label.unique()
if sum(label[label > 0]) < self.crop_2d_slices_gt_num_threshold:
# Delete 2D slices with less than n gt-pixels (but keep 3d data)
del self.img_data_2d[key]
del self.label_data_2d[key]
del self.modified_label_data_2d[key]
postprocessed_2d_num = len(self.label_data_2d.keys())
print(f"Removed {orig_2d_num - postprocessed_2d_num} of {orig_2d_num} 2D slices in postprocessing")
if fixed_weight_file is not None and any([fixed_weight_min_quantile, fixed_weight_min_value]):
fixed_weightdata = torch.load(fixed_weight_file)
fixed_weights = fixed_weightdata['data_parameters'].detach().cpu()
fixed_d_ids = fixed_weightdata['d_ids']
print(f"Fixed weight quantiles are: {np.quantile(fixed_weights, np.linspace(0.,1.,5))}")
if fixed_weight_min_quantile is not None:
fixed_weight_min_value = np.quantile(fixed_weights, fixed_weight_min_quantile)
elif fixed_weight_min_value is not None:
pass
fixed_del_counter = 0
for key, weight in zip(fixed_d_ids, fixed_weights):
if weight < fixed_weight_min_value:
if use_2d_normal_to:
del self.img_data_2d[key]
del self.label_data_2d[key]
del self.modified_label_data_2d[key]
else:
del self.img_data_3d[key]
del self.label_data_3d[key]
del self.modified_label_data_3d[key]
fixed_del_counter+=1
print(f"Removed {fixed_del_counter} data samples by cropping data with fixed weight min value = {fixed_weight_min_value:.3f}")
# Now make sure dicts are ordered
self.img_paths = OrderedDict(sorted(self.img_paths.items()))
self.label_paths = OrderedDict(sorted(self.label_paths.items()))
self.img_data_3d = OrderedDict(sorted(self.img_data_3d.items()))
self.label_data_3d = OrderedDict(sorted(self.label_data_3d.items()))
self.modified_label_data_3d = OrderedDict(sorted(self.modified_label_data_3d.items()))
self.img_data_2d = OrderedDict(sorted(self.img_data_2d.items()))
self.label_data_2d = OrderedDict(sorted(self.label_data_2d.items()))
self.modified_label_data_2d = OrderedDict(sorted(self.modified_label_data_2d.items()))
nonzero_lbl_percentage = torch.tensor([lbl.sum((-2,-1)) > 0 for lbl in self.label_data_2d.values()]).sum()
nonzero_lbl_percentage = nonzero_lbl_percentage/len(self.label_data_2d)
print(f"Nonzero labels: " f"{nonzero_lbl_percentage*100:.2f}%")
nonzero_mod_lbl_percentage = torch.tensor([ensure_dense(lbl)[0].sum((-2,-1)) > 0 for lbl in self.modified_label_data_2d.values()]).sum()
nonzero_mod_lbl_percentage = nonzero_mod_lbl_percentage/len(self.modified_label_data_2d)
print(f"Nonzero modified labels: " f"{nonzero_mod_lbl_percentage*100:.2f}%")
print(f"Loader will use {postprocessed_2d_num} of {orig_2d_num} 2D slices.")
print("Data import finished.")
print(f"Dataloader will yield {'2D' if self.use_2d_normal_to else '3D'} samples")
def get_short_3d_ids(self):
return [self.extract_short_3d_id(_id) for _id in self.get_3d_ids()]
def get_3d_ids(self):
return list(self.img_data_3d.keys())
def get_2d_ids(self):
assert self.use_2d(), "Dataloader does not provide 2D data."
return list(self.img_data_2d.keys())
def get_id_dicts(self, use_2d_override=None):
all_3d_ids = self.get_3d_ids()
id_dicts = []
if self.use_2d(use_2d_override):
for _2d_dataset_idx, _2d_id in enumerate(self.get_2d_ids()):
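                # 2D ids follow the "<3d_id><axis><slice:03d>" pattern built above
                # (e.g. "003rW100"), so dropping the last four characters recovers
                # the parent 3D id.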
_3d_id = _2d_id[:-4]
id_dicts.append(
{
'2d_id': _2d_id,
'2d_dataset_idx': _2d_dataset_idx,
'3d_id': _3d_id,
'3d_dataset_idx': all_3d_ids.index(_3d_id),
}
)
else:
for _3d_dataset_idx, _3d_id in enumerate(self.get_3d_ids()):
id_dicts.append(
{
'3d_id': _3d_id,
'3d_dataset_idx': all_3d_ids.index(_3d_id),
}
)
return id_dicts
def switch_2d_identifiers(self, _2d_identifiers):
assert self.use_2d(), "Dataloader does not provide 2D data."
if isinstance(_2d_identifiers, (torch.Tensor, np.ndarray)):
_2d_identifiers = _2d_identifiers.tolist()
elif not isinstance(_2d_identifiers, Iterable) or isinstance(_2d_identifiers, str):
_2d_identifiers = [_2d_identifiers]
_ids = self.get_2d_ids()
if all([isinstance(elem, int) for elem in _2d_identifiers]):
vals = [_ids[elem] for elem in _2d_identifiers]
elif all([isinstance(elem, str) for elem in _2d_identifiers]):
vals = [_ids.index(elem) for elem in _2d_identifiers]
else:
raise ValueError
return vals[0] if len(vals) == 1 else vals
def switch_3d_identifiers(self, _3d_identifiers):
if isinstance(_3d_identifiers, (torch.Tensor, np.ndarray)):
_3d_identifiers = _3d_identifiers.tolist()
elif not isinstance(_3d_identifiers, Iterable) or isinstance(_3d_identifiers, str):
_3d_identifiers = [_3d_identifiers]
_ids = self.get_3d_ids()
if all([isinstance(elem, int) for elem in _3d_identifiers]):
vals = [_ids[elem] for elem in _3d_identifiers]
elif all([isinstance(elem, str) for elem in _3d_identifiers]):
vals = [_ids.index(elem) if elem in _ids else None for elem in _3d_identifiers]
else:
raise ValueError
return vals[0] if len(vals) == 1 else vals
def get_3d_from_2d_identifiers(self, _2d_identifiers, retrn='id'):
assert self.use_2d(), "Dataloader does not provide 2D data."
assert retrn in ['id', 'idx']
if isinstance(_2d_identifiers, (torch.Tensor, np.ndarray)):
_2d_identifiers = _2d_identifiers.tolist()
elif not isinstance(_2d_identifiers, Iterable) or isinstance(_2d_identifiers, str):
_2d_identifiers = [_2d_identifiers]
if isinstance(_2d_identifiers[0], int):
_2d_identifiers = self.switch_2d_identifiers(_2d_identifiers)
vals = []
for item in _2d_identifiers:
_3d_id = self.extract_3d_id(item)
if retrn == 'id':
vals.append(_3d_id)
elif retrn == 'idx':
vals.append(self.switch_3d_identifiers(_3d_id))
return vals[0] if len(vals) == 1 else vals
def use_2d(self, override=None):
if not self.use_2d_normal_to:
return False
elif override is not None:
return override
else:
return True
def __len__(self, use_2d_override=None):
if self.use_2d(use_2d_override):
return len(self.img_data_2d)
return len(self.img_data_3d)
def __getitem__(self, dataset_idx, use_2d_override=None):
use_2d = self.use_2d(use_2d_override)
if use_2d:
all_ids = self.get_2d_ids()
_id = all_ids[dataset_idx]
image = self.img_data_2d.get(_id, torch.tensor([]))
label = self.label_data_2d.get(_id, torch.tensor([]))
# For 2D id cut last 4 "003rW100"
_3d_id = self.get_3d_from_2d_identifiers(_id)
image_path = self.img_paths[_3d_id]
label_path = self.label_paths[_3d_id]
else:
all_ids = self.get_3d_ids()
_id = all_ids[dataset_idx]
image = self.img_data_3d.get(_id, torch.tensor([]))
label = self.label_data_3d.get(_id, torch.tensor([]))
image_path = self.img_paths[_id]
label_path = self.label_paths[_id]
spat_augment_grid = []
if self.use_modified:
if use_2d:
modified_label = self.modified_label_data_2d.get(_id, label.detach().clone())
else:
modified_label = self.modified_label_data_3d.get(_id, label.detach().clone())
else:
modified_label = label.detach().clone()
b_image = image.unsqueeze(0).cuda()
b_label = label.unsqueeze(0).cuda()
modified_label, _ = ensure_dense(modified_label)
b_modified_label = modified_label.unsqueeze(0).cuda()
if self.do_augment and not self.augment_at_collate:
b_image, b_label, b_spat_augment_grid = self.augment(
b_image, b_label, use_2d, pre_interpolation_factor=self.pre_interpolation_factor
)
_, b_modified_label, _ = spatial_augment(
b_label=b_modified_label, use_2d=use_2d, b_grid_override=b_spat_augment_grid,
pre_interpolation_factor=self.pre_interpolation_factor
)
spat_augment_grid = b_spat_augment_grid.squeeze(0).detach().cpu().clone()
elif not self.do_augment:
b_image, b_label = interpolate_sample(b_image, b_label, 2., use_2d)
_, b_modified_label = interpolate_sample(b_label=b_modified_label, scale_factor=2.,
use_2d=use_2d)
image = b_image.squeeze(0).cpu()
label = b_label.squeeze(0).cpu()
modified_label = b_modified_label.squeeze(0).cpu()
if use_2d:
assert image.dim() == label.dim() == 2
else:
assert image.dim() == label.dim() == 3
return {
'image': image,
'label': label,
'modified_label': modified_label,
            # if disturbance is off, modified_label equals label
'dataset_idx': dataset_idx,
'id': _id,
'image_path': image_path,
'label_path': label_path,
'spat_augment_grid': spat_augment_grid
}
def get_3d_item(self, _3d_dataset_idx):
return self.__getitem__(_3d_dataset_idx, use_2d_override=False)
def get_data(self, use_2d_override=None):
if self.use_2d(use_2d_override):
img_stack = torch.stack(list(self.img_data_2d.values()), dim=0)
label_stack = torch.stack(list(self.label_data_2d.values()), dim=0)
modified_label_stack = torch.stack(list(self.modified_label_data_2d.values()), dim=0)
else:
img_stack = torch.stack(list(self.img_data_3d.values()), dim=0)
label_stack = torch.stack(list(self.label_data_3d.values()), dim=0)
modified_label_stack = torch.stack(list(self.modified_label_data_3d.values()), dim=0)
return img_stack, label_stack, modified_label_stack
def disturb_idxs(self, all_idxs, disturbance_mode, disturbance_strength=1., use_2d_override=None):
if self.prevent_disturbance:
warnings.warn("Disturbed idxs shall be set but disturbance is prevented for dataset.")
return
use_2d = self.use_2d(use_2d_override)
if all_idxs is not None:
if isinstance(all_idxs, (np.ndarray, torch.Tensor)):
all_idxs = all_idxs.tolist()
self.disturbed_idxs = all_idxs
else:
self.disturbed_idxs = []
# Reset modified data
for idx in range(self.__len__(use_2d_override=use_2d)):
if use_2d:
label_id = self.get_2d_ids()[idx]
self.modified_label_data_2d[label_id] = self.label_data_2d[label_id]
else:
label_id = self.get_3d_ids()[idx]
self.modified_label_data_3d[label_id] = self.label_data_3d[label_id]
# Now apply disturbance
if idx in self.disturbed_idxs:
if use_2d:
label = self.modified_label_data_2d[label_id].detach().clone()
else:
label = self.modified_label_data_3d[label_id].detach().clone()
with torch_manual_seeded(idx):
if str(disturbance_mode)==str(LabelDisturbanceMode.FLIP_ROLL):
roll_strength = 10*disturbance_strength
if use_2d:
modified_label = \
torch.roll(
label.transpose(-2,-1),
(
int(torch.randn(1)*roll_strength),
int(torch.randn(1)*roll_strength)
),(-2,-1)
)
else:
modified_label = \
torch.roll(
label.permute(1,2,0),
(
int(torch.randn(1)*roll_strength),
int(torch.randn(1)*roll_strength),
int(torch.randn(1)*roll_strength)
),(-3,-2,-1)
)
elif str(disturbance_mode)==str(LabelDisturbanceMode.AFFINE):
b_modified_label = label.unsqueeze(0).cuda()
_, b_modified_label, _ = spatial_augment(b_label=b_modified_label, use_2d=use_2d,
bspline_num_ctl_points=6, bspline_strength=0., bspline_probability=0.,
affine_strength=0.09*disturbance_strength,
add_affine_translation=0.18*disturbance_strength, affine_probability=1.)
modified_label = b_modified_label.squeeze(0).cpu()
else:
raise ValueError(f"Disturbance mode {disturbance_mode} is not implemented.")
if use_2d:
self.modified_label_data_2d[label_id] = modified_label
else:
self.modified_label_data_3d[label_id] = modified_label
def train(self, augment=True, use_modified=True):
self.do_augment = augment
self.use_modified = use_modified
def eval(self, augment=False, use_modified=False):
self.train(augment, use_modified)
def set_augment_at_collate(self, augment_at_collate=True):
self.augment_at_collate = augment_at_collate
def get_efficient_augmentation_collate_fn(self):
use_2d = True if self.use_2d_normal_to else False
def collate_closure(batch):
batch = torch.utils.data._utils.collate.default_collate(batch)
if self.augment_at_collate and self.do_augment:
# Augment the whole batch not just one sample
b_image = batch['image'].cuda()
b_label = batch['label'].cuda()
b_modified_label = batch['modified_label'].cuda()
b_image, b_label, b_spat_augment_grid = self.augment(
b_image, b_label, use_2d, pre_interpolation_factor=self.pre_interpolation_factor
)
_, b_modified_label, _ = spatial_augment(
b_label=b_modified_label, use_2d=use_2d, b_grid_override=b_spat_augment_grid,
pre_interpolation_factor=self.pre_interpolation_factor
)
b_spat_augment_grid = b_spat_augment_grid.detach().clone()
batch['image'], batch['label'], batch['modified_label'], batch['spat_augment_grid'] = b_image, b_label, b_modified_label, b_spat_augment_grid
return batch
return collate_closure
def augment(self, b_image, b_label, use_2d,
noise_strength=0.05,
bspline_num_ctl_points=6, bspline_strength=0.03, bspline_probability=.95,
affine_strength=0.2, affine_probability=.45,
pre_interpolation_factor=2.):
if use_2d:
assert b_image.dim() == b_label.dim() == 3, \
f"Augmenting 2D. Input batch of image and " \
f"label should be BxHxW but are {b_image.shape} and {b_label.shape}"
else:
assert b_image.dim() == b_label.dim() == 4, \
f"Augmenting 3D. Input batch of image and " \
f"label should be BxDxHxW but are {b_image.shape} and {b_label.shape}"
b_image = augmentNoise(b_image, strength=noise_strength)
b_image, b_label, b_spat_augment_grid = spatial_augment(
b_image, b_label,
bspline_num_ctl_points=bspline_num_ctl_points, bspline_strength=bspline_strength, bspline_probability=bspline_probability,
affine_strength=affine_strength, affine_probability=affine_probability,
pre_interpolation_factor=pre_interpolation_factor, use_2d=use_2d)
b_label = b_label.long()
return b_image, b_label, b_spat_augment_grid
|
[
"torch.utils.data._utils.collate.default_collate",
"deep_staple.utils.torch_utils.ensure_dense",
"torch.load",
"torch.max",
"deep_staple.utils.torch_utils.augmentNoise",
"torch.tensor",
"numpy.quantile",
"numpy.linspace",
"deep_staple.utils.torch_utils.interpolate_sample",
"deep_staple.utils.torch_utils.spatial_augment",
"deep_staple.utils.torch_utils.torch_manual_seeded",
"warnings.warn",
"torch.randn"
] |
[((14231, 14259), 'deep_staple.utils.torch_utils.ensure_dense', 'ensure_dense', (['modified_label'], {}), '(modified_label)\n', (14243, 14259), False, 'from deep_staple.utils.torch_utils import interpolate_sample, augmentNoise, spatial_augment, torch_manual_seeded, ensure_dense\n'), ((22241, 22287), 'deep_staple.utils.torch_utils.augmentNoise', 'augmentNoise', (['b_image'], {'strength': 'noise_strength'}), '(b_image, strength=noise_strength)\n', (22253, 22287), False, 'from deep_staple.utils.torch_utils import interpolate_sample, augmentNoise, spatial_augment, torch_manual_seeded, ensure_dense\n'), ((22336, 22648), 'deep_staple.utils.torch_utils.spatial_augment', 'spatial_augment', (['b_image', 'b_label'], {'bspline_num_ctl_points': 'bspline_num_ctl_points', 'bspline_strength': 'bspline_strength', 'bspline_probability': 'bspline_probability', 'affine_strength': 'affine_strength', 'affine_probability': 'affine_probability', 'pre_interpolation_factor': 'pre_interpolation_factor', 'use_2d': 'use_2d'}), '(b_image, b_label, bspline_num_ctl_points=\n bspline_num_ctl_points, bspline_strength=bspline_strength,\n bspline_probability=bspline_probability, affine_strength=\n affine_strength, affine_probability=affine_probability,\n pre_interpolation_factor=pre_interpolation_factor, use_2d=use_2d)\n', (22351, 22648), False, 'from deep_staple.utils.torch_utils import interpolate_sample, augmentNoise, spatial_augment, torch_manual_seeded, ensure_dense\n'), ((6159, 6188), 'torch.load', 'torch.load', (['fixed_weight_file'], {}), '(fixed_weight_file)\n', (6169, 6188), False, 'import torch\n'), ((14597, 14756), 'deep_staple.utils.torch_utils.spatial_augment', 'spatial_augment', ([], {'b_label': 'b_modified_label', 'use_2d': 'use_2d', 'b_grid_override': 'b_spat_augment_grid', 'pre_interpolation_factor': 'self.pre_interpolation_factor'}), '(b_label=b_modified_label, use_2d=use_2d, b_grid_override=\n b_spat_augment_grid, pre_interpolation_factor=self.pre_interpolation_factor\n )\n', (14612, 14756), False, 'from deep_staple.utils.torch_utils import interpolate_sample, augmentNoise, spatial_augment, torch_manual_seeded, ensure_dense\n'), ((16728, 16819), 'warnings.warn', 'warnings.warn', (['"""Disturbed idxs shall be set but disturbance is prevented for dataset."""'], {}), "(\n 'Disturbed idxs shall be set but disturbance is prevented for dataset.')\n", (16741, 16819), False, 'import warnings\n'), ((20456, 20510), 'torch.utils.data._utils.collate.default_collate', 'torch.utils.data._utils.collate.default_collate', (['batch'], {}), '(batch)\n', (20503, 20510), False, 'import torch\n'), ((3757, 3779), 'torch.max', 'torch.max', (['label_stack'], {}), '(label_stack)\n', (3766, 3779), False, 'import torch\n'), ((6518, 6571), 'numpy.quantile', 'np.quantile', (['fixed_weights', 'fixed_weight_min_quantile'], {}), '(fixed_weights, fixed_weight_min_quantile)\n', (6529, 6571), True, 'import numpy as np\n'), ((13152, 13168), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (13164, 13168), False, 'import torch\n'), ((13218, 13234), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (13230, 13234), False, 'import torch\n'), ((13579, 13595), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (13591, 13595), False, 'import torch\n'), ((13645, 13661), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (13657, 13661), False, 'import torch\n'), ((14945, 14994), 'deep_staple.utils.torch_utils.interpolate_sample', 'interpolate_sample', (['b_image', 'b_label', '(2.0)', 'use_2d'], {}), '(b_image, b_label, 2.0, 
use_2d)\n', (14963, 14994), False, 'from deep_staple.utils.torch_utils import interpolate_sample, augmentNoise, spatial_augment, torch_manual_seeded, ensure_dense\n'), ((15028, 15105), 'deep_staple.utils.torch_utils.interpolate_sample', 'interpolate_sample', ([], {'b_label': 'b_modified_label', 'scale_factor': '(2.0)', 'use_2d': 'use_2d'}), '(b_label=b_modified_label, scale_factor=2.0, use_2d=use_2d)\n', (15046, 15105), False, 'from deep_staple.utils.torch_utils import interpolate_sample, augmentNoise, spatial_augment, torch_manual_seeded, ensure_dense\n'), ((21026, 21185), 'deep_staple.utils.torch_utils.spatial_augment', 'spatial_augment', ([], {'b_label': 'b_modified_label', 'use_2d': 'use_2d', 'b_grid_override': 'b_spat_augment_grid', 'pre_interpolation_factor': 'self.pre_interpolation_factor'}), '(b_label=b_modified_label, use_2d=use_2d, b_grid_override=\n b_spat_augment_grid, pre_interpolation_factor=self.pre_interpolation_factor\n )\n', (21041, 21185), False, 'from deep_staple.utils.torch_utils import interpolate_sample, augmentNoise, spatial_augment, torch_manual_seeded, ensure_dense\n'), ((17843, 17867), 'deep_staple.utils.torch_utils.torch_manual_seeded', 'torch_manual_seeded', (['idx'], {}), '(idx)\n', (17862, 17867), False, 'from deep_staple.utils.torch_utils import interpolate_sample, augmentNoise, spatial_augment, torch_manual_seeded, ensure_dense\n'), ((6397, 6421), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(5)'], {}), '(0.0, 1.0, 5)\n', (6408, 6421), True, 'import numpy as np\n'), ((19194, 19457), 'deep_staple.utils.torch_utils.spatial_augment', 'spatial_augment', ([], {'b_label': 'b_modified_label', 'use_2d': 'use_2d', 'bspline_num_ctl_points': '(6)', 'bspline_strength': '(0.0)', 'bspline_probability': '(0.0)', 'affine_strength': '(0.09 * disturbance_strength)', 'add_affine_translation': '(0.18 * disturbance_strength)', 'affine_probability': '(1.0)'}), '(b_label=b_modified_label, use_2d=use_2d,\n bspline_num_ctl_points=6, bspline_strength=0.0, bspline_probability=0.0,\n affine_strength=0.09 * disturbance_strength, add_affine_translation=\n 0.18 * disturbance_strength, affine_probability=1.0)\n', (19209, 19457), False, 'from deep_staple.utils.torch_utils import interpolate_sample, augmentNoise, spatial_augment, torch_manual_seeded, ensure_dense\n'), ((8419, 8436), 'deep_staple.utils.torch_utils.ensure_dense', 'ensure_dense', (['lbl'], {}), '(lbl)\n', (8431, 8436), False, 'from deep_staple.utils.torch_utils import interpolate_sample, augmentNoise, spatial_augment, torch_manual_seeded, ensure_dense\n'), ((18284, 18298), 'torch.randn', 'torch.randn', (['(1)'], {}), '(1)\n', (18295, 18298), False, 'import torch\n'), ((18359, 18373), 'torch.randn', 'torch.randn', (['(1)'], {}), '(1)\n', (18370, 18373), False, 'import torch\n'), ((18730, 18744), 'torch.randn', 'torch.randn', (['(1)'], {}), '(1)\n', (18741, 18744), False, 'import torch\n'), ((18805, 18819), 'torch.randn', 'torch.randn', (['(1)'], {}), '(1)\n', (18816, 18819), False, 'import torch\n'), ((18880, 18894), 'torch.randn', 'torch.randn', (['(1)'], {}), '(1)\n', (18891, 18894), False, 'import torch\n')]
|
from __future__ import annotations
from argparse import ArgumentParser
from collections import deque
import numpy as np
def count_lte(mat: np.ndarray) -> np.ndarray:
"""
lte[i,j] = count (neighbours <= mat[i,j])
. t .
l . r
. b .
"""
aug = np.pad(mat.astype(float), (1, 1), mode="constant", constant_values=np.inf)
l = aug[1:-1, :-2] <= mat
r = aug[1:-1, 2:] <= mat
t = aug[:-2, 1:-1] <= mat
b = aug[2:, 1:-1] <= mat
return l + r + t + b
def part1(xs):
lte = count_lte(xs)
return np.sum(1 + xs[lte == 0])
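# Worked example for the two helpers above (illustrative grid, not puzzle input):
# for [[2, 1], [9, 9]] only the 1 at (0, 1) has no neighbour <= itself, so
# count_lte() is 0 there and part1() returns 1 + 1 = 2.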
def get_basin(xs: np.ndarray, row: int, col: int) -> list[tuple[int, int]]:
"""
Return the indices of the locations flowing towards the low point `row, col`.
"""
h, w = xs.shape
out = []
q = deque()
v = np.zeros_like(xs).astype(bool)
q.append((row, col))
v[row, col] = True
while q:
i, j = q.popleft()
out.append((i, j))
for di, dj in [(0, -1), (0, 1), (-1, 0), (1, 0)]:
i2 = i + di
j2 = j + dj
if not (0 <= i2 < h) or not (0 <= j2 < w):
continue
if v[i2, j2]:
continue
if xs[i2, j2] == 9:
continue
q.append((i2, j2))
v[i2, j2] = True
return out
def part2(xs):
lte = count_lte(xs)
basins = [get_basin(xs, row, col) for row, col in zip(*np.where(lte == 0))]
top = sorted(map(len, basins), reverse=True)
return np.product(top[:3])
def visualize(xs):
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap
lte = count_lte(xs)
cmap = cm.Blues_r(np.linspace(0, 1, 10))
cmap[-1] = [0, 0, 0, 1]
plt.imshow(xs, cmap=ListedColormap(cmap))
basins = sorted(
[get_basin(xs, row, col) for row, col in zip(*np.where(lte == 0))],
key=len,
reverse=True,
)
cmap = cm.viridis(np.linspace(0.8, 0.2, 6))
for i in range(3):
r, c = zip(*basins[i])
plt.scatter(c, r, c=[cmap[i * 2]], marker="s")
r, c = np.where(lte == 0)
plt.scatter(c, r, c="red", marker="x")
plt.show()
def main():
with open(args.file) as fp:
xs = np.array([[int(i) for i in x.strip()] for x in fp.readlines()])
if args.visualize:
visualize(xs)
return
print("Part 1:", part1(xs))
print("Part 2:", part2(xs))
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--file", type=str, required=True)
parser.add_argument(
"--visualize",
action="store_true",
help="Visualize the map with low points and basins",
)
args = parser.parse_args()
main()
|
[
"numpy.product",
"collections.deque",
"argparse.ArgumentParser",
"numpy.where",
"matplotlib.colors.ListedColormap",
"numpy.sum",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"numpy.zeros_like",
"matplotlib.pyplot.show"
] |
[((544, 568), 'numpy.sum', 'np.sum', (['(1 + xs[lte == 0])'], {}), '(1 + xs[lte == 0])\n', (550, 568), True, 'import numpy as np\n'), ((787, 794), 'collections.deque', 'deque', ([], {}), '()\n', (792, 794), False, 'from collections import deque\n'), ((1510, 1529), 'numpy.product', 'np.product', (['top[:3]'], {}), '(top[:3])\n', (1520, 1529), True, 'import numpy as np\n'), ((2125, 2143), 'numpy.where', 'np.where', (['(lte == 0)'], {}), '(lte == 0)\n', (2133, 2143), True, 'import numpy as np\n'), ((2148, 2186), 'matplotlib.pyplot.scatter', 'plt.scatter', (['c', 'r'], {'c': '"""red"""', 'marker': '"""x"""'}), "(c, r, c='red', marker='x')\n", (2159, 2186), True, 'import matplotlib.pyplot as plt\n'), ((2192, 2202), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2200, 2202), True, 'import matplotlib.pyplot as plt\n'), ((2494, 2510), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2508, 2510), False, 'from argparse import ArgumentParser\n'), ((1714, 1735), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (1725, 1735), True, 'import numpy as np\n'), ((1978, 2002), 'numpy.linspace', 'np.linspace', (['(0.8)', '(0.2)', '(6)'], {}), '(0.8, 0.2, 6)\n', (1989, 2002), True, 'import numpy as np\n'), ((2066, 2112), 'matplotlib.pyplot.scatter', 'plt.scatter', (['c', 'r'], {'c': '[cmap[i * 2]]', 'marker': '"""s"""'}), "(c, r, c=[cmap[i * 2]], marker='s')\n", (2077, 2112), True, 'import matplotlib.pyplot as plt\n'), ((803, 820), 'numpy.zeros_like', 'np.zeros_like', (['xs'], {}), '(xs)\n', (816, 820), True, 'import numpy as np\n'), ((1790, 1810), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['cmap'], {}), '(cmap)\n', (1804, 1810), False, 'from matplotlib.colors import ListedColormap\n'), ((1427, 1445), 'numpy.where', 'np.where', (['(lte == 0)'], {}), '(lte == 0)\n', (1435, 1445), True, 'import numpy as np\n'), ((1888, 1906), 'numpy.where', 'np.where', (['(lte == 0)'], {}), '(lte == 0)\n', (1896, 1906), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
def split_reim(array):
"""Split a complex valued matrix into its real and imaginary parts.
Args:
array(complex): An array of shape (batch_size, N, N) or (batch_size, N, N, 1)
Returns:
split_array(float): An array of shape (batch_size, N, N, 2) containing the real part on one channel and the imaginary part on another channel
"""
real = np.real(array)
imag = np.imag(array)
split_array = np.stack((real, imag), axis=3)
return split_array
def split_reim_tensor(array):
"""Split a complex valued tensor into its real and imaginary parts.
Args:
array(complex): A tensor of shape (batch_size, N, N) or (batch_size, N, N, 1)
Returns:
split_array(float): A tensor of shape (batch_size, N, N, 2) containing the real part on one channel and the imaginary part on another channel
"""
real = tf.math.real(array)
imag = tf.math.imag(array)
split_array = tf.stack((real, imag), axis=3)
return split_array
def split_reim_channels(array):
"""Split a complex valued tensor into its real and imaginary parts.
Args:
array(complex): A tensor of shape (batch_size, N, N) or (batch_size, N, N, 1)
Returns:
split_array(float): A tensor of shape (batch_size, N, N, 2) containing the real part on one channel and the imaginary part on another channel
"""
real = tf.math.real(array)
imag = tf.math.imag(array)
n_ch = array.get_shape().as_list()[3]
split_array = tf.concat((real, imag), axis=3)
return split_array
def join_reim(array):
"""Join the real and imaginary channels of a matrix to a single complex-valued matrix.
Args:
array(float): An array of shape (batch_size, N, N, 2)
Returns:
        joined_array(complex): A complex-valued array of shape (batch_size, N, N)
"""
joined_array = array[:, :, :, 0] + 1j * array[:, :, :, 1]
return joined_array
def join_reim_tensor(array):
"""Join the real and imaginary channels of a matrix to a single complex-valued matrix.
Args:
array(float): An array of shape (batch_size, N, N, 2)
Returns:
joined_array(complex): A complex-valued array of shape (batch_size, N, N)
"""
joined_array = tf.cast(array[:, :, :, 0], 'complex64') + \
1j * tf.cast(array[:, :, :, 1], 'complex64')
return joined_array
def join_reim_channels(array):
"""Join the real and imaginary channels of a matrix to a single complex-valued matrix.
Args:
array(float): An array of shape (batch_size, N, N, ch)
Returns:
joined_array(complex): A complex-valued array of shape (batch_size, N, N, ch/2)
"""
ch = array.get_shape().as_list()[3]
joined_array = tf.cast(array[:,
:,
:,
:int(ch / 2)],
dtype=tf.complex64) + 1j * tf.cast(array[:,
:,
:,
int(ch / 2):],
dtype=tf.complex64)
return joined_array
def convert_to_frequency_domain(images):
"""Convert an array of images to their Fourier transforms.
Args:
images(float): An array of shape (batch_size, N, N, 2)
Returns:
spectra(float): An FFT-ed array of shape (batch_size, N, N, 2)
"""
n = images.shape[1]
spectra = split_reim(np.fft.fft2(join_reim(images), axes=(1, 2)))
return spectra
def convert_tensor_to_frequency_domain(images):
"""Convert a tensor of images to their Fourier transforms.
Args:
images(float): A tensor of shape (batch_size, N, N, 2)
Returns:
spectra(float): An FFT-ed tensor of shape (batch_size, N, N, 2)
"""
n = images.shape[1]
spectra = split_reim_tensor(tf.signal.fft2d(join_reim_tensor(images)))
return spectra
def convert_to_image_domain(spectra):
"""Convert an array of Fourier spectra to the corresponding images.
Args:
spectra(float): An array of shape (batch_size, N, N, 2)
Returns:
images(float): An IFFT-ed array of shape (batch_size, N, N, 2)
"""
n = spectra.shape[1]
images = split_reim(np.fft.ifft2(join_reim(spectra), axes=(1, 2)))
return images
def convert_tensor_to_image_domain(spectra):
"""Convert an array of Fourier spectra to the corresponding images.
Args:
spectra(float): An array of shape (batch_size, N, N, 2)
Returns:
images(float): An IFFT-ed array of shape (batch_size, N, N, 2)
"""
n = spectra.shape[1]
images = split_reim_tensor(tf.signal.ifft2d(join_reim_tensor(spectra)))
return images
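if __name__ == '__main__':
    # Round-trip sketch (illustrative addition, not part of the original module):
    # splitting a complex batch into real/imaginary channels, going to k-space and
    # back should recover the input up to floating-point error. Shapes and values
    # below are assumptions.
    rng = np.random.default_rng(0)
    batch = rng.standard_normal((2, 8, 8)) + 1j * rng.standard_normal((2, 8, 8))
    images = split_reim(batch)                     # (2, 8, 8, 2)
    spectra = convert_to_frequency_domain(images)  # (2, 8, 8, 2)
    recovered = convert_to_image_domain(spectra)   # (2, 8, 8, 2)
    print(np.allclose(images, recovered))          # expected: True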
|
[
"tensorflow.math.imag",
"numpy.real",
"numpy.stack",
"tensorflow.concat",
"tensorflow.math.real",
"tensorflow.cast",
"numpy.imag",
"tensorflow.stack"
] |
[((417, 431), 'numpy.real', 'np.real', (['array'], {}), '(array)\n', (424, 431), True, 'import numpy as np\n'), ((443, 457), 'numpy.imag', 'np.imag', (['array'], {}), '(array)\n', (450, 457), True, 'import numpy as np\n'), ((476, 506), 'numpy.stack', 'np.stack', (['(real, imag)'], {'axis': '(3)'}), '((real, imag), axis=3)\n', (484, 506), True, 'import numpy as np\n'), ((911, 930), 'tensorflow.math.real', 'tf.math.real', (['array'], {}), '(array)\n', (923, 930), True, 'import tensorflow as tf\n'), ((942, 961), 'tensorflow.math.imag', 'tf.math.imag', (['array'], {}), '(array)\n', (954, 961), True, 'import tensorflow as tf\n'), ((980, 1010), 'tensorflow.stack', 'tf.stack', (['(real, imag)'], {'axis': '(3)'}), '((real, imag), axis=3)\n', (988, 1010), True, 'import tensorflow as tf\n'), ((1417, 1436), 'tensorflow.math.real', 'tf.math.real', (['array'], {}), '(array)\n', (1429, 1436), True, 'import tensorflow as tf\n'), ((1448, 1467), 'tensorflow.math.imag', 'tf.math.imag', (['array'], {}), '(array)\n', (1460, 1467), True, 'import tensorflow as tf\n'), ((1528, 1559), 'tensorflow.concat', 'tf.concat', (['(real, imag)'], {'axis': '(3)'}), '((real, imag), axis=3)\n', (1537, 1559), True, 'import tensorflow as tf\n'), ((2277, 2316), 'tensorflow.cast', 'tf.cast', (['array[:, :, :, 0]', '"""complex64"""'], {}), "(array[:, :, :, 0], 'complex64')\n", (2284, 2316), True, 'import tensorflow as tf\n'), ((2334, 2373), 'tensorflow.cast', 'tf.cast', (['array[:, :, :, 1]', '"""complex64"""'], {}), "(array[:, :, :, 1], 'complex64')\n", (2341, 2373), True, 'import tensorflow as tf\n')]
|
#!/bin/env python
# coding: utf-8
from __future__ import print_function
import sys
import argparse
import logging
import os
import math
import cv2
import numpy as np
class GenerateSyntheticData:
import PythonMagick as Magick
def __init__(self, logger=None):
        if logger is None:
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
self.logger = logging.getLogger()
else:
self.logger = logger
@staticmethod
def appendArgumentParser(argparser):
argparser.add_argument('--shift-x', type=int, help='')
argparser.add_argument('--shift-y', type=int, help='')
argparser.add_argument('--skew-x', type=float, help='')
argparser.add_argument('--skew-y', type=float, help='')
argparser.add_argument('--rotate', type=float, help='rotates image clock- or counterclock-wise (angle in degrees)')
argparser.add_argument('--horizontal_flip', action='store_true', help='horizontally flips image')
argparser.add_argument('--zoom', type=str, help='resize image; argument given in percentage')
        argparser.add_argument('--contrast', type=int, help='default=0; 0~infinity (integer times contrast is applied to image)')
argparser.add_argument('--brightness', type=float, help='default=100')
argparser.add_argument('--saturation', type=float, help='default=100')
argparser.add_argument('--hue', type=float, help='default=100')
argparser.add_argument('--blur', action='store_true', help='')
argparser.add_argument('--blur_radius', type=float, default=10, help='')
argparser.add_argument('--blur_sigma', type=float, default=1, help='')
argparser.add_argument('--gaussianBlur', action='store_true', help='')
argparser.add_argument('--gaussianBlur_width', type=float, default=5, help='')
argparser.add_argument('--gaussianBlur_sigma', type=float, default=1, help='')
argparser.add_argument('--despeckle', action='store_true', help='')
argparser.add_argument('--enhance', action='store_true', help='')
argparser.add_argument('--equalize', action='store_true', help='')
argparser.add_argument('--gamma', type=float, help='0 ~ 2; 1 is default')
argparser.add_argument('--implode', type=float, help='Implode factor 0~1; 0 (nothing) to 1 (full); 0.0 ~ 0.5 recommended.')
argparser.add_argument('--negate', action='store_true', help='')
argparser.add_argument('--normalize', action='store_true', help='')
argparser.add_argument('--quantize', action='store_true', help='')
argparser.add_argument('--reduceNoise', type=int, help='default=1')
argparser.add_argument('--shade', action='store_true', help='')
argparser.add_argument('--shade_azimuth', type=float, default=50, help='')
argparser.add_argument('--shade_elevation', type=float, default=50, help='')
argparser.add_argument('--sharpen', action='store_true', help='')
argparser.add_argument('--sharpen_radius', type=float, default=1, help='')
argparser.add_argument('--sharpen_sigma', type=float, default=0.5, help='')
argparser.add_argument('--swirl', type=float, help='degree; default=10')
argparser.add_argument('--wave', action='store_true', help='')
argparser.add_argument('--wave_amplitude', type=float, default=5, help='')
argparser.add_argument('--wave_wavelength', type=float, default=100, help='')
argparser.add_argument('--auto', action='store_true', help='')
argparser.add_argument('--auto_ops', type=str, default='', help='')
argparser.add_argument('--auto_rotate_min', type=float, default=0, help='')
argparser.add_argument('--auto_rotate_max', type=float, default=0, help='')
argparser.add_argument('--auto_zoom_min', type=float, default=0, help='')
argparser.add_argument('--auto_zoom_max', type=float, default=0, help='')
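    # Illustrative invocation (assumptions: the script name is hypothetical and
    # --auto is what routes into generateRandomOptions below). --auto_ops takes a
    # comma-separated list of op names, e.g.:
    #   python generate_synthetic_data.py --auto --auto_ops shift,rotate,zoom \
    #       --auto_zoom_min 90 --auto_zoom_max 110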
def generateRandomOptions(self, cmdArg):
def _generateRandomOptionsShift(args):
args.shift_x = int(np.abs(np.random.normal(0, 3))) # -10 ~ +10
args.shift_y = int(np.abs(np.random.normal(0, 1))) # -3 ~ +3
def _generateRandomOptionsSkew(args):
args.skew_x = int(np.random.normal(0, 3)) # -10 ~ +10
args.skew_y = int(np.random.normal(0, 3)) # -10 ~ +10
def _generateRandomOptionsRotate(args):
if cmdArg.auto_rotate_min != cmdArg.auto_rotate_max:
args.rotate = int(np.random.uniform(cmdArg.auto_rotate_min, cmdArg.auto_rotate_max))
else:
args.rotate = int(np.random.normal(0, 3)) # -10 ~ +10
def _generateRandomOptionsZoom(args):
if cmdArg.auto_zoom_min != cmdArg.auto_zoom_max:
args.zoom = str(int(np.random.uniform(cmdArg.auto_zoom_min, cmdArg.auto_zoom_max))) + '%'
else:
args.zoom = str(int(np.random.normal(100, 3))) + '%' # 90% ~ 110%
def _generateRandomOptionsContrast(args):
args.contrast = int(np.abs(np.random.normal(0, 1))) # 0 ~ +3
def _generateRandomOptionsBrightness(args):
args.brightness = np.random.normal(100, 5) # 85 ~ 115
def _generateRandomOptionsSaturation(args):
args.saturation = np.random.normal(100, 5) # 85 ~ 115
def _generateRandomOptionsHue(args):
args.hue = np.random.normal(100, 5) # 85 ~ 115
def _generateRandomOptionsBlur(args):
if np.random.binomial(1,0.1): # do blur
if np.random.binomial(1,0.5):
args.blur = True
else:
args.gaussianBlur = True
if args.blur:
args.blur_radius = np.abs(np.random.normal(0, 3)) # 0 ~ 10
args.blur_sigma = np.abs(np.random.normal(0, 0.7)) # 0 ~ 2
if args.gaussianBlur:
args.gaussianBlur_width = np.abs(np.random.normal(0, 3)) # 0 ~ 10
args.gaussianBlur_sigma = np.abs(np.random.normal(0, 0.7)) # 0 ~ 2
def _generateRandomOptionsHorizontalFlip(args):
args.horizontal_flip = (np.random.binomial(1,0.1) > 0)
def _generateRandomOptionsDespeckle(args):
args.despeckle = (np.random.binomial(1,0.5) > 0)
def _generateRandomOptionsEnhance(args):
args.enhance = (np.random.binomial(1,0.5) > 0)
def _generateRandomOptionsEqualize(args):
args.equalize = (np.random.binomial(1,0.1) == 1)
def _generateRandomOptionsNegate(args):
args.negate = (np.random.binomial(1,0.1) == 1)
def _generateRandomOptionsNormalize(args):
args.normalize = (np.random.binomial(1,0.1) > 0)
def _generateRandomOptionsQuantize(args):
args.quantize = (np.random.binomial(1,0.1) > 0)
def _generateRandomOptionsGamma(args):
args.gamma = np.abs(np.random.normal(1, 0.03)) # 0 ~ 2
def _generateRandomOptionsImplode(args):
args.implode = 0
if np.random.binomial(1,0.5) > 0:
args.implode = np.random.normal(0, 0.15) # -0.5 ~ 0.5
def _generateRandomOptionsReduceNoise(args):
args.reduceNoise = int(np.abs(np.random.normal(0, 0.7))) # 0 ~ 2
def _generateRandomOptionsShade(args):
args.shade = (np.random.binomial(1,0.1) > 0)
if args.shade:
args.shade_azimuth = np.random.normal(50, 17) # 0 ~ 100
args.shade_elevation = np.random.normal(50, 17) # 0 ~ 100
def _generateRandomOptionsSharpen(args):
args.sharpen = (np.random.binomial(1,0.1) > 0)
if args.sharpen:
args.sharpen_radius = np.abs(np.random.normal(0, 0.7)) # 0 ~ 2
args.sharpen_sigma = np.abs(np.random.normal(0, 0.3)) # 0 ~ 1
def _generateRandomOptionsSwirl(args):
args.swirl = np.random.normal(0, 5) # -15 ~ +15
def _generateRandomOptionsWave(args):
args.wave = (np.random.binomial(1,0.3) > 0)
if args.wave:
args.wave_amplitude = np.abs(np.random.normal(5, 0.3)) # 0 ~ 10
args.wave_wavelength = np.abs(np.random.normal(100, 10)) # 0 ~ 200
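        # start from an empty namespace; a value of None means "do not apply this operation"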
args = argparse.Namespace()
args.shift_x = args.shift_y = None
args.skew_x = args.skew_y = None
args.rotate = args.zoom = None
args.contrast = args.brightness = args.saturation = args.hue = None
args.blur = args.gaussianBlur = None
args.horizontal_flip = None
args.despeckle = args.enhance = args.reduceNoise = None
args.equalize = args.negate = args.normalize = args.quantize = args.gamma = None
args.shade = None
args.sharpen = None
args.implode = args.swirl = args.wave = None
if len(cmdArg.auto_ops)>0:
for op in cmdArg.auto_ops.split(","):
if op == 'shift': _generateRandomOptionsShift(args)
elif op == 'skew': _generateRandomOptionsSkew(args)
elif op == 'rotate': _generateRandomOptionsRotate(args)
elif op == 'zoom': _generateRandomOptionsZoom(args)
elif op == 'contrast': _generateRandomOptionsContrast(args)
elif op == 'brightness': _generateRandomOptionsBrightness(args)
elif op == 'saturation': _generateRandomOptionsSaturation(args)
elif op == 'hue': _generateRandomOptionsHue(args)
elif op == 'blur': _generateRandomOptionsBlur(args)
elif op == 'horizontal_flip': _generateRandomOptionsHorizontalFlip(args)
elif op == 'despeckle': _generateRandomOptionsDespeckle(args)
elif op == 'enhance': _generateRandomOptionsEnhance(args)
elif op == 'equalize': _generateRandomOptionsEqualize(args)
elif op == 'negate': _generateRandomOptionsNegate(args)
elif op == 'normalize': _generateRandomOptionsNormalize(args)
elif op == 'quantize': _generateRandomOptionsQuantize(args)
elif op == 'gamma': _generateRandomOptionsGamma(args)
elif op == 'implode': _generateRandomOptionsImplode(args)
elif op == 'reduceNoise': _generateRandomOptionsReduceNoise(args)
elif op == 'shade': _generateRandomOptionsShade(args)
elif op == 'sharpen': _generateRandomOptionsSharpen(args)
elif op == 'swirl': _generateRandomOptionsSwirl(args)
elif op == 'wave': _generateRandomOptionsWave(args)
else:
self.logger.error('Unknown Operation Name ' + op)
else: # apply all operations
_generateRandomOptionsShift(args)
_generateRandomOptionsSkew(args)
_generateRandomOptionsRotate(args)
_generateRandomOptionsZoom(args)
_generateRandomOptionsContrast(args)
_generateRandomOptionsBrightness(args)
_generateRandomOptionsSaturation(args)
_generateRandomOptionsHue(args)
_generateRandomOptionsBlur(args)
#_generateRandomOptionsHorizontalFlip(args)
_generateRandomOptionsDespeckle(args)
_generateRandomOptionsEnhance(args)
#_generateRandomOptionsEqualize(args)
#_generateRandomOptionsNegate(args)
_generateRandomOptionsNormalize(args)
_generateRandomOptionsQuantize(args)
_generateRandomOptionsGamma(args)
_generateRandomOptionsImplode(args)
_generateRandomOptionsReduceNoise(args)
_generateRandomOptionsShade(args)
_generateRandomOptionsSharpen(args)
_generateRandomOptionsSwirl(args)
#_generateRandomOptionsWave(args)
self.logger.debug('Randomly generated options: ')
for key in vars(args):
self.logger.debug(' -- %s: %s' % (key, getattr(args, key)))
self.logger.debug('')
return args
def isVideo(self, inputF):
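        """Return True if the file name ends with a known video extension."""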
video_file_extensions = (
'.264', '.3g2', '.3gp', '.3gp2', '.3gpp', '.3gpp2', '.3mm', '.3p2', '.60d', '.787', '.89', '.aaf', '.aec', '.aep', '.aepx',
'.aet', '.aetx', '.ajp', '.ale', '.am', '.amc', '.amv', '.amx', '.anim', '.aqt', '.arcut', '.arf', '.asf', '.asx', '.avb',
'.avc', '.avd', '.avi', '.avp', '.avs', '.avs', '.avv', '.axm', '.bdm', '.bdmv', '.bdt2', '.bdt3', '.bik', '.bin', '.bix',
'.bmk', '.bnp', '.box', '.bs4', '.bsf', '.bvr', '.byu', '.camproj', '.camrec', '.camv', '.ced', '.cel', '.cine', '.cip',
'.clpi', '.cmmp', '.cmmtpl', '.cmproj', '.cmrec', '.cpi', '.cst', '.cvc', '.cx3', '.d2v', '.d3v', '.dat', '.dav', '.dce',
'.dck', '.dcr', '.dcr', '.ddat', '.dif', '.dir', '.divx', '.dlx', '.dmb', '.dmsd', '.dmsd3d', '.dmsm', '.dmsm3d', '.dmss',
'.dmx', '.dnc', '.dpa', '.dpg', '.dream', '.dsy', '.dv', '.dv-avi', '.dv4', '.dvdmedia', '.dvr', '.dvr-ms', '.dvx', '.dxr',
'.dzm', '.dzp', '.dzt', '.edl', '.evo', '.eye', '.ezt', '.f4p', '.f4v', '.fbr', '.fbr', '.fbz', '.fcp', '.fcproject',
'.ffd', '.flc', '.flh', '.fli', '.flv', '.flx', '.gfp', '.gl', '.gom', '.grasp', '.gts', '.gvi', '.gvp', '.h264', '.hdmov',
'.hkm', '.ifo', '.imovieproj', '.imovieproject', '.ircp', '.irf', '.ism', '.ismc', '.ismv', '.iva', '.ivf', '.ivr', '.ivs',
'.izz', '.izzy', '.jss', '.jts', '.jtv', '.k3g', '.kmv', '.ktn', '.lrec', '.lsf', '.lsx', '.m15', '.m1pg', '.m1v', '.m21',
'.m21', '.m2a', '.m2p', '.m2t', '.m2ts', '.m2v', '.m4e', '.m4u', '.m4v', '.m75', '.mani', '.meta', '.mgv', '.mj2', '.mjp',
'.mjpg', '.mk3d', '.mkv', '.mmv', '.mnv', '.mob', '.mod', '.modd', '.moff', '.moi', '.moov', '.mov', '.movie', '.mp21',
'.mp21', '.mp2v', '.mp4', '.mp4v', '.mpe', '.mpeg', '.mpeg1', '.mpeg4', '.mpf', '.mpg', '.mpg2', '.mpgindex', '.mpl',
'.mpl', '.mpls', '.mpsub', '.mpv', '.mpv2', '.mqv', '.msdvd', '.mse', '.msh', '.mswmm', '.mts', '.mtv', '.mvb', '.mvc',
'.mvd', '.mve', '.mvex', '.mvp', '.mvp', '.mvy', '.mxf', '.mxv', '.mys', '.ncor', '.nsv', '.nut', '.nuv', '.nvc', '.ogm',
'.ogv', '.ogx', '.osp', '.otrkey', '.pac', '.par', '.pds', '.pgi', '.photoshow', '.piv', '.pjs', '.playlist', '.plproj',
'.pmf', '.pmv', '.pns', '.ppj', '.prel', '.pro', '.prproj', '.prtl', '.psb', '.psh', '.pssd', '.pva', '.pvr', '.pxv',
'.qt', '.qtch', '.qtindex', '.qtl', '.qtm', '.qtz', '.r3d', '.rcd', '.rcproject', '.rdb', '.rec', '.rm', '.rmd', '.rmd',
'.rmp', '.rms', '.rmv', '.rmvb', '.roq', '.rp', '.rsx', '.rts', '.rts', '.rum', '.rv', '.rvid', '.rvl', '.sbk', '.sbt',
'.scc', '.scm', '.scm', '.scn', '.screenflow', '.sec', '.sedprj', '.seq', '.sfd', '.sfvidcap', '.siv', '.smi', '.smi',
'.smil', '.smk', '.sml', '.smv', '.spl', '.sqz', '.srt', '.ssf', '.ssm', '.stl', '.str', '.stx', '.svi', '.swf', '.swi',
'.swt', '.tda3mt', '.tdx', '.thp', '.tivo', '.tix', '.tod', '.tp', '.tp0', '.tpd', '.tpr', '.trp', '.ts', '.tsp', '.ttxt',
'.tvs', '.usf', '.usm', '.vc1', '.vcpf', '.vcr', '.vcv', '.vdo', '.vdr', '.vdx', '.veg', '.vem', '.vep', '.vf', '.vft',
'.vfw', '.vfz', '.vgz', '.vid', '.video', '.viewlet', '.viv', '.vivo', '.vlab', '.vob', '.vp3', '.vp6', '.vp7', '.vpj',
'.vro', '.vs4', '.vse', '.vsp', '.w32', '.wcp', '.webm', '.wlmp', '.wm', '.wmd', '.wmmp', '.wmv', '.wmx', '.wot', '.wp3',
'.wpl', '.wtv', '.wve', '.wvx', '.xej', '.xel', '.xesc', '.xfl', '.xlmv', '.xmv', '.xvid', '.y4m', '.yog', '.yuv', '.zeg',
'.zm1', '.zm2', '.zm3', '.zmv')
        if inputF.endswith(video_file_extensions):
return True
return False
def getFPS(self, vF):
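        """Read the frame rate of a video file; the property name differs between OpenCV 2 and 3+."""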
        video = cv2.VideoCapture(vF)
        major_ver, _, _ = cv2.__version__.split('.')
if int(major_ver) < 3 :
fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
else :
fps = video.get(cv2.CAP_PROP_FPS)
video.release()
return fps
def splitFromVideo(self, inputF, outputFPrefix):
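        """Write every frame of the input video to a numbered PNG and return the list of file names."""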
retVal = []
vid = cv2.VideoCapture(inputF)
idx = 0
while(True):
ret, frame = vid.read()
if not ret:
break
name = outputFPrefix + '_frame' + str(idx) + '.png'
cv2.imwrite(name, frame)
retVal.append(name)
idx += 1
return retVal
def mergeIntoVideo(self, inFs, outputF, FPS):
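        """Re-assemble the given frame images into an mp4 video at the requested frame rate."""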
frame = cv2.imread(inFs[0])
height, width, _ = frame.shape
video = cv2.VideoWriter(outputF, cv2.VideoWriter_fourcc(*'mp4v'), FPS, (width, height))
for inF in inFs:
video.write(cv2.imread(inF))
video.release()
def generate(self, inputF, outputF, args):
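        """Augment a single image, or every frame of a video, and write the result to outputF."""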
if args.auto:
auto_options = self.generateRandomOptions(args)
            self.logger.info('Random options: ' + str(auto_options))
if self.isVideo(inputF):
FPS = self.getFPS(inputF)
inputFs = self.splitFromVideo(inputF, outputF+'_input')
outputFs = []
for idx in range(0, len(inputFs)):
iF = inputFs[idx]
oF = outputF + '_output_frame' + str(idx) + '.png'
if args.auto:
self._generate(iF, oF, auto_options)
else:
self._generate(iF, oF, args)
outputFs.append(oF)
self.mergeIntoVideo(outputFs, outputF, FPS)
for f in inputFs:
os.remove(f)
for f in outputFs:
os.remove(f)
return True
else:
if args.auto:
return self._generate(inputF, outputF, auto_options)
else:
return self._generate(inputF, outputF, args)
def _generate(self, inputF, outputF, args):
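        """Apply the selected image operations to one image file and write it to outputF."""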
inputImage = self.Magick.Image(inputF)
input_width = inputImage.size().width()
input_height = inputImage.size().height()
self.logger.debug('Input width and height: %d x %d' % (input_width, input_height))
# make image ready to be modified
inputImage.modifyImage()
inputImage.backgroundColor(self.Magick.Color('black'))
if args.shift_x != None:
inputImage.roll(args.shift_x, 0)
if args.shift_y != None:
inputImage.roll(0, args.shift_y)
        if args.skew_x != None and args.skew_y != None:
            inputImage.shear(args.skew_x, args.skew_y)
        elif args.skew_x != None:
            inputImage.shear(args.skew_x, 0)
        elif args.skew_y != None:
            inputImage.shear(0, args.skew_y)
if args.rotate != None:
inputImage.rotate(args.rotate)
inputImage.crop(self.Magick.Geometry(input_width, input_height, 0, 0))
if args.horizontal_flip:
inputImage.flop()
if args.zoom != None:
inputImage.sample(self.Magick.Geometry(args.zoom))
if int(args.zoom.strip()[0:-1]) >= 100:
inputImage.crop(self.Magick.Geometry(input_width,
input_height,
int((inputImage.size().width() - input_width) / 2),
int((inputImage.size().height() - input_height) / 2)))
else:
# PythonMagick is missing extent() API
# inputImage.exent(Magick.Geometry(input_width, input_height), Magick.GravityType.CenterGravity)
smallWidth = inputImage.size().width()
smallHeight = inputImage.size().height()
inputImage.size(self.Magick.Geometry(input_width, input_height))
inputImage.draw(self.Magick.DrawableRectangle(smallWidth, smallHeight, input_width, input_height))
inputImage.draw(self.Magick.DrawableRectangle(smallWidth, 0, input_width, smallHeight))
inputImage.draw(self.Magick.DrawableRectangle(0, smallHeight, smallWidth, input_height))
inputImage.roll(int((input_width - smallWidth) / 2), int((input_height - smallHeight) / 2))
if args.contrast != None:
for _ in range(0, args.contrast):
inputImage.contrast(args.contrast)
if args.brightness != None or args.saturation != None or args.hue != None:
if args.brightness is None:
args.brightness = 100
if args.saturation is None:
args.saturation = 100
if args.hue is None:
args.hue = 100
inputImage.modulate(args.brightness, args.saturation, args.hue)
if args.blur:
inputImage.blur(args.blur_radius, args.blur_sigma)
if args.gaussianBlur:
inputImage.gaussianBlur(args.gaussianBlur_width, args.gaussianBlur_sigma)
if args.despeckle:
inputImage.despeckle()
if args.enhance:
inputImage.enhance()
if args.equalize:
inputImage.equalize()
if args.gamma != None:
inputImage.gamma(args.gamma)
if args.implode != None:
inputImage.implode(args.implode)
if args.negate:
inputImage.negate()
if args.normalize:
inputImage.normalize()
if args.quantize:
inputImage.quantize()
if args.reduceNoise != None:
inputImage.reduceNoise(args.reduceNoise)
if args.shade:
inputImage.shade(args.shade_azimuth, args.shade_elevation)
if args.sharpen:
inputImage.sharpen(args.sharpen_radius, args.sharpen_sigma)
if args.swirl != None:
inputImage.swirl(args.swirl)
if args.wave:
inputImage.wave(args.wave_amplitude, args.wave_wavelength)
inputImage.crop(self.Magick.Geometry(input_width,
input_height,
int(math.fabs((inputImage.size().width() - input_width) / 2)),
int(math.fabs((inputImage.size().height() - input_height) / 2))))
inputImage.write(outputF)
self.logger.debug('Output width and height: %d x %d' % (inputImage.size().width(), inputImage.size().height()))
return True
if __name__ == "__main__":
argparser = argparse.ArgumentParser()
argparser.add_argument('-l', '--log-level', default='INFO', help="log-level (INFO|WARN|DEBUG|FATAL|ERROR)")
argparser.add_argument('-i', '--input', required=True, help='Input image file name')
argparser.add_argument('-o', '--output', required=True, help='Output image file name')
argparser.add_argument('-w', '--overwrite', action='store_true', help='If set, will overwrite the existing output file')
GenerateSyntheticData.appendArgumentParser(argparser)
args = argparser.parse_args()
logging.basicConfig(stream=sys.stdout, level=args.log_level)
logger = logging.getLogger("DragonFly-ASL-GSD")
logger.debug('CLI arguments')
for key in vars(args):
logger.debug(' -- %s: %s' % (key, getattr(args, key)))
logger.debug('')
# check input file exists
if not os.path.isfile(args.input):
logger.error('Input file %s does not exist: ' % args.input)
sys.exit(1)
# check if output file exists
if os.path.isfile(args.output) and not args.overwrite:
try: input = raw_input
except NameError: pass
yn = input('Do you wish to overwrite %s? (y/n) ' % args.output)
if yn != 'y' and yn != 'Y':
logger.error('Output file %s will not be overwritten.' % args.output)
sys.exit(1)
GSD = GenerateSyntheticData(logger=logger)
status = GSD.generate(args.input, args.output, args)
logger.debug('Generation status: %r' % status)
|
[
"logging.basicConfig",
"logging.getLogger",
"cv2.__version__.split",
"numpy.random.normal",
"cv2.imwrite",
"argparse.ArgumentParser",
"os.path.isfile",
"numpy.random.uniform",
"argparse.Namespace",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"sys.exit",
"cv2.imread",
"numpy.random.binomial",
"os.remove"
] |
[((23128, 23153), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (23151, 23153), False, 'import argparse\n'), ((23668, 23728), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'args.log_level'}), '(stream=sys.stdout, level=args.log_level)\n', (23687, 23728), False, 'import logging\n'), ((23742, 23780), 'logging.getLogger', 'logging.getLogger', (['"""DragonFly-ASL-GSD"""'], {}), "('DragonFly-ASL-GSD')\n", (23759, 23780), False, 'import logging\n'), ((8491, 8511), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '()\n', (8509, 8511), False, 'import argparse\n'), ((16372, 16392), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vF'], {}), '(vF)\n', (16388, 16392), False, 'import cv2\n'), ((16420, 16446), 'cv2.__version__.split', 'cv2.__version__.split', (['"""."""'], {}), "('.')\n", (16441, 16446), False, 'import cv2\n'), ((16726, 16750), 'cv2.VideoCapture', 'cv2.VideoCapture', (['inputF'], {}), '(inputF)\n', (16742, 16750), False, 'import cv2\n'), ((17113, 17132), 'cv2.imread', 'cv2.imread', (['inFs[0]'], {}), '(inFs[0])\n', (17123, 17132), False, 'import cv2\n'), ((23969, 23995), 'os.path.isfile', 'os.path.isfile', (['args.input'], {}), '(args.input)\n', (23983, 23995), False, 'import os\n'), ((24073, 24084), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (24081, 24084), False, 'import sys\n'), ((24126, 24153), 'os.path.isfile', 'os.path.isfile', (['args.output'], {}), '(args.output)\n', (24140, 24153), False, 'import os\n'), ((311, 369), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (330, 369), False, 'import logging\n'), ((396, 415), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (413, 415), False, 'import logging\n'), ((5361, 5385), 'numpy.random.normal', 'np.random.normal', (['(100)', '(5)'], {}), '(100, 5)\n', (5377, 5385), True, 'import numpy as np\n'), ((5485, 5509), 'numpy.random.normal', 'np.random.normal', (['(100)', '(5)'], {}), '(100, 5)\n', (5501, 5509), True, 'import numpy as np\n'), ((5591, 5615), 'numpy.random.normal', 'np.random.normal', (['(100)', '(5)'], {}), '(100, 5)\n', (5607, 5615), True, 'import numpy as np\n'), ((5690, 5716), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.1)'], {}), '(1, 0.1)\n', (5708, 5716), True, 'import numpy as np\n'), ((8145, 8167), 'numpy.random.normal', 'np.random.normal', (['(0)', '(5)'], {}), '(0, 5)\n', (8161, 8167), True, 'import numpy as np\n'), ((16946, 16970), 'cv2.imwrite', 'cv2.imwrite', (['name', 'frame'], {}), '(name, frame)\n', (16957, 16970), False, 'import cv2\n'), ((17213, 17244), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (17235, 17244), False, 'import cv2\n'), ((24442, 24453), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (24450, 24453), False, 'import sys\n'), ((4426, 4448), 'numpy.random.normal', 'np.random.normal', (['(0)', '(3)'], {}), '(0, 3)\n', (4442, 4448), True, 'import numpy as np\n'), ((4493, 4515), 'numpy.random.normal', 'np.random.normal', (['(0)', '(3)'], {}), '(0, 3)\n', (4509, 4515), True, 'import numpy as np\n'), ((5747, 5773), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)'], {}), '(1, 0.5)\n', (5765, 5773), True, 'import numpy as np\n'), ((6350, 6376), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.1)'], {}), '(1, 0.1)\n', (6368, 6376), True, 'import numpy as np\n'), ((6463, 6489), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)'], 
{}), '(1, 0.5)\n', (6481, 6489), True, 'import numpy as np\n'), ((6572, 6598), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)'], {}), '(1, 0.5)\n', (6590, 6598), True, 'import numpy as np\n'), ((6683, 6709), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.1)'], {}), '(1, 0.1)\n', (6701, 6709), True, 'import numpy as np\n'), ((6791, 6817), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.1)'], {}), '(1, 0.1)\n', (6809, 6817), True, 'import numpy as np\n'), ((6909, 6935), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.1)'], {}), '(1, 0.1)\n', (6927, 6935), True, 'import numpy as np\n'), ((7020, 7046), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.1)'], {}), '(1, 0.1)\n', (7038, 7046), True, 'import numpy as np\n'), ((7131, 7156), 'numpy.random.normal', 'np.random.normal', (['(1)', '(0.03)'], {}), '(1, 0.03)\n', (7147, 7156), True, 'import numpy as np\n'), ((7261, 7287), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)'], {}), '(1, 0.5)\n', (7279, 7287), True, 'import numpy as np\n'), ((7323, 7348), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.15)'], {}), '(0, 0.15)\n', (7339, 7348), True, 'import numpy as np\n'), ((7569, 7595), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.1)'], {}), '(1, 0.1)\n', (7587, 7595), True, 'import numpy as np\n'), ((7664, 7688), 'numpy.random.normal', 'np.random.normal', (['(50)', '(17)'], {}), '(50, 17)\n', (7680, 7688), True, 'import numpy as np\n'), ((7739, 7763), 'numpy.random.normal', 'np.random.normal', (['(50)', '(17)'], {}), '(50, 17)\n', (7755, 7763), True, 'import numpy as np\n'), ((7853, 7879), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.1)'], {}), '(1, 0.1)\n', (7871, 7879), True, 'import numpy as np\n'), ((8253, 8279), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.3)'], {}), '(1, 0.3)\n', (8271, 8279), True, 'import numpy as np\n'), ((17317, 17332), 'cv2.imread', 'cv2.imread', (['inF'], {}), '(inF)\n', (17327, 17332), False, 'import cv2\n'), ((18165, 18177), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (18174, 18177), False, 'import os\n'), ((18225, 18237), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (18234, 18237), False, 'import os\n'), ((4237, 4259), 'numpy.random.normal', 'np.random.normal', (['(0)', '(3)'], {}), '(0, 3)\n', (4253, 4259), True, 'import numpy as np\n'), ((4313, 4335), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (4329, 4335), True, 'import numpy as np\n'), ((4678, 4743), 'numpy.random.uniform', 'np.random.uniform', (['cmdArg.auto_rotate_min', 'cmdArg.auto_rotate_max'], {}), '(cmdArg.auto_rotate_min, cmdArg.auto_rotate_max)\n', (4695, 4743), True, 'import numpy as np\n'), ((4797, 4819), 'numpy.random.normal', 'np.random.normal', (['(0)', '(3)'], {}), '(0, 3)\n', (4813, 4819), True, 'import numpy as np\n'), ((5239, 5261), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (5255, 5261), True, 'import numpy as np\n'), ((5946, 5968), 'numpy.random.normal', 'np.random.normal', (['(0)', '(3)'], {}), '(0, 3)\n', (5962, 5968), True, 'import numpy as np\n'), ((6021, 6045), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.7)'], {}), '(0, 0.7)\n', (6037, 6045), True, 'import numpy as np\n'), ((6139, 6161), 'numpy.random.normal', 'np.random.normal', (['(0)', '(3)'], {}), '(0, 3)\n', (6155, 6161), True, 'import numpy as np\n'), ((6222, 6246), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.7)'], {}), '(0, 0.7)\n', (6238, 6246), True, 
'import numpy as np\n'), ((7459, 7483), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.7)'], {}), '(0, 0.7)\n', (7475, 7483), True, 'import numpy as np\n'), ((7958, 7982), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.7)'], {}), '(0, 0.7)\n', (7974, 7982), True, 'import numpy as np\n'), ((8037, 8061), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.3)'], {}), '(0, 0.3)\n', (8053, 8061), True, 'import numpy as np\n'), ((8355, 8379), 'numpy.random.normal', 'np.random.normal', (['(5)', '(0.3)'], {}), '(5, 0.3)\n', (8371, 8379), True, 'import numpy as np\n'), ((8437, 8462), 'numpy.random.normal', 'np.random.normal', (['(100)', '(10)'], {}), '(100, 10)\n', (8453, 8462), True, 'import numpy as np\n'), ((4978, 5039), 'numpy.random.uniform', 'np.random.uniform', (['cmdArg.auto_zoom_min', 'cmdArg.auto_zoom_max'], {}), '(cmdArg.auto_zoom_min, cmdArg.auto_zoom_max)\n', (4995, 5039), True, 'import numpy as np\n'), ((5102, 5126), 'numpy.random.normal', 'np.random.normal', (['(100)', '(3)'], {}), '(100, 3)\n', (5118, 5126), True, 'import numpy as np\n')]
|
from service_objects import services
import numpy as np
import pandas as pd
from django.db import connection
import datetime
from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface
class IngestMatchesService(services.Service):
def process(self):
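        """Download Jeff Sackmann's ATP match CSVs for 1990-2020 and upsert Match,
        Tourney and per-player Match_Stats records, returning insert/update counts."""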
cursor = connection.cursor()
errors = ''
total_matches_updated = 0
total_matches_inserted = 0
tourneys = {}
surfaces = {}
tourney_levels = {}
players = {}
for year in range(1990, 2021):
csv_file = pd.read_csv('https://raw.githubusercontent.com/JeffSackmann/tennis_atp/master/atp_matches_' + str(year) + '.csv', header=1, names=self.getColumns())
for row in csv_file.itertuples():
created_at = datetime.datetime.now()
updated_at = datetime.datetime.now()
#try:
id = str(row.tourney_id) + '-' + str(row.match_num)
match = Match.objects.filter(id=id)
if (not match):
match = Match()
match.id = id
match.year = row.tourney_id.split('-')[0]
match.match_num = row.match_num
match.result = row.score
match.best_of = row.best_of
match.minutes = None if np.isnan(row.minutes) else row.minutes
match.round = row.round
if not tourneys.get(str(row.tourney_id)):
tourney = Tourney.objects.filter(id=row.tourney_id)
if (not tourney):
tourney = Tourney()
tourney.id = row.tourney_id
tourney.name = row.tourney_name
tourney.date = datetime.datetime.strptime(str(int(row.tourney_date)), '%Y%m%d').date()
tourney.created_at = created_at
tourney.updated_at = updated_at
if not surfaces.get(str(row.surface)):
surfaces[str(row.surface)] = self.getSurface(str(row.surface))
tourney.surface = surfaces[str(row.surface)]
if not tourney_levels.get(str(row.tourney_level)):
tourney_levels[str(row.tourney_level)] = self.getTourneyLevel(str(row.tourney_level))
tourney.tourney_level = tourney_levels[str(row.tourney_level)]
tourney.created_at = created_at
tourney.updated_at = updated_at
tourney.save()
else:
tourney = tourney[0]
tourneys[str(row.tourney_id)] = tourney
match.tourney = tourneys[str(row.tourney_id)]
match.created_at = created_at
match.updated_at = updated_at
match.save()
total_matches_inserted += 1
else:
match[0].year = row.tourney_id.split('-')[0]
match[0].save()
total_matches_updated += 1
match = match[0]
match_stats_id = str(row.tourney_id) + '-' + str(row.match_num) + '-' + str(row.winner_id)
match_stats = Match_Stats.objects.filter(id=match_stats_id)
if (not match_stats):
seed = row.winner_seed
if pd.isnull(row.winner_seed) or not str(row.winner_seed).isnumeric():
seed = None
match_stats = Match_Stats()
match_stats.id = match_stats_id
match_stats.type = ""
match_stats.seed = seed
match_stats.aces = None if np.isnan(row.w_ace) else row.w_ace
match_stats.double_faults = None if np.isnan(row.w_df) else row.w_df
match_stats.service_points = None if np.isnan(row.w_svpt) else row.w_svpt
match_stats.first_services = None if np.isnan(row.w_1stIn) else row.w_1stIn
match_stats.first_services_won = None if np.isnan(row.w_1stWon) else row.w_1stWon
match_stats.second_services_won = None if np.isnan(row.w_2ndWon) else row.w_2ndWon
match_stats.service_game_won = None if np.isnan(row.w_SvGms) else row.w_SvGms
match_stats.break_points_saved = None if np.isnan(row.w_bpSaved) else row.w_bpSaved
match_stats.break_points_played = None if np.isnan(row.w_bpFaced) else row.w_bpFaced
match_stats.rank = None if np.isnan(row.winner_rank) else row.winner_rank
match_stats.rank_points = None if np.isnan(row.winner_rank_points) else row.winner_rank_points
match_stats.is_winner = True
match_stats.created_at = created_at
match_stats.updated_at = updated_at
players[row.winner_id] = self.getPlayer(str(row.winner_id))
match_stats.player = players[row.winner_id]
match_stats.match = match
match_stats.save()
match_stats_id = str(row.tourney_id) + '-' + str(row.match_num) + '-' + str(row.loser_id)
match_stats = Match_Stats.objects.filter(id=match_stats_id)
if (not match_stats):
seed = row.loser_seed
if pd.isnull(row.loser_seed) or not str(row.loser_seed).isnumeric():
seed = None
match_stats = Match_Stats()
match_stats.id = match_stats_id
match_stats.type = ""
match_stats.seed = seed
match_stats.aces = None if np.isnan(row.l_ace) else row.l_ace
match_stats.double_faults = None if np.isnan(row.l_df) else row.l_df
match_stats.service_points = None if np.isnan(row.l_svpt) else row.l_svpt
match_stats.first_services = None if np.isnan(row.l_1stIn) else row.l_1stIn
match_stats.first_services_won = None if np.isnan(row.l_1stWon) else row.l_1stWon
match_stats.second_services_won = None if np.isnan(row.l_2ndWon) else row.l_2ndWon
match_stats.service_game_won = None if np.isnan(row.l_SvGms) else row.l_SvGms
match_stats.break_points_saved = None if np.isnan(row.l_bpSaved) else row.l_bpSaved
match_stats.break_points_played = None if np.isnan(row.l_bpFaced) else row.l_bpFaced
match_stats.rank = None if np.isnan(row.loser_rank) else row.loser_rank
match_stats.rank_points = None if np.isnan(row.loser_rank_points) else row.loser_rank_points
match_stats.is_winner = False
match_stats.created_at = created_at
match_stats.updated_at = updated_at
players[row.loser_id] = self.getPlayer(str(row.loser_id))
match_stats.player = players[row.loser_id]
match_stats.match = match
match_stats.save()
#except:
# assert False, (row.tourney_date, )
#errors = errors + '|||' + str(row.tourney_id) + '-' + str(row.match_num)
return {'inserts': total_matches_inserted, 'updates': total_matches_updated}
def getColumns(self):
return ["tourney_id","tourney_name","surface","draw_size","tourney_level","tourney_date","match_num","winner_id","winner_seed","winner_entry","winner_name","winner_hand","winner_ht","winner_ioc","winner_age",
"loser_id","loser_seed","loser_entry","loser_name","loser_hand","loser_ht","loser_ioc","loser_age","score","best_of","round","minutes","w_ace","w_df","w_svpt","w_1stIn","w_1stWon","w_2ndWon","w_SvGms","w_bpSaved",
"w_bpFaced","l_ace","l_df","l_svpt","l_1stIn","l_1stWon","l_2ndWon","l_SvGms","l_bpSaved","l_bpFaced","winner_rank","winner_rank_points","loser_rank","loser_rank_points"]
def getPlayer(self, id):
player = Player.objects.filter(id=id)
if (not player):
return None
else:
player = player[0]
return player
def getSurface(self, name):
surface = Surface.objects.filter(name=name)
if (not surface):
surface = Surface()
surface.name = name
surface.created_at = datetime.datetime.now()
surface.updated_at = datetime.datetime.now()
surface.save()
else:
surface = surface[0]
return surface
def getTourneyLevel(self, code):
tourney_level = Tourney_Level.objects.filter(code=code)
if (not tourney_level):
tourney_level = Tourney_Level()
tourney_level.code = code
tourney_level.name = code
tourney_level.created_at = datetime.datetime.now()
tourney_level.updated_at = datetime.datetime.now()
tourney_level.save()
else:
tourney_level = tourney_level[0]
return tourney_level
|
[
"front.models.Tourney_Level.objects.filter",
"pandas.isnull",
"front.models.Match.objects.filter",
"front.models.Surface",
"front.models.Tourney_Level",
"datetime.datetime.now",
"front.models.Match_Stats.objects.filter",
"front.models.Player.objects.filter",
"django.db.connection.cursor",
"numpy.isnan",
"front.models.Tourney.objects.filter",
"front.models.Tourney",
"front.models.Surface.objects.filter",
"front.models.Match_Stats",
"front.models.Match"
] |
[((299, 318), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (316, 318), False, 'from django.db import connection\n'), ((9006, 9034), 'front.models.Player.objects.filter', 'Player.objects.filter', ([], {'id': 'id'}), '(id=id)\n', (9027, 9034), False, 'from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n'), ((9206, 9239), 'front.models.Surface.objects.filter', 'Surface.objects.filter', ([], {'name': 'name'}), '(name=name)\n', (9228, 9239), False, 'from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n'), ((9611, 9650), 'front.models.Tourney_Level.objects.filter', 'Tourney_Level.objects.filter', ([], {'code': 'code'}), '(code=code)\n', (9639, 9650), False, 'from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n'), ((9288, 9297), 'front.models.Surface', 'Surface', ([], {}), '()\n', (9295, 9297), False, 'from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n'), ((9363, 9386), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9384, 9386), False, 'import datetime\n'), ((9420, 9443), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9441, 9443), False, 'import datetime\n'), ((9712, 9727), 'front.models.Tourney_Level', 'Tourney_Level', ([], {}), '()\n', (9725, 9727), False, 'from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n'), ((9843, 9866), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9864, 9866), False, 'import datetime\n'), ((9906, 9929), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9927, 9929), False, 'import datetime\n'), ((797, 820), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (818, 820), False, 'import datetime\n'), ((854, 877), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (875, 877), False, 'import datetime\n'), ((1000, 1027), 'front.models.Match.objects.filter', 'Match.objects.filter', ([], {'id': 'id'}), '(id=id)\n', (1020, 1027), False, 'from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n'), ((3823, 3868), 'front.models.Match_Stats.objects.filter', 'Match_Stats.objects.filter', ([], {'id': 'match_stats_id'}), '(id=match_stats_id)\n', (3849, 3868), False, 'from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n'), ((6011, 6056), 'front.models.Match_Stats.objects.filter', 'Match_Stats.objects.filter', ([], {'id': 'match_stats_id'}), '(id=match_stats_id)\n', (6037, 6056), False, 'from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n'), ((1097, 1104), 'front.models.Match', 'Match', ([], {}), '()\n', (1102, 1104), False, 'from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n'), ((4132, 4145), 'front.models.Match_Stats', 'Match_Stats', ([], {}), '()\n', (4143, 4145), False, 'from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n'), ((6317, 6330), 'front.models.Match_Stats', 'Match_Stats', ([], {}), '()\n', (6328, 6330), False, 'from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n'), ((1414, 1435), 'numpy.isnan', 'np.isnan', (['row.minutes'], {}), '(row.minutes)\n', (1422, 1435), True, 'import numpy as np\n'), ((1650, 1691), 'front.models.Tourney.objects.filter', 'Tourney.objects.filter', ([], {'id': 'row.tourney_id'}), '(id=row.tourney_id)\n', (1672, 1691), False, 'from 
front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n'), ((3985, 4011), 'pandas.isnull', 'pd.isnull', (['row.winner_seed'], {}), '(row.winner_seed)\n', (3994, 4011), True, 'import pandas as pd\n'), ((4347, 4366), 'numpy.isnan', 'np.isnan', (['row.w_ace'], {}), '(row.w_ace)\n', (4355, 4366), True, 'import numpy as np\n'), ((4442, 4460), 'numpy.isnan', 'np.isnan', (['row.w_df'], {}), '(row.w_df)\n', (4450, 4460), True, 'import numpy as np\n'), ((4536, 4556), 'numpy.isnan', 'np.isnan', (['row.w_svpt'], {}), '(row.w_svpt)\n', (4544, 4556), True, 'import numpy as np\n'), ((4634, 4655), 'numpy.isnan', 'np.isnan', (['row.w_1stIn'], {}), '(row.w_1stIn)\n', (4642, 4655), True, 'import numpy as np\n'), ((4738, 4760), 'numpy.isnan', 'np.isnan', (['row.w_1stWon'], {}), '(row.w_1stWon)\n', (4746, 4760), True, 'import numpy as np\n'), ((4845, 4867), 'numpy.isnan', 'np.isnan', (['row.w_2ndWon'], {}), '(row.w_2ndWon)\n', (4853, 4867), True, 'import numpy as np\n'), ((4949, 4970), 'numpy.isnan', 'np.isnan', (['row.w_SvGms'], {}), '(row.w_SvGms)\n', (4957, 4970), True, 'import numpy as np\n'), ((5053, 5076), 'numpy.isnan', 'np.isnan', (['row.w_bpSaved'], {}), '(row.w_bpSaved)\n', (5061, 5076), True, 'import numpy as np\n'), ((5162, 5185), 'numpy.isnan', 'np.isnan', (['row.w_bpFaced'], {}), '(row.w_bpFaced)\n', (5170, 5185), True, 'import numpy as np\n'), ((5256, 5281), 'numpy.isnan', 'np.isnan', (['row.winner_rank'], {}), '(row.winner_rank)\n', (5264, 5281), True, 'import numpy as np\n'), ((5361, 5393), 'numpy.isnan', 'np.isnan', (['row.winner_rank_points'], {}), '(row.winner_rank_points)\n', (5369, 5393), True, 'import numpy as np\n'), ((6172, 6197), 'pandas.isnull', 'pd.isnull', (['row.loser_seed'], {}), '(row.loser_seed)\n', (6181, 6197), True, 'import pandas as pd\n'), ((6532, 6551), 'numpy.isnan', 'np.isnan', (['row.l_ace'], {}), '(row.l_ace)\n', (6540, 6551), True, 'import numpy as np\n'), ((6627, 6645), 'numpy.isnan', 'np.isnan', (['row.l_df'], {}), '(row.l_df)\n', (6635, 6645), True, 'import numpy as np\n'), ((6721, 6741), 'numpy.isnan', 'np.isnan', (['row.l_svpt'], {}), '(row.l_svpt)\n', (6729, 6741), True, 'import numpy as np\n'), ((6819, 6840), 'numpy.isnan', 'np.isnan', (['row.l_1stIn'], {}), '(row.l_1stIn)\n', (6827, 6840), True, 'import numpy as np\n'), ((6923, 6945), 'numpy.isnan', 'np.isnan', (['row.l_1stWon'], {}), '(row.l_1stWon)\n', (6931, 6945), True, 'import numpy as np\n'), ((7030, 7052), 'numpy.isnan', 'np.isnan', (['row.l_2ndWon'], {}), '(row.l_2ndWon)\n', (7038, 7052), True, 'import numpy as np\n'), ((7134, 7155), 'numpy.isnan', 'np.isnan', (['row.l_SvGms'], {}), '(row.l_SvGms)\n', (7142, 7155), True, 'import numpy as np\n'), ((7238, 7261), 'numpy.isnan', 'np.isnan', (['row.l_bpSaved'], {}), '(row.l_bpSaved)\n', (7246, 7261), True, 'import numpy as np\n'), ((7347, 7370), 'numpy.isnan', 'np.isnan', (['row.l_bpFaced'], {}), '(row.l_bpFaced)\n', (7355, 7370), True, 'import numpy as np\n'), ((7441, 7465), 'numpy.isnan', 'np.isnan', (['row.loser_rank'], {}), '(row.loser_rank)\n', (7449, 7465), True, 'import numpy as np\n'), ((7544, 7575), 'numpy.isnan', 'np.isnan', (['row.loser_rank_points'], {}), '(row.loser_rank_points)\n', (7552, 7575), True, 'import numpy as np\n'), ((1780, 1789), 'front.models.Tourney', 'Tourney', ([], {}), '()\n', (1787, 1789), False, 'from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface\n')]
|
import numpy as np
import cv2
import os
from matplotlib import pyplot as plt
#### INPUT ####
# folder that contains datapoints
folderName = '2dIR'
#### SETTINGS ####
# settings listed below are suitable for 2D data
# intensity of noise filtering; higher values mean more blurring
medianKernel = 5
# blurring radius in x and y direction; higher values mean more blurring; note: these values need to be odd
gaussian_x = 9
gaussian_y = 181
# decay blurring strength; higher values mean blurring will be more focused on the center
gaussianSigma = 60
# number of pixels that are averaged on both sides when iterating over each pixel in a row
pixelsAveraged1 = 10
# number of pixels that are averaged on both sides when iterating over pixels closer to the leading edge; this number should be smaller than pixelsAveraged1 since higher precision is needed
pixelsAveraged2 = 6
# vertical range of pixels considered when determining transition line; range is selected so that noise at the root and the tip is disregarded
rangeVer = (40, 400)
# maximal fraction of standard deviation for the point to be included during filtering
maxStd = 0.9
# minimal fraction of the points left after filtering for the line to be considered as transition line
minFiltered = 0.5
# critical angle at which the line closest to the leading edge is considered to be the transition line
criticalAngle = 7.5
# margin of averaged pixels between the leading edge and detected transition points
margin = 2
# minimal average difference of the more aft lines to be considered as transition line
minDifference1 = 4.68
# minimal average difference of the more forward lines to be considered as transition line
minDifference2 = 3.1
# width of the cropped image
width = 360
# settings listed below are suitable for 3D data
# medianKernel = 5
# gaussian_x = 9
# gaussian_y = 181
# gaussianSigma = 60
# pixelsAveraged1 = 9
# pixelsAveraged2 = 6
# rangeVer = (40, 400)
# maxStd = 1.5
# minFiltered = 0.5
# criticalAngle = 9.5
# margin = 2
# minDifference1 = 3.84
# minDifference2 = 3.1
# width = 360
# processing image
def findTransition(data, angle):
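    """Locate the laminar-turbulent transition line in one (averaged) IR image.

    The data is normalised to 8-bit, blurred, and scanned row by row for the
    leading/trailing edges and three candidate transition lines; one candidate
    is selected and returned as a fraction of the chord (x/c). An annotated
    image is also written to the Images folder.
    """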
# removing NaN values from the array
data = data[:, ~np.isnan(data).all(axis=0)]
# normalising data
data = ((data - np.amin(data)) / (np.amax(data) - np.amin(data)) * 255)
# converting to pixel data
data = data.astype(np.uint8)
# processing data using median and gaussian blur
blurred = cv2.medianBlur(data, medianKernel)
blurred = cv2.GaussianBlur(blurred, (gaussian_x, gaussian_y), gaussianSigma)
# creating empty arrays to store locations of edges and potential transitions
edges = np.zeros((len(blurred), 2), dtype=int)
edge = (0, 0)
differencesVer = np.zeros((len(blurred), 3))
transitions1 = np.zeros((len(blurred), 2), dtype=int)
transitions2 = np.zeros((len(blurred), 2), dtype=int)
# iterating over each row of pixels
for i in range(len(blurred)):
# iterating over each pixel in a row and calculating differences between pixels to the right and to the left
differencesHor1 = np.zeros(len(blurred[i]))
for j in range(len(blurred[i])):
if j - pixelsAveraged1 >= 0 and j + pixelsAveraged1 <= len(blurred[i]):
differencesHor1[j] = np.absolute(np.average(blurred[i, j - pixelsAveraged1:j]) - np.average(blurred[i, j:j + pixelsAveraged1]))
# selecting two locations where differences are the highest
edges[i, 0] = np.argmax(differencesHor1)
for j in range(len(differencesHor1)):
if differencesHor1[j] > differencesHor1[edges[i, 1]] and np.absolute(edges[i, 0] - j) > pixelsAveraged1:
edges[i, 1] = j
edges = np.sort(edges, axis=1)
# averaging the detected locations to determine position of the edges
edge = int(np.average(edges[rangeVer[0]:rangeVer[1], 0])), int(np.average([edges[rangeVer[0]:rangeVer[1], 1]]))
# iterating over each pixel between edges and calculating differences between pixels to the right and to the left
differencesHor1 = np.zeros(len(blurred[i]))
for j in range(len(blurred[i])):
if edges[i, 0] + 2 * pixelsAveraged1 <= j <= edges[i, 1] - margin * pixelsAveraged1:
differencesHor1[j] = np.absolute(np.average(blurred[i, j - pixelsAveraged1:j]) - np.average(blurred[i, j:j + pixelsAveraged1]))
# selecting two locations where differences are the highest
transitions1[i, 0] = np.argmax(differencesHor1)
for j in range(len(differencesHor1)):
if differencesHor1[j] > differencesHor1[transitions1[i, 1]] and np.absolute(transitions1[i, 0] - j) > 3 * pixelsAveraged1:
transitions1[i, 1] = j
transitions1 = np.sort(transitions1, axis=1)
# iterating over pixels closer to the leading edge and calculating differences between pixels to the right and to the left
differencesHor2 = np.zeros(len(blurred[i]))
for j in range(len(blurred[i])):
if edges[i, 0] + 10 * pixelsAveraged2 <= j <= edges[i, 1] - pixelsAveraged2:
differencesHor2[j] = np.absolute(np.average(blurred[i, j - pixelsAveraged2:j]) - np.average(blurred[i, j:j + pixelsAveraged2]))
# selecting two locations where differences are the highest
transitions2[i, 0] = np.argmax(differencesHor2)
for j in range(len(differencesHor2)):
if differencesHor2[j] > differencesHor2[transitions2[i, 1]] and np.absolute(transitions2[i, 0] - j) > pixelsAveraged2:
transitions2[i, 1] = j
transitions2 = np.sort(transitions2, axis=1)
# saving maximal horizontal differences to calculate vertical differences
differencesVer[i, 0] = differencesHor1[transitions1[i, 0]]
differencesVer[i, 1] = differencesHor1[transitions1[i, 1]]
differencesVer[i, 2] = differencesHor2[transitions2[i, 0]]
# cropping locations of transitions and vertical differences
transitions1 = transitions1[rangeVer[0]:rangeVer[1], :]
transitions2 = transitions2[rangeVer[0]:rangeVer[1], :]
differencesVer = differencesVer[rangeVer[0]:rangeVer[1], :]
# calculating average and standard deviation of the first detected transition line
transitions1Avg = np.average(transitions1[:, 0])
transitions1Std = np.std(transitions1[:, 0])
# filtering locations that are too far from the average
transitions1Filtered = []
for i in range(len(transitions1)):
if round(transitions1Avg - maxStd * transitions1Std) <= transitions1[i, 0] <= round(transitions1Avg + maxStd * transitions1Std):
transitions1Filtered.append(transitions1[i, 0])
# calculating average and standard deviation of the second detected transition line
transitions2Avg = np.average(transitions1[:, 1])
transitions2Std = np.std(transitions1[:, 1])
# filtering locations that are too far from the average
transitions2Filtered = []
for i in range(len(transitions1)):
if round(transitions2Avg - maxStd * transitions2Std) <= transitions1[i, 1] <= round(transitions2Avg + maxStd * transitions2Std):
transitions2Filtered.append(transitions1[i, 1])
# calculating average and standard deviation of the third detected transition line
transitions3Avg = [np.average(transitions2[:, 0]), np.average(transitions2[:, 1])]
transitions3Std = [np.std(transitions2[:, 0]), np.std(transitions2[:, 1])]
# filtering locations that are too far from the average
transitions3Filtered = []
for i in range(len(transitions2)):
if round(transitions3Avg[0] - maxStd * transitions3Std[0]) <= transitions2[i, 0] <= round(transitions3Avg[0] + maxStd * transitions3Std[0]) \
and round(transitions3Avg[1] - maxStd * transitions3Std[1]) <= transitions2[i, 1] <= round(transitions3Avg[1] + maxStd * transitions3Std[1]):
transitions3Filtered.append(np.average(transitions2[i, :]))
# calculating the average of vertical differences for each transition line
differences = np.zeros(3)
differences[0] = np.average(differencesVer[:, 0])
differences[1] = np.average(differencesVer[:, 1])
differences[2] = np.average(differencesVer[:, 2])
# choosing one of the three detected lines
if differences[0] >= minDifference1 and len(transitions1Filtered) > minFiltered * (rangeVer[1] - rangeVer[0]) and angle < criticalAngle:
transition = round(np.average(transitions1Filtered))
elif differences[1] >= minDifference1 and len(transitions2Filtered) > minFiltered * (rangeVer[1] - rangeVer[0]) and angle < criticalAngle:
transition = round(np.average(transitions2Filtered))
elif differences[2] >= minDifference2:
transition = round(np.average(transitions3Filtered))
else:
transition = edge[1]
# printing parameters for debugging
# print('Differences 1: ' + differences[0])
# print('Differences 2: ' + differences[1])
# print('Differences 3: ' + differences[2])
# print('Length of filtered transitions 1:' + str(len(transitions1Filtered)))
# print('Length of filtered transitions 1:' + str(len(transitions2Filtered)))
# print('Length of filtered transitions 1:' + str(len(transitions3Filtered)))
# calculating the location of transition as percentage of chord length
XC = 1 - ((transition - edge[0]) / (edge[1] - edge[0]))
# printing edges and transition line on the generated image
for i in range(len(data)):
data[i, edge[0] - 1:edge[0] + 1] = 0
data[i, edge[1] - 1:edge[1] + 1] = 0
data[i, transition - 1:transition + 1] = 0
# data[i, edges[i, 0] - 1:edges[i, 0] + 1] = 0
# data[i, edges[i, 1] - 1:edges[i, 1] + 1] = 0
# printing detected lines on the generated image
# for i in range(len(transitions1)):
# data[i + rangeVer[0], transitions1[i, 0] - 1:transitions1[i, 0] + 1] = 0
# data[i + rangeVer[0], transitions1[i, 1] - 1:transitions1[i, 1] + 1] = 0
# data[i + rangeVer[0], transitions2[i, 0] - 1:transitions2[i, 0] + 1] = 0
# data[i + rangeVer[0], transitions2[i, 1] - 1:transitions2[i, 1] + 1] = 0
# calculating midpoint between edges and cropping the image
midpoint = int((edge[1] - edge[0]) / 2 + edge[0])
data = data[:, int(midpoint - width / 2):int(midpoint + width / 2)]
blurred = blurred[:, int(midpoint - width / 2):int(midpoint + width / 2)]
# converting data to contiguous array
data = np.ascontiguousarray(data, dtype=np.uint8)
# settings for placing AoA and transition location on the image
text1 = 'AoA: ' + str(angle)
text2 = 'x/c = ' + str(round(XC, 3))
org1 = (60, 20)
org2 = (60, 40)
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.5
color = (255, 0, 0)
thickness = 1
# inserting text to the image
data = cv2.putText(data, text1, org1, font, fontScale, color, thickness, cv2.LINE_AA)
data = cv2.putText(data, text2, org2, font, fontScale, color, thickness, cv2.LINE_AA)
# showing generated images
# cv2.imshow("data", data)
# cv2.imshow("blurred", blurred)
# cv2.waitKey(0)
# saving generated images
path = 'Images'
fileName = 'AoA=' + str(angle) + ',XC=' + str(round(XC, 3)) + '.jpg'
cv2.imwrite(os.path.join(path, fileName), data)
# cv2.imwrite(os.path.join(path, 'blurred.jpg'), blurred)
return XC
# detecting all folders in the selected directory
folders = os.listdir(folderName + '/.')
# creating empty array for results
results = np.zeros((len(folders), 2))
# iterating over each folder
for i, folder in enumerate(folders):
# detecting all files in the selected folder
folderPath = folderName + '/' + folder + '/.'
files = os.listdir(folderPath)
# creating empty array in the size of data
dataPoints = np.zeros((480, 640))
# monitoring progress of the program
print('---------------------------------------')
print('Progress: ' + str(round(i / len(folders) * 100, 2)) + '%')
print('AoA: ' + folder)
# iterating over detected files
for file in files:
# importing data into array
filePath = folderName + '/' + folder + '/' + file
dataPoint = np.genfromtxt(filePath, delimiter=';')
# removing NaN values from the array
dataPoint = dataPoint[:, ~np.isnan(dataPoint).all(axis=0)]
# adding imported data to the array
dataPoints += dataPoint
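        # note: only the first file in each folder is used; remove this break and
        # re-enable the averaging below to combine all measurement files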
break
# calculating average of the data
# dataPoints = dataPoints / len(files)
# calculating location of transition and saving it into the results
transitionXC = findTransition(dataPoints, float(folder))
results[i] = [float(folder), transitionXC]
# saving results to text file
results = results[results[:, 0].argsort()]
np.savetxt('results.txt', results, delimiter=',')
# generating graph of location vs angle of attack
plt.plot(results[:, 0], results[:, 1])
plt.xlabel("Angle of attack [deg]")
plt.ylabel("Location of transition [x/c]")
plt.show()
|
[
"matplotlib.pyplot.ylabel",
"numpy.ascontiguousarray",
"numpy.genfromtxt",
"os.listdir",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.sort",
"cv2.medianBlur",
"numpy.amin",
"numpy.average",
"numpy.argmax",
"cv2.putText",
"numpy.isnan",
"numpy.savetxt",
"numpy.std",
"cv2.GaussianBlur",
"matplotlib.pyplot.show",
"numpy.absolute",
"os.path.join",
"numpy.zeros",
"numpy.amax"
] |
[((11748, 11777), 'os.listdir', 'os.listdir', (["(folderName + '/.')"], {}), "(folderName + '/.')\n", (11758, 11777), False, 'import os\n'), ((13133, 13182), 'numpy.savetxt', 'np.savetxt', (['"""results.txt"""', 'results'], {'delimiter': '""","""'}), "('results.txt', results, delimiter=',')\n", (13143, 13182), True, 'import numpy as np\n'), ((13237, 13275), 'matplotlib.pyplot.plot', 'plt.plot', (['results[:, 0]', 'results[:, 1]'], {}), '(results[:, 0], results[:, 1])\n', (13245, 13275), True, 'from matplotlib import pyplot as plt\n'), ((13277, 13312), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angle of attack [deg]"""'], {}), "('Angle of attack [deg]')\n", (13287, 13312), True, 'from matplotlib import pyplot as plt\n'), ((13314, 13356), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Location of transition [x/c]"""'], {}), "('Location of transition [x/c]')\n", (13324, 13356), True, 'from matplotlib import pyplot as plt\n'), ((13358, 13368), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13366, 13368), True, 'from matplotlib import pyplot as plt\n'), ((2527, 2561), 'cv2.medianBlur', 'cv2.medianBlur', (['data', 'medianKernel'], {}), '(data, medianKernel)\n', (2541, 2561), False, 'import cv2\n'), ((2577, 2643), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['blurred', '(gaussian_x, gaussian_y)', 'gaussianSigma'], {}), '(blurred, (gaussian_x, gaussian_y), gaussianSigma)\n', (2593, 2643), False, 'import cv2\n'), ((6442, 6472), 'numpy.average', 'np.average', (['transitions1[:, 0]'], {}), '(transitions1[:, 0])\n', (6452, 6472), True, 'import numpy as np\n'), ((6496, 6522), 'numpy.std', 'np.std', (['transitions1[:, 0]'], {}), '(transitions1[:, 0])\n', (6502, 6522), True, 'import numpy as np\n'), ((6970, 7000), 'numpy.average', 'np.average', (['transitions1[:, 1]'], {}), '(transitions1[:, 1])\n', (6980, 7000), True, 'import numpy as np\n'), ((7024, 7050), 'numpy.std', 'np.std', (['transitions1[:, 1]'], {}), '(transitions1[:, 1])\n', (7030, 7050), True, 'import numpy as np\n'), ((8260, 8271), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (8268, 8271), True, 'import numpy as np\n'), ((8294, 8326), 'numpy.average', 'np.average', (['differencesVer[:, 0]'], {}), '(differencesVer[:, 0])\n', (8304, 8326), True, 'import numpy as np\n'), ((8349, 8381), 'numpy.average', 'np.average', (['differencesVer[:, 1]'], {}), '(differencesVer[:, 1])\n', (8359, 8381), True, 'import numpy as np\n'), ((8404, 8436), 'numpy.average', 'np.average', (['differencesVer[:, 2]'], {}), '(differencesVer[:, 2])\n', (8414, 8436), True, 'import numpy as np\n'), ((10742, 10784), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data'], {'dtype': 'np.uint8'}), '(data, dtype=np.uint8)\n', (10762, 10784), True, 'import numpy as np\n'), ((11125, 11203), 'cv2.putText', 'cv2.putText', (['data', 'text1', 'org1', 'font', 'fontScale', 'color', 'thickness', 'cv2.LINE_AA'], {}), '(data, text1, org1, font, fontScale, color, thickness, cv2.LINE_AA)\n', (11136, 11203), False, 'import cv2\n'), ((11216, 11294), 'cv2.putText', 'cv2.putText', (['data', 'text2', 'org2', 'font', 'fontScale', 'color', 'thickness', 'cv2.LINE_AA'], {}), '(data, text2, org2, font, fontScale, color, thickness, cv2.LINE_AA)\n', (11227, 11294), False, 'import cv2\n'), ((12041, 12063), 'os.listdir', 'os.listdir', (['folderPath'], {}), '(folderPath)\n', (12051, 12063), False, 'import os\n'), ((12132, 12152), 'numpy.zeros', 'np.zeros', (['(480, 640)'], {}), '((480, 640))\n', (12140, 12152), True, 'import numpy as np\n'), ((3585, 3611), 'numpy.argmax', 
'np.argmax', (['differencesHor1'], {}), '(differencesHor1)\n', (3594, 3611), True, 'import numpy as np\n'), ((3827, 3849), 'numpy.sort', 'np.sort', (['edges'], {'axis': '(1)'}), '(edges, axis=1)\n', (3834, 3849), True, 'import numpy as np\n'), ((4616, 4642), 'numpy.argmax', 'np.argmax', (['differencesHor1'], {}), '(differencesHor1)\n', (4625, 4642), True, 'import numpy as np\n'), ((4890, 4919), 'numpy.sort', 'np.sort', (['transitions1'], {'axis': '(1)'}), '(transitions1, axis=1)\n', (4897, 4919), True, 'import numpy as np\n'), ((5485, 5511), 'numpy.argmax', 'np.argmax', (['differencesHor2'], {}), '(differencesHor2)\n', (5494, 5511), True, 'import numpy as np\n'), ((5755, 5784), 'numpy.sort', 'np.sort', (['transitions2'], {'axis': '(1)'}), '(transitions2, axis=1)\n', (5762, 5784), True, 'import numpy as np\n'), ((7498, 7528), 'numpy.average', 'np.average', (['transitions2[:, 0]'], {}), '(transitions2[:, 0])\n', (7508, 7528), True, 'import numpy as np\n'), ((7530, 7560), 'numpy.average', 'np.average', (['transitions2[:, 1]'], {}), '(transitions2[:, 1])\n', (7540, 7560), True, 'import numpy as np\n'), ((7586, 7612), 'numpy.std', 'np.std', (['transitions2[:, 0]'], {}), '(transitions2[:, 0])\n', (7592, 7612), True, 'import numpy as np\n'), ((7614, 7640), 'numpy.std', 'np.std', (['transitions2[:, 1]'], {}), '(transitions2[:, 1])\n', (7620, 7640), True, 'import numpy as np\n'), ((11566, 11594), 'os.path.join', 'os.path.join', (['path', 'fileName'], {}), '(path, fileName)\n', (11578, 11594), False, 'import os\n'), ((12533, 12571), 'numpy.genfromtxt', 'np.genfromtxt', (['filePath'], {'delimiter': '""";"""'}), "(filePath, delimiter=';')\n", (12546, 12571), True, 'import numpy as np\n'), ((8657, 8689), 'numpy.average', 'np.average', (['transitions1Filtered'], {}), '(transitions1Filtered)\n', (8667, 8689), True, 'import numpy as np\n'), ((2334, 2347), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (2341, 2347), True, 'import numpy as np\n'), ((2352, 2365), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (2359, 2365), True, 'import numpy as np\n'), ((2368, 2381), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (2375, 2381), True, 'import numpy as np\n'), ((3951, 3996), 'numpy.average', 'np.average', (['edges[rangeVer[0]:rangeVer[1], 0]'], {}), '(edges[rangeVer[0]:rangeVer[1], 0])\n', (3961, 3996), True, 'import numpy as np\n'), ((4003, 4050), 'numpy.average', 'np.average', (['[edges[rangeVer[0]:rangeVer[1], 1]]'], {}), '([edges[rangeVer[0]:rangeVer[1], 1]])\n', (4013, 4050), True, 'import numpy as np\n'), ((8127, 8157), 'numpy.average', 'np.average', (['transitions2[i, :]'], {}), '(transitions2[i, :])\n', (8137, 8157), True, 'import numpy as np\n'), ((8863, 8895), 'numpy.average', 'np.average', (['transitions2Filtered'], {}), '(transitions2Filtered)\n', (8873, 8895), True, 'import numpy as np\n'), ((3729, 3757), 'numpy.absolute', 'np.absolute', (['(edges[i, 0] - j)'], {}), '(edges[i, 0] - j)\n', (3740, 3757), True, 'import numpy as np\n'), ((4767, 4802), 'numpy.absolute', 'np.absolute', (['(transitions1[i, 0] - j)'], {}), '(transitions1[i, 0] - j)\n', (4778, 4802), True, 'import numpy as np\n'), ((5636, 5671), 'numpy.absolute', 'np.absolute', (['(transitions2[i, 0] - j)'], {}), '(transitions2[i, 0] - j)\n', (5647, 5671), True, 'import numpy as np\n'), ((8969, 9001), 'numpy.average', 'np.average', (['transitions3Filtered'], {}), '(transitions3Filtered)\n', (8979, 9001), True, 'import numpy as np\n'), ((2261, 2275), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (2269, 2275), 
True, 'import numpy as np\n'), ((3396, 3441), 'numpy.average', 'np.average', (['blurred[i, j - pixelsAveraged1:j]'], {}), '(blurred[i, j - pixelsAveraged1:j])\n', (3406, 3441), True, 'import numpy as np\n'), ((3444, 3489), 'numpy.average', 'np.average', (['blurred[i, j:j + pixelsAveraged1]'], {}), '(blurred[i, j:j + pixelsAveraged1])\n', (3454, 3489), True, 'import numpy as np\n'), ((4420, 4465), 'numpy.average', 'np.average', (['blurred[i, j - pixelsAveraged1:j]'], {}), '(blurred[i, j - pixelsAveraged1:j])\n', (4430, 4465), True, 'import numpy as np\n'), ((4468, 4513), 'numpy.average', 'np.average', (['blurred[i, j:j + pixelsAveraged1]'], {}), '(blurred[i, j:j + pixelsAveraged1])\n', (4478, 4513), True, 'import numpy as np\n'), ((5289, 5334), 'numpy.average', 'np.average', (['blurred[i, j - pixelsAveraged2:j]'], {}), '(blurred[i, j - pixelsAveraged2:j])\n', (5299, 5334), True, 'import numpy as np\n'), ((5337, 5382), 'numpy.average', 'np.average', (['blurred[i, j:j + pixelsAveraged2]'], {}), '(blurred[i, j:j + pixelsAveraged2])\n', (5347, 5382), True, 'import numpy as np\n'), ((12655, 12674), 'numpy.isnan', 'np.isnan', (['dataPoint'], {}), '(dataPoint)\n', (12663, 12674), True, 'import numpy as np\n')]
|
import numpy as np
from matplotlib import pyplot as plt
from env import DrivingEnv
from solvers import GridSolver, SampleGraphSolver
def time_compare(seed=1234, min_sample=10, max_sample=50, count=10):
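    """Compare the running time of the grid-based and graph-based solvers over a
    range of equivalent sample sizes (assumes solve() returns the elapsed time)."""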
sample_count = np.linspace(min_sample, max_sample, count).astype(int)
grid_times = []
graph_times = []
for size in sample_count:
env = DrivingEnv(15, random_seed=seed)
solver = GridSolver(size)
grid_times.append(solver.solve(env, max_steps=500))
env = DrivingEnv(15, random_seed=seed)
solver = SampleGraphSolver(size*size)
graph_times.append(solver.solve(env, max_steps=500))
plt.figure()
plt.semilogy(sample_count, grid_times, label="Grid-based")
plt.semilogy(sample_count, graph_times, label="Graph-based")
plt.xlabel("Equivalent sample size")
plt.ylabel("Running time (s)")
plt.legend()
plt.show()
def grid_size_reward_compare(seed=1234, min_sample=10, max_sample=50, count=10, repeat=5):
env = DrivingEnv(15, random_seed=seed)
size_list = np.linspace(min_sample, max_sample, count).astype(int)
cost_list = []
for size in size_list:
cost_cases = []
for _ in range(repeat):
solver = SampleGraphSolver(size*size)
solver.solve(env, max_steps=200, early_stop=False)
states, cost = env.simulate(solver)
cost_cases.append(cost)
cost_list.append(cost_cases)
plt.figure()
plt.plot(size_list, np.mean(cost_list, axis=1))
plt.xlabel("Graph size")
plt.ylabel("Time and safety cost")
plt.title("Graph based policy performance versus graph size")
plt.show()
def grid_with_different_safety_cost(cost_type="linear"):
env = DrivingEnv(15, random_seed=1234)
def render_graph(solver, ax):
solution = solver.report_solution()
solution_set = set()
for i in range(len(solution) - 1):
solution_set.add((solution[i], solution[i+1]))
for n1, n2 in solver._connections:
if (n1, n2) in solution_set or (n2, n1) in solution_set:
color = "#1A090D"
lwidth = 5
else:
color = "#4A139488"
lwidth = 1
ax.plot([solver._samples[n1].x, solver._samples[n2].x], [solver._samples[n1].y, solver._samples[n2].y], lw=lwidth, c=color)
ax.scatter([p.x for p in solver._samples], [p.y for p in solver._samples], c=solver._safety_cost_cache)
solver = SampleGraphSolver(800)
solver.solve(env, max_steps=200, safety_weight=100, safety_type=cost_type)
fig, ax = plt.subplots(1)
env.render(ax)
render_graph(solver, ax)
plt.title("Graph-based solution with %s cost" % cost_type)
plt.show()
def graph_with_different_weight(seed=1234, ratio_count=7):
ratios = np.logspace(-3, 3, ratio_count)
fig, ax = plt.subplots(1)
DrivingEnv(15, random_seed=seed).render(ax)
handles = [None] * ratio_count
for rid, ratio in enumerate(ratios):
coeff = np.sqrt(ratio)
env = DrivingEnv(15, random_seed=seed)
solver = SampleGraphSolver(800)
solver.solve(env, max_steps=100, early_stop=False, safety_weight=coeff, time_weight=1/coeff, safety_type="linear")
solution = solver.report_solution()
solution_set = set()
for i in range(len(solution) - 1):
solution_set.add((solution[i], solution[i+1]))
for n1, n2 in solver._connections:
if (n1, n2) in solution_set or (n2, n1) in solution_set:
lwidth, color = 4, "C%d" % rid
handles[rid], = ax.plot([solver._samples[n1].x, solver._samples[n2].x], [solver._samples[n1].y, solver._samples[n2].y], lw=lwidth, c=color)
# fig.legend(handles, ["safety/time=%f" % ratio for ratio in ratios], loc=1)
plt.title("Difference path under different weights")
plt.show()
if __name__ == '__main__':
    graph_with_different_weight()
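    # The other comparison studies defined above can be invoked the same way,
    # e.g. (kept commented out so the default run stays short):
    # time_compare(seed=1234, min_sample=10, max_sample=50, count=10)
    # grid_size_reward_compare(seed=1234, repeat=5)
    # grid_with_different_safety_cost(cost_type="linear")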
|
[
"matplotlib.pyplot.semilogy",
"env.DrivingEnv",
"solvers.SampleGraphSolver",
"numpy.mean",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"solvers.GridSolver",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.title",
"numpy.logspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((650, 662), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (660, 662), True, 'from matplotlib import pyplot as plt\n'), ((667, 725), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['sample_count', 'grid_times'], {'label': '"""Grid-based"""'}), "(sample_count, grid_times, label='Grid-based')\n", (679, 725), True, 'from matplotlib import pyplot as plt\n'), ((730, 790), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['sample_count', 'graph_times'], {'label': '"""Graph-based"""'}), "(sample_count, graph_times, label='Graph-based')\n", (742, 790), True, 'from matplotlib import pyplot as plt\n'), ((795, 831), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Equivalent sample size"""'], {}), "('Equivalent sample size')\n", (805, 831), True, 'from matplotlib import pyplot as plt\n'), ((836, 866), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Running time (s)"""'], {}), "('Running time (s)')\n", (846, 866), True, 'from matplotlib import pyplot as plt\n'), ((871, 883), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (881, 883), True, 'from matplotlib import pyplot as plt\n'), ((888, 898), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (896, 898), True, 'from matplotlib import pyplot as plt\n'), ((1001, 1033), 'env.DrivingEnv', 'DrivingEnv', (['(15)'], {'random_seed': 'seed'}), '(15, random_seed=seed)\n', (1011, 1033), False, 'from env import DrivingEnv\n'), ((1446, 1458), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1456, 1458), True, 'from matplotlib import pyplot as plt\n'), ((1515, 1539), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Graph size"""'], {}), "('Graph size')\n", (1525, 1539), True, 'from matplotlib import pyplot as plt\n'), ((1544, 1578), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time and safety cost"""'], {}), "('Time and safety cost')\n", (1554, 1578), True, 'from matplotlib import pyplot as plt\n'), ((1583, 1644), 'matplotlib.pyplot.title', 'plt.title', (['"""Graph based policy performance versus graph size"""'], {}), "('Graph based policy performance versus graph size')\n", (1592, 1644), True, 'from matplotlib import pyplot as plt\n'), ((1649, 1659), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1657, 1659), True, 'from matplotlib import pyplot as plt\n'), ((1728, 1760), 'env.DrivingEnv', 'DrivingEnv', (['(15)'], {'random_seed': '(1234)'}), '(15, random_seed=1234)\n', (1738, 1760), False, 'from env import DrivingEnv\n'), ((2488, 2510), 'solvers.SampleGraphSolver', 'SampleGraphSolver', (['(800)'], {}), '(800)\n', (2505, 2510), False, 'from solvers import GridSolver, SampleGraphSolver\n'), ((2605, 2620), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (2617, 2620), True, 'from matplotlib import pyplot as plt\n'), ((2673, 2731), 'matplotlib.pyplot.title', 'plt.title', (["('Graph-based solution with %s cost' % cost_type)"], {}), "('Graph-based solution with %s cost' % cost_type)\n", (2682, 2731), True, 'from matplotlib import pyplot as plt\n'), ((2736, 2746), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2744, 2746), True, 'from matplotlib import pyplot as plt\n'), ((2820, 2851), 'numpy.logspace', 'np.logspace', (['(-3)', '(3)', 'ratio_count'], {}), '(-3, 3, ratio_count)\n', (2831, 2851), True, 'import numpy as np\n'), ((2867, 2882), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (2879, 2882), True, 'from matplotlib import pyplot as plt\n'), ((3827, 3879), 'matplotlib.pyplot.title', 'plt.title', (['"""Difference path under different weights"""'], {}), 
"('Difference path under different weights')\n", (3836, 3879), True, 'from matplotlib import pyplot as plt\n'), ((3884, 3894), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3892, 3894), True, 'from matplotlib import pyplot as plt\n'), ((363, 395), 'env.DrivingEnv', 'DrivingEnv', (['(15)'], {'random_seed': 'seed'}), '(15, random_seed=seed)\n', (373, 395), False, 'from env import DrivingEnv\n'), ((413, 429), 'solvers.GridSolver', 'GridSolver', (['size'], {}), '(size)\n', (423, 429), False, 'from solvers import GridSolver, SampleGraphSolver\n'), ((505, 537), 'env.DrivingEnv', 'DrivingEnv', (['(15)'], {'random_seed': 'seed'}), '(15, random_seed=seed)\n', (515, 537), False, 'from env import DrivingEnv\n'), ((555, 585), 'solvers.SampleGraphSolver', 'SampleGraphSolver', (['(size * size)'], {}), '(size * size)\n', (572, 585), False, 'from solvers import GridSolver, SampleGraphSolver\n'), ((1483, 1509), 'numpy.mean', 'np.mean', (['cost_list'], {'axis': '(1)'}), '(cost_list, axis=1)\n', (1490, 1509), True, 'import numpy as np\n'), ((3024, 3038), 'numpy.sqrt', 'np.sqrt', (['ratio'], {}), '(ratio)\n', (3031, 3038), True, 'import numpy as np\n'), ((3053, 3085), 'env.DrivingEnv', 'DrivingEnv', (['(15)'], {'random_seed': 'seed'}), '(15, random_seed=seed)\n', (3063, 3085), False, 'from env import DrivingEnv\n'), ((3103, 3125), 'solvers.SampleGraphSolver', 'SampleGraphSolver', (['(800)'], {}), '(800)\n', (3120, 3125), False, 'from solvers import GridSolver, SampleGraphSolver\n'), ((223, 265), 'numpy.linspace', 'np.linspace', (['min_sample', 'max_sample', 'count'], {}), '(min_sample, max_sample, count)\n', (234, 265), True, 'import numpy as np\n'), ((1050, 1092), 'numpy.linspace', 'np.linspace', (['min_sample', 'max_sample', 'count'], {}), '(min_sample, max_sample, count)\n', (1061, 1092), True, 'import numpy as np\n'), ((1228, 1258), 'solvers.SampleGraphSolver', 'SampleGraphSolver', (['(size * size)'], {}), '(size * size)\n', (1245, 1258), False, 'from solvers import GridSolver, SampleGraphSolver\n'), ((2887, 2919), 'env.DrivingEnv', 'DrivingEnv', (['(15)'], {'random_seed': 'seed'}), '(15, random_seed=seed)\n', (2897, 2919), False, 'from env import DrivingEnv\n')]
|
import numpy as np
import math
from scipy.stats import truncnorm
class ElectricMotor:
"""
Base class for all technical electrical motor models.
    A motor is described by its ODE state, i.e. the dynamic quantities of its ODE.
For example:
ODE-State of a DC-shunt motor: `` [i_a, i_e ] ``
* i_a: Anchor circuit current
* i_e: Exciting circuit current
Each electric motor can be parametrized by a dictionary of motor parameters,
the nominal state dictionary and the limit dictionary.
    Initialization is given by an initializer dict. The initial state can be a
    constant value or a random value drawn from a given interval.
    The dict should look like:
    { 'states'(dict): state names and initial values,
      'interval'(array-like): boundaries for each state
                              (only for random init), shape (num states, 2),
      'random_init'(str): 'uniform' or 'normal',
      'random_params'(tuple): mue (float), sigma (int) }
Example initializer(dict) for constant initialization:
{ 'states': {'omega': 16.0}}
Example initializer(dict) for random initialization:
{ 'random_init': 'normal'}
"""
#: Parameter indicating if the class is implementing the optional jacobian function
HAS_JACOBIAN = False
#: CURRENTS_IDX(list(int)): Indices for accessing all motor currents.
CURRENTS_IDX = []
#: CURRENTS(list(str)): List of the motor currents names
CURRENTS = []
#: VOLTAGES(list(str)): List of the motor input voltages names
VOLTAGES = []
#: _default_motor_parameter(dict): Default parameter dictionary for the motor
_default_motor_parameter = {}
#: _default_nominal_values(dict(float)): Default nominal motor state array
_default_nominal_values = {}
#: _default_limits(dict(float)): Default motor limits (0 for unbounded limits)
_default_limits = {}
    #: _default_initializer(dict): Default initial motor-state values and initializer parameters
_default_initializer = {'states': {},
'interval': None,
'random_init': None,
'random_params': None}
#: _default_initial_limits(dict): Default limit for initialization
_default_initial_limits = {}
@property
def nominal_values(self):
"""
Readonly motors nominal values.
Returns:
dict(float): Current nominal values of the motor.
"""
return self._nominal_values
@property
def limits(self):
"""
Readonly motors limit state array. Entries are set to the maximum physical possible values
in case of unspecified limits.
Returns:
dict(float): Limits of the motor.
"""
return self._limits
@property
def motor_parameter(self):
"""
Returns:
dict(float): The motors parameter dictionary
"""
return self._motor_parameter
@property
def initializer(self):
"""
Returns:
dict: Motor initial state and additional initializer parameter
"""
return self._initializer
@property
def initial_limits(self):
"""
Returns:
dict: nominal motor limits for choosing initial values
"""
return self._initial_limits
def __init__(self, motor_parameter=None, nominal_values=None,
limit_values=None, motor_initializer=None, initial_limits=None,
**__):
"""
:param motor_parameter: Motor parameter dictionary. Contents specified
for each motor.
:param nominal_values: Nominal values for the motor quantities.
:param limit_values: Limits for the motor quantities.
:param motor_initializer: Initial motor states (currents)
('constant', 'uniform', 'gaussian' sampled from
given interval or out of nominal motor values)
:param initial_limits: limits for of the initial state-value
"""
motor_parameter = motor_parameter or {}
self._motor_parameter = self._default_motor_parameter.copy()
self._motor_parameter.update(motor_parameter)
limit_values = limit_values or {}
self._limits = self._default_limits.copy()
self._limits.update(limit_values)
nominal_values = nominal_values or {}
self._nominal_values = self._default_nominal_values.copy()
self._nominal_values.update(nominal_values)
motor_initializer = motor_initializer or {}
self._initializer = self._default_initializer.copy()
self._initializer.update(motor_initializer)
self._initial_states = {}
if self._initializer['states'] is not None:
self._initial_states.update(self._initializer['states'])
        # initialize limits; in general they do not need to be changed
        # during training or episodes
initial_limits = initial_limits or {}
self._initial_limits = self._nominal_values.copy()
self._initial_limits.update(initial_limits)
# preventing wrong user input for the basic case
assert isinstance(self._initializer, dict), 'wrong initializer'
def electrical_ode(self, state, u_in, omega, *_):
"""
Calculation of the derivatives of each motor state variable for the given inputs / The motors ODE-System.
Args:
state(ndarray(float)): The motors state.
u_in(list(float)): The motors input voltages.
omega(float): Angular velocity of the motor
Returns:
ndarray(float): Derivatives of the motors ODE-system for the given inputs.
"""
raise NotImplementedError
def electrical_jacobian(self, state, u_in, omega, *_):
"""
Calculation of the jacobian of each motor ODE for the given inputs / The motors ODE-System.
Overriding this method is optional for each subclass. If it is overridden, the parameter HAS_JACOBIAN must also
be set to True. Otherwise, the jacobian will not be called.
Args:
state(ndarray(float)): The motors state.
u_in(list(float)): The motors input voltages.
omega(float): Angular velocity of the motor
Returns:
Tuple(ndarray, ndarray, ndarray):
[0]: Derivatives of all electrical motor states over all electrical motor states shape:(states x states)
[1]: Derivatives of all electrical motor states over omega shape:(states,)
[2]: Derivative of Torque over all motor states shape:(states,)
"""
pass
def initialize(self,
state_space,
state_positions,
**__):
"""
        Initializes the given state values. Values can be given as constants or
        sampled randomly from a statistical distribution. The initial values lie
        within the range of the nominal values or of a given interval. The values
        are written to the initial_states attribute.
Args:
state_space(gym.Box): normalized state space boundaries (given by
physical system)
state_positions(dict): indexes of system states (given by physical
system)
Returns:
"""
# for organization purposes
interval = self._initializer['interval']
random_dist = self._initializer['random_init']
random_params = self._initializer['random_params']
self._initial_states.update(self._default_initializer['states'])
if self._initializer['states'] is not None:
self._initial_states.update(self._initializer['states'])
# different limits for InductionMotor
if any(map(lambda state: state in self._initial_states.keys(),
['psi_ralpha', 'psi_rbeta'])):
nominal_values_ = [self._initial_limits[state]
for state in self._initial_states]
upper_bound = np.asarray(np.abs(nominal_values_), dtype=float)
# state space for Induction Envs based on documentation
# ['i_salpha', 'i_sbeta', 'psi_ralpha', 'psi_rbeta', 'epsilon']
            # hardcoded for the induction motors currently given in the toolbox
state_space_low = np.array([-1, -1, -1, -1, -1])
lower_bound = upper_bound * state_space_low
else:
if isinstance(self._nominal_values, dict):
nominal_values_ = [self._nominal_values[state]
for state in self._initial_states.keys()]
nominal_values_ = np.asarray(nominal_values_)
else:
nominal_values_ = np.asarray(self._nominal_values)
state_space_idx = [state_positions[state] for state in
self._initial_states.keys()]
upper_bound = np.asarray(nominal_values_, dtype=float)
lower_bound = upper_bound * \
np.asarray(state_space.low, dtype=float)[state_space_idx]
# clip nominal boundaries to user defined
if interval is not None:
lower_bound = np.clip(lower_bound,
a_min=
np.asarray(interval, dtype=float).T[0],
a_max=None)
upper_bound = np.clip(upper_bound,
a_min=None,
a_max=
np.asarray(interval, dtype=float).T[1])
# random initialization for each motor state (current, epsilon)
if random_dist is not None:
if random_dist == 'uniform':
initial_value = (upper_bound - lower_bound) * \
np.random.random_sample(
len(self._initial_states.keys())) + \
lower_bound
# writing initial values in initial_states dict
random_states = \
{state: initial_value[idx]
for idx, state in enumerate(self._initial_states.keys())}
self._initial_states.update(random_states)
elif random_dist in ['normal', 'gaussian']:
# specific input or middle of interval
mue = random_params[0] or (upper_bound - lower_bound) / 2 + lower_bound
sigma = random_params[1] or 1
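                # scipy's truncnorm expects its clipping bounds a and b in units of
                # standard deviations around the mean, hence the rescaling below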
a, b = (lower_bound - mue) / sigma, (upper_bound - mue) / sigma
initial_value = truncnorm.rvs(a, b,
loc=mue,
scale=sigma,
size=(len(self._initial_states.keys())))
# writing initial values in initial_states dict
random_states = \
{state: initial_value[idx]
for idx, state in enumerate(self._initial_states.keys())}
self._initial_states.update(random_states)
else:
# todo implement other distribution
raise NotImplementedError
# constant initialization for each motor state (current, epsilon)
elif self._initial_states is not None:
initial_value = np.atleast_1d(list(self._initial_states.values()))
# check init_value meets interval boundaries
if ((lower_bound <= initial_value).all()
and (initial_value <= upper_bound).all()):
initial_states_ = \
{state: initial_value[idx]
for idx, state in enumerate(self._initial_states.keys())}
self._initial_states.update(initial_states_)
else:
raise Exception('Initialization value has to be within nominal boundaries')
else:
raise Exception('No matching Initialization Case')
def reset(self,
state_space,
state_positions,
**__):
"""
Reset the motors state to a new initial state. (Default 0)
Args:
state_space(gym.Box): normalized state space boundaries
state_positions(dict): indexes of system states
Returns:
numpy.ndarray(float): The initial motor states.
"""
# check for valid initializer
if self._initializer and self._initializer['states']:
self.initialize(state_space, state_positions)
return np.asarray(list(self._initial_states.values()))
else:
return np.zeros(len(self.CURRENTS))
def i_in(self, state):
"""
Args:
state(ndarray(float)): ODE state of the motor
Returns:
list(float): List of all currents flowing into the motor.
"""
raise NotImplementedError
def _update_limits(self, limits_d={}, nominal_d={}):
"""Replace missing limits and nominal values with physical maximums.
Args:
            limits_d(dict): Mapping: quantity to its limit if not specified
            nominal_d(dict): Mapping: quantity to its nominal value if not specified
"""
# omega is replaced the same way for all motor types
limits_d.update(dict(omega=self._default_limits['omega']))
for qty, lim in limits_d.items():
if self._limits.get(qty, 0) == 0:
self._limits[qty] = lim
for entry in self._limits.keys():
if self._nominal_values.get(entry, 0) == 0:
self._nominal_values[entry] = nominal_d.get(entry, None) or \
self._limits[entry]
def _update_initial_limits(self, nominal_new={}, **kwargs):
"""
Complete initial states with further state limits
Args:
nominal_new(dict): new/further state limits
"""
self._initial_limits.update(nominal_new)
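# Hedged sketch (not part of the original module): the smallest useful subclass of
# ElectricMotor only has to provide the ODE and the current mapping. All names and
# parameter values below are illustrative assumptions, not a motor model of this library.
class _SingleCoilMotorSketch(ElectricMotor):
    CURRENTS_IDX = [0]
    CURRENTS = ['i']
    VOLTAGES = ['u']
    _default_motor_parameter = {'r': 1.0, 'l': 1e-3}
    _default_nominal_values = {'omega': 80.0, 'i': 8.0, 'u': 100.0}
    _default_limits = {'omega': 100.0, 'i': 10.0, 'u': 100.0}
    _default_initializer = {'states': {'i': 0.0}, 'interval': None,
                            'random_init': None, 'random_params': (None, None)}
    def electrical_ode(self, state, u_in, omega, *_):
        # di/dt = (u - r * i) / l
        mp = self._motor_parameter
        return np.array([(u_in[0] - mp['r'] * state[0]) / mp['l']])
    def i_in(self, state):
        return [state[0]]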
class DcMotor(ElectricMotor):
"""
The DcMotor and its subclasses implement the technical system of a dc motor.
This includes the system equations, the motor parameters of the equivalent circuit diagram,
as well as limits.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_a Ohm 0.78 Armature circuit resistance
r_e Ohm 25 Exciting circuit resistance
l_a H 6.3e-3 Armature circuit inductance
l_e H 1.2 Exciting circuit inductance
l_e_prime H 0.0094 Effective excitation inductance
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_a A Armature circuit current
i_e A Exciting circuit current
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_a V Armature circuit voltage
    u_e             V      Exciting circuit voltage
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i_a Armature current
i_e Exciting current
omega Angular Velocity
torque Motor generated torque
u_a Armature Voltage
u_e Exciting Voltage
======== ===========================================================
"""
# Indices for array accesses
I_A_IDX = 0
I_E_IDX = 1
CURRENTS_IDX = [0, 1]
CURRENTS = ['i_a', 'i_e']
VOLTAGES = ['u_a', 'u_e']
_default_motor_parameter = {
'r_a': 0.78, 'r_e': 25, 'l_a': 6.3e-3, 'l_e': 1.2, 'l_e_prime': 0.0094,
'j_rotor': 0.017,
}
_default_nominal_values = {'omega': 368, 'torque': 0.0, 'i_a': 50,
'i_e': 1.2, 'u': 420}
_default_limits = {'omega': 500, 'torque': 0.0, 'i_a': 75, 'i_e': 2,
'u': 420}
_default_initializer = {'states': {'i_a': 0.0, 'i_e': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
def __init__(self, motor_parameter=None, nominal_values=None,
limit_values=None, motor_initializer=None, **__):
# Docstring of superclass
super().__init__(motor_parameter, nominal_values,
limit_values, motor_initializer)
#: Matrix that contains the constant parameters of the systems equation for faster computation
self._model_constants = None
self._update_model()
self._update_limits()
def _update_model(self):
"""
Update the motors model parameters with the motor parameters.
Called internally when the motor parameters are changed or the motor is initialized.
"""
mp = self._motor_parameter
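        # Row i is divided by the respective inductance below; the column order
        # matches the vector built in electrical_ode: [i_a, i_e, omega * i_e, u_a, u_e]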
self._model_constants = np.array([
[-mp['r_a'], 0, -mp['l_e_prime'], 1, 0],
[0, -mp['r_e'], 0, 0, 1]
])
self._model_constants[self.I_A_IDX] = self._model_constants[
self.I_A_IDX] / mp['l_a']
self._model_constants[self.I_E_IDX] = self._model_constants[
self.I_E_IDX] / mp['l_e']
def torque(self, currents):
# Docstring of superclass
return self._motor_parameter['l_e_prime'] * currents[self.I_A_IDX] * \
currents[self.I_E_IDX]
def i_in(self, currents):
# Docstring of superclass
return list(currents)
def electrical_ode(self, state, u_in, omega, *_):
# Docstring of superclass
return np.matmul(self._model_constants, np.array([
state[self.I_A_IDX],
state[self.I_E_IDX],
omega * state[self.I_E_IDX],
u_in[0],
u_in[1],
]))
def get_state_space(self, input_currents, input_voltages):
"""
Calculate the possible normalized state space for the motor as a tuple of dictionaries "low" and "high".
Args:
input_currents: Tuple of the two converters possible output currents.
input_voltages: Tuple of the two converters possible output voltages.
Returns:
tuple(dict,dict): Dictionaries defining if positive and negative values are possible for each motors state.
"""
a_converter = 0
e_converter = 1
low = {
'omega': -1 if input_voltages.low[a_converter] == -1
or input_voltages.low[e_converter] == -1 else 0,
'torque': -1 if input_currents.low[a_converter] == -1
or input_currents.low[e_converter] == -1 else 0,
'i_a': -1 if input_currents.low[a_converter] == -1 else 0,
'i_e': -1 if input_currents.low[e_converter] == -1 else 0,
'u_a': -1 if input_voltages.low[a_converter] == -1 else 0,
'u_e': -1 if input_voltages.low[e_converter] == -1 else 0,
}
high = {
'omega': 1,
'torque': 1,
'i_a': 1,
'i_e': 1,
'u_a': 1,
'u_e': 1
}
return low, high
def _update_limits(self, limits_d={}):
# Docstring of superclass
# torque is replaced the same way for all DC motors
limits_d.update(dict(torque=self.torque([self._limits[state] for state
in self.CURRENTS])))
super()._update_limits(limits_d)
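# Hedged usage sketch (not part of the original module): a helper that evaluates one
# explicit Euler step of the DcMotor ODE at a made-up operating point. The time step
# tau and the input voltages below are illustrative assumptions.
def _dc_motor_ode_sketch(tau=1e-4):
    motor = DcMotor()
    state = np.zeros(len(DcMotor.CURRENTS))        # [i_a, i_e] = [0, 0]
    u_in = [400.0, 400.0]                          # armature and exciting voltage
    d_state = motor.electrical_ode(state, u_in, omega=0.0)
    next_state = state + tau * d_state             # one explicit Euler step
    return next_state, motor.torque(next_state)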
class DcShuntMotor(DcMotor):
"""
The DcShuntMotor is a DC motor with parallel armature and exciting circuit connected to one input voltage.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_a Ohm 0.78 Armature circuit resistance
r_e Ohm 25 Exciting circuit resistance
l_a H 6.3e-3 Armature circuit inductance
l_e H 1.2 Exciting circuit inductance
l_e_prime H 0.0094 Effective excitation inductance
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_a A Armature circuit current
i_e A Exciting circuit current
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u V Voltage applied to both circuits
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i_a Armature current
i_e Exciting current
omega Angular Velocity
torque Motor generated torque
u Voltage
======== ===========================================================
"""
HAS_JACOBIAN = True
VOLTAGES = ['u']
_default_nominal_values = {'omega': 368, 'torque': 0.0, 'i_a': 50,
'i_e': 1.2, 'u': 420}
_default_limits = {'omega': 500, 'torque': 0.0, 'i_a': 75, 'i_e': 2,
'u': 420}
_default_initializer = {'states': {'i_a': 0.0, 'i_e': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
def i_in(self, state):
# Docstring of superclass
return [state[self.I_A_IDX] + state[self.I_E_IDX]]
def electrical_ode(self, state, u_in, omega, *_):
# Docstring of superclass
return super().electrical_ode(state, (u_in[0], u_in[0]), omega)
def electrical_jacobian(self, state, u_in, omega, *_):
mp = self._motor_parameter
return (
np.array([
[-mp['r_a'] / mp['l_a'], -mp['l_e_prime'] / mp['l_a'] * omega],
[0, -mp['r_e'] / mp['l_e']]
]),
np.array([-mp['l_e_prime'] * state[self.I_E_IDX] / mp['l_a'], 0]),
np.array([mp['l_e_prime'] * state[self.I_E_IDX],
mp['l_e_prime'] * state[self.I_A_IDX]])
)
def get_state_space(self, input_currents, input_voltages):
"""
Calculate the possible normalized state space for the motor as a tuple of dictionaries "low" and "high".
Args:
input_currents: The converters possible output currents.
input_voltages: The converters possible output voltages.
Returns:
tuple(dict,dict): Dictionaries defining if positive and negative values are possible for each motors state.
"""
lower_limit = 0
low = {
'omega': 0,
'torque': -1 if input_currents.low[0] == -1 else 0,
'i_a': -1 if input_currents.low[0] == -1 else 0,
'i_e': -1 if input_currents.low[0] == -1 else 0,
'u': -1 if input_voltages.low[0] == -1 else 0,
}
high = {
'omega': 1,
'torque': 1,
'i_a': 1,
'i_e': 1,
'u': 1,
}
return low, high
def _update_limits(self):
# Docstring of superclass
# R_a might be 0, protect against that
r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
limit_agenda = \
{'u': self._default_limits['u'],
'i_a': self._limits.get('i', None) or
self._limits['u'] / r_a,
'i_e': self._limits.get('i', None) or
self._limits['u'] / self.motor_parameter['r_e'],
}
super()._update_limits(limit_agenda)
class DcSeriesMotor(DcMotor):
"""
The DcSeriesMotor is a DcMotor with an armature and exciting circuit connected in series to one input voltage.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_a Ohm 2.78 Armature circuit resistance
r_e Ohm 1.0 Exciting circuit resistance
l_a H 6.3e-3 Armature circuit inductance
l_e H 1.6e-3 Exciting circuit inductance
l_e_prime H 0.05 Effective excitation inductance
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i A Circuit current
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u V Circuit voltage
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i Circuit Current
omega Angular Velocity
torque Motor generated torque
u Circuit Voltage
======== ===========================================================
"""
HAS_JACOBIAN = True
I_IDX = 0
CURRENTS_IDX = [0]
CURRENTS = ['i']
VOLTAGES = ['u']
_default_motor_parameter = {
'r_a': 2.78, 'r_e': 1.0, 'l_a': 6.3e-3, 'l_e': 1.6e-3,
'l_e_prime': 0.05, 'j_rotor': 0.017,
}
_default_nominal_values = dict(omega=80, torque=0.0, i=50, u=420)
_default_limits = dict(omega=100, torque=0.0, i=100, u=420)
_default_initializer = {'states': {'i': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
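        # Column order matches the vector built in electrical_ode: [i, omega * i, u]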
self._model_constants = np.array([
[-mp['r_a'] - mp['r_e'], -mp['l_e_prime'], 1]
])
self._model_constants[self.I_IDX] = self._model_constants[
self.I_IDX] / (
mp['l_a'] + mp['l_e'])
def torque(self, currents):
# Docstring of superclass
return super().torque([currents[self.I_IDX], currents[self.I_IDX]])
def electrical_ode(self, state, u_in, omega, *_):
# Docstring of superclass
return np.matmul(
self._model_constants,
np.array([
state[self.I_IDX],
omega * state[self.I_IDX],
u_in[0]
])
)
def i_in(self, state):
# Docstring of superclass
return state[self.CURRENTS_IDX]
def _update_limits(self):
# Docstring of superclass
# R_a might be 0, protect against that
r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
limits_agenda = {
'u': self._default_limits['u'],
'i': self._limits['u'] / (r_a + self._motor_parameter['r_e']),
}
super()._update_limits(limits_agenda)
def get_state_space(self, input_currents, input_voltages):
# Docstring of superclass
lower_limit = 0
low = {
'omega': 0,
'torque': 0,
'i': -1 if input_currents.low[0] == -1 else 0,
'u': -1 if input_voltages.low[0] == -1 else 0,
}
high = {
'omega': 1,
'torque': 1,
'i': 1,
'u': 1,
}
return low, high
def electrical_jacobian(self, state, u_in, omega, *_):
mp = self._motor_parameter
return (
np.array([[-(mp['r_a'] + mp['r_e'] + mp['l_e_prime'] * omega) / (
mp['l_a'] + mp['l_e'])]]),
np.array([-mp['l_e_prime'] * state[self.I_IDX] / (
mp['l_a'] + mp['l_e'])]),
np.array([2 * mp['l_e_prime'] * state[self.I_IDX]])
)
class DcPermanentlyExcitedMotor(DcMotor):
"""
The DcPermanentlyExcitedMotor is a DcMotor with a Permanent Magnet instead of the excitation circuit.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_a Ohm 25.0 Armature circuit resistance
l_a H 3.438e-2 Armature circuit inductance
psi_e Wb 18 Magnetic Flux of the permanent magnet
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i A Circuit current
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u V Circuit voltage
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i Circuit Current
omega Angular Velocity
torque Motor generated torque
u Circuit Voltage
======== ===========================================================
"""
I_IDX = 0
CURRENTS_IDX = [0]
CURRENTS = ['i']
VOLTAGES = ['u']
HAS_JACOBIAN = True
_default_motor_parameter = {
'r_a': 25.0, 'l_a': 3.438e-2, 'psi_e': 18, 'j_rotor': 0.017
}
_default_nominal_values = dict(omega=22, torque=0.0, i=16, u=400)
_default_limits = dict(omega=50, torque=0.0, i=25, u=400)
_default_initializer = {'states': {'i': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
# placeholder for omega, currents and u_in
_ode_placeholder = np.zeros(2 + len(CURRENTS_IDX), dtype=np.float64)
def torque(self, state):
# Docstring of superclass
return self._motor_parameter['psi_e'] * state[self.I_IDX]
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
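        # Column order matches _ode_placeholder in electrical_ode: [omega, i, u]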
self._model_constants = np.array([
[-mp['psi_e'], -mp['r_a'], 1.0]
])
self._model_constants[self.I_IDX] /= mp['l_a']
def i_in(self, state):
# Docstring of superclass
return state[self.CURRENTS_IDX]
def electrical_ode(self, state, u_in, omega, *_):
# Docstring of superclass
        self._ode_placeholder[:] = \
            [omega] + np.atleast_1d(state[self.I_IDX]).tolist() + [u_in[0]]
return np.matmul(self._model_constants, self._ode_placeholder)
def electrical_jacobian(self, state, u_in, omega, *_):
mp = self._motor_parameter
return (
np.array([[-mp['r_a'] / mp['l_a']]]),
np.array([-mp['psi_e'] / mp['l_a']]),
np.array([mp['psi_e']])
)
def _update_limits(self):
# Docstring of superclass
# R_a might be 0, protect against that
r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
limits_agenda = {
'u': self._default_limits['u'],
'i': self._limits['u'] / r_a,
}
super()._update_limits(limits_agenda)
def get_state_space(self, input_currents, input_voltages):
# Docstring of superclass
lower_limit = 0
low = {
'omega': -1 if input_voltages.low[0] == -1 else 0,
'torque': -1 if input_currents.low[0] == -1 else 0,
'i': -1 if input_currents.low[0] == -1 else 0,
'u': -1 if input_voltages.low[0] == -1 else 0,
}
high = {
'omega': 1,
'torque': 1,
'i': 1,
'u': 1,
}
return low, high
class DcExternallyExcitedMotor(DcMotor):
# Equals DC Base Motor
HAS_JACOBIAN = True
def electrical_jacobian(self, state, u_in, omega, *_):
mp = self._motor_parameter
return (
np.array([
[-mp['r_a'] / mp['l_a'], -mp['l_e_prime'] / mp['l_a'] * omega],
[0, -mp['r_e'] / mp['l_e']]
]),
np.array([-mp['l_e_prime'] * state[self.I_E_IDX] / mp['l_a'], 0]),
np.array([mp['l_e_prime'] * state[self.I_E_IDX],
mp['l_e_prime'] * state[self.I_A_IDX]])
)
def _update_limits(self):
# Docstring of superclass
# R_a might be 0, protect against that
r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
limit_agenda = \
{'u_a': self._default_limits['u'],
'u_e': self._default_limits['u'],
'i_a': self._limits.get('i', None) or
self._limits['u'] / r_a,
'i_e': self._limits.get('i', None) or
self._limits['u'] / self.motor_parameter['r_e'],
}
super()._update_limits(limit_agenda)
class ThreePhaseMotor(ElectricMotor):
"""
The ThreePhaseMotor and its subclasses implement the technical system of Three Phase Motors.
This includes the system equations, the motor parameters of the equivalent circuit diagram,
as well as limits and bandwidth.
"""
# transformation matrix from abc to alpha-beta representation
_t23 = 2 / 3 * np.array([
[1, -0.5, -0.5],
[0, 0.5 * np.sqrt(3), -0.5 * np.sqrt(3)]
])
# transformation matrix from alpha-beta to abc representation
_t32 = np.array([
[1, 0],
[-0.5, 0.5 * np.sqrt(3)],
[-0.5, -0.5 * np.sqrt(3)]
])
@staticmethod
def t_23(quantities):
"""
Transformation from abc representation to alpha-beta representation
Args:
quantities: The properties in the abc representation like ''[u_a, u_b, u_c]''
Returns:
The converted quantities in the alpha-beta representation like ''[u_alpha, u_beta]''
"""
return np.matmul(ThreePhaseMotor._t23, quantities)
@staticmethod
def t_32(quantities):
"""
Transformation from alpha-beta representation to abc representation
Args:
quantities: The properties in the alpha-beta representation like ``[u_alpha, u_beta]``
Returns:
The converted quantities in the abc representation like ``[u_a, u_b, u_c]``
"""
return np.matmul(ThreePhaseMotor._t32, quantities)
@staticmethod
def q(quantities, epsilon):
"""
Transformation of the dq-representation into alpha-beta using the electrical angle
Args:
quantities: Array of two quantities in dq-representation. Example [i_d, i_q]
epsilon: Current electrical angle of the motor
Returns:
Array of the two quantities converted to alpha-beta-representation. Example [u_alpha, u_beta]
"""
cos = math.cos(epsilon)
sin = math.sin(epsilon)
return cos * quantities[0] - sin * quantities[1], sin * quantities[
0] + cos * quantities[1]
@staticmethod
def q_inv(quantities, epsilon):
"""
Transformation of the alpha-beta-representation into dq using the electrical angle
Args:
quantities: Array of two quantities in alpha-beta-representation. Example [u_alpha, u_beta]
epsilon: Current electrical angle of the motor
Returns:
Array of the two quantities converted to dq-representation. Example [u_d, u_q]
Note:
The transformation from alpha-beta to dq is just its inverse conversion with negated epsilon.
So this method calls q(quantities, -epsilon).
"""
        return ThreePhaseMotor.q(quantities, -epsilon)
def q_me(self, quantities, epsilon):
"""
Transformation of the dq-representation into alpha-beta using the mechanical angle
Args:
quantities: Array of two quantities in dq-representation. Example [i_d, i_q]
epsilon: Current mechanical angle of the motor
Returns:
Array of the two quantities converted to alpha-beta-representation. Example [u_alpha, u_beta]
"""
return self.q(quantities, epsilon * self._motor_parameter['p'])
def q_inv_me(self, quantities, epsilon):
"""
Transformation of the alpha-beta-representation into dq using the mechanical angle
Args:
quantities: Array of two quantities in alpha-beta-representation. Example [u_alpha, u_beta]
epsilon: Current mechanical angle of the motor
Returns:
Array of the two quantities converted to dq-representation. Example [u_d, u_q]
Note:
The transformation from alpha-beta to dq is just its inverse conversion with negated epsilon.
So this method calls q(quantities, -epsilon).
"""
return self.q_me(quantities, -epsilon)
def _torque_limit(self):
"""
Returns:
Maximal possible torque for the given limits in self._limits
"""
raise NotImplementedError()
def _update_limits(self, limits_d={}, nominal_d={}):
# Docstring of superclass
super()._update_limits(limits_d, nominal_d)
super()._update_limits(dict(torque=self._torque_limit()))
def _update_initial_limits(self, nominal_new={}, **kwargs):
# Docstring of superclass
super()._update_initial_limits(self._nominal_values)
super()._update_initial_limits(nominal_new)
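# Hedged round-trip sketch (not part of the original module): demonstrates the static
# coordinate transformations defined above. The phase currents below are an arbitrary
# zero-sum example, so t_23 followed by t_32 recovers them exactly; epsilon is assumed.
def _transformation_roundtrip_sketch(epsilon=0.3):
    i_abc = np.array([1.0, -0.5, -0.5])
    i_alphabeta = ThreePhaseMotor.t_23(i_abc)             # abc -> alpha/beta
    i_dq = ThreePhaseMotor.q_inv(i_alphabeta, epsilon)    # alpha/beta -> dq
    i_alphabeta_back = ThreePhaseMotor.q(i_dq, epsilon)   # dq -> alpha/beta
    return ThreePhaseMotor.t_32(i_alphabeta_back)         # back to abc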
class SynchronousMotor(ThreePhaseMotor):
"""
The SynchronousMotor and its subclasses implement the technical system of a three phase synchronous motor.
This includes the system equations, the motor parameters of the equivalent circuit diagram,
as well as limits and bandwidth.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 0.78 Stator resistance
l_d H 1.2 Direct axis inductance
l_q H 6.3e-3 Quadrature axis inductance
psi_p Wb 0.0094 Effective excitation flux (PMSM only)
p 1 2 Pole pair number
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_a A Current through branch a
i_b A Current through branch b
i_c A Current through branch c
i_alpha A Current in alpha axis
i_beta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
    u_sd            V      Direct axis voltage
    u_sq            V      Quadrature axis voltage
    u_a             V      Voltage through branch a
    u_b             V      Voltage through branch b
    u_c             V      Voltage through branch c
    u_alpha         V      Voltage in alpha axis
    u_beta          V      Voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_a Current in phase a
i_b Current in phase b
i_c Current in phase c
i_alpha Current in alpha axis
i_beta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
epsilon Electrical rotational angle
torque Motor generated torque
u_a Voltage in phase a
u_b Voltage in phase b
u_c Voltage in phase c
u_alpha Voltage in alpha axis
u_beta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
I_SD_IDX = 0
I_SQ_IDX = 1
EPSILON_IDX = 2
CURRENTS_IDX = [0, 1]
CURRENTS = ['i_sd', 'i_sq']
VOLTAGES = ['u_sd', 'u_sq']
_model_constants = None
_initializer = None
def __init__(self, motor_parameter=None, nominal_values=None,
limit_values=None, motor_initializer=None, **kwargs):
# Docstring of superclass
nominal_values = nominal_values or {}
limit_values = limit_values or {}
super().__init__(motor_parameter, nominal_values,
limit_values, motor_initializer)
self._update_model()
self._update_limits()
@property
def motor_parameter(self):
# Docstring of superclass
return self._motor_parameter
@property
def initializer(self):
# Docstring of superclass
return self._initializer
def reset(self, state_space,
state_positions,
**__):
# Docstring of superclass
if self._initializer and self._initializer['states']:
self.initialize(state_space, state_positions)
return np.asarray(list(self._initial_states.values()))
else:
return np.zeros(len(self.CURRENTS) + 1)
def torque(self, state):
# Docstring of superclass
raise NotImplementedError
def _update_model(self):
"""
Set motor parameters into a matrix for faster computation
"""
raise NotImplementedError
def electrical_ode(self, state, u_dq, omega, *_):
"""
The differential equation of the Synchronous Motor.
Args:
state: The current state of the motor. [i_sd, i_sq, epsilon]
            u_dq: The input voltages [u_sd, u_sq]
            omega: The mechanical angular velocity
Returns:
The derivatives of the state vector d/dt([i_sd, i_sq, epsilon])
"""
return np.matmul(self._model_constants, np.array([
omega,
state[self.I_SD_IDX],
state[self.I_SQ_IDX],
u_dq[0],
u_dq[1],
omega * state[self.I_SD_IDX],
omega * state[self.I_SQ_IDX],
]))
def i_in(self, state):
# Docstring of superclass
return state[self.CURRENTS_IDX]
def _update_limits(self):
# Docstring of superclass
voltage_limit = 0.5 * self._limits['u']
voltage_nominal = 0.5 * self._nominal_values['u']
limits_agenda = {}
nominal_agenda = {}
for u, i in zip(self.IO_VOLTAGES, self.IO_CURRENTS):
limits_agenda[u] = voltage_limit
nominal_agenda[u] = voltage_nominal
limits_agenda[i] = self._limits.get('i', None) or \
self._limits[u] / self._motor_parameter['r_s']
nominal_agenda[i] = self._nominal_values.get('i', None) or \
self._nominal_values[u] / \
self._motor_parameter['r_s']
super()._update_limits(limits_agenda, nominal_agenda)
# def initialize(self,
# state_space,
# state_positions,
# **__):
# super().initialize(state_space, state_positions)
class SynchronousReluctanceMotor(SynchronousMotor):
"""
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 0.78 Stator resistance
l_d H 1.2 Direct axis inductance
l_q H 6.3e-3 Quadrature axis inductance
p 1 2 Pole pair number
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_a A Current through branch a
i_b A Current through branch b
i_c A Current through branch c
i_alpha A Current in alpha axis
i_beta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_a V Voltage through branch a
u_b V Voltage through branch b
u_c V Voltage through branch c
u_alpha V Voltage in alpha axis
u_beta V Voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_a Current in phase a
i_b Current in phase b
i_c Current in phase c
i_alpha Current in alpha axis
i_beta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
epsilon Electrical rotational angle
torque Motor generated torque
u_a Voltage in phase a
u_b Voltage in phase b
u_c Voltage in phase c
u_alpha Voltage in alpha axis
u_beta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
HAS_JACOBIAN = True
#### Parameters taken from DOI: 10.1109/AMC.2008.4516099 (<NAME>, <NAME>, <NAME>)
_default_motor_parameter = {'p': 4,
'l_d': 10.1e-3,
'l_q': 4.1e-3,
'j_rotor': 0.8e-3,
'r_s': 0.57
}
_default_nominal_values = {'i': 10, 'torque': 0, 'omega': 3e3 * np.pi / 30, 'epsilon': np.pi, 'u': 100}
_default_limits = {'i': 13, 'torque': 0, 'omega': 4.3e3 * np.pi / 30, 'epsilon': np.pi, 'u': 100}
_default_initializer = {'states': {'i_sq': 0.0, 'i_sd': 0.0, 'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
IO_VOLTAGES = ['u_a', 'u_b', 'u_c', 'u_sd', 'u_sq']
IO_CURRENTS = ['i_a', 'i_b', 'i_c', 'i_sd', 'i_sq']
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
self._model_constants = np.array([
# omega, i_sd, i_sq, u_sd, u_sq, omega * i_sd, omega * i_sq
[ 0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']],
[ 0, 0, -mp['r_s'], 0, 1, -mp['l_d'] * mp['p'], 0],
[mp['p'], 0, 0, 0, 0, 0, 0]
])
self._model_constants[self.I_SD_IDX] = self._model_constants[self.I_SD_IDX] / mp['l_d']
self._model_constants[self.I_SQ_IDX] = self._model_constants[self.I_SQ_IDX] / mp['l_q']
def _torque_limit(self):
# Docstring of superclass
return self.torque([self._limits['i_sd'] / np.sqrt(2), self._limits['i_sq'] / np.sqrt(2), 0])
def torque(self, currents):
# Docstring of superclass
mp = self._motor_parameter
return 1.5 * mp['p'] * (
(mp['l_d'] - mp['l_q']) * currents[self.I_SD_IDX]) * \
currents[self.I_SQ_IDX]
def electrical_jacobian(self, state, u_in, omega, *_):
mp = self._motor_parameter
return (
np.array([
[-mp['r_s'] / mp['l_d'], mp['l_q'] / mp['l_d'] * mp['p'] * omega, 0],
[-mp['l_d'] / mp['l_q'] * mp['p'] * omega, -mp['r_s'] / mp['l_q'], 0],
[0, 0, 0]
]),
np.array([
mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX],
- mp['p'] * mp['l_d'] / mp['l_q'] * state[self.I_SD_IDX],
mp['p']
]),
np.array([
1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX],
1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX],
0
])
)
class PermanentMagnetSynchronousMotor(SynchronousMotor):
"""
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 0.78 Stator resistance
l_d H 1.2 Direct axis inductance
l_q H 6.3e-3 Quadrature axis inductance
p 1 2 Pole pair number
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_a A Current through branch a
i_b A Current through branch b
i_c A Current through branch c
i_alpha A Current in alpha axis
i_beta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_a V Voltage through branch a
u_b V Voltage through branch b
u_c V Voltage through branch c
u_alpha V Voltage in alpha axis
u_beta V Voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_a Current in phase a
i_b Current in phase b
i_c Current in phase c
i_alpha Current in alpha axis
i_beta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
torque Motor generated torque
epsilon Electrical rotational angle
u_a Voltage in phase a
u_b Voltage in phase b
u_c Voltage in phase c
u_alpha Voltage in alpha axis
u_beta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
#### Parameters taken from DOI: 10.1109/TPEL.2020.3006779 (<NAME>, <NAME>, <NAME>, <NAME>)
#### and DOI: 10.1109/IEMDC.2019.8785122 (<NAME>, <NAME>, <NAME>)
_default_motor_parameter = {
'p': 3,
'l_d': 0.37e-3,
'l_q': 1.2e-3,
'j_rotor': 0.3883,
'r_s': 18e-3,
'psi_p': 66e-3,
}
HAS_JACOBIAN = True
_default_limits = dict(omega=12e3 * np.pi / 30, torque=0.0, i=260, epsilon=math.pi, u=300)
_default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=240, epsilon=math.pi, u=300)
_default_initializer = {'states': {'i_sq': 0.0, 'i_sd': 0.0, 'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
IO_VOLTAGES = ['u_a', 'u_b', 'u_c', 'u_sd', 'u_sq']
IO_CURRENTS = ['i_a', 'i_b', 'i_c', 'i_sd', 'i_sq']
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
self._model_constants = np.array([
# omega, i_d, i_q, u_d, u_q, omega * i_d, omega * i_q
[ 0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']],
[-mp['psi_p'] * mp['p'], 0, -mp['r_s'], 0, 1, -mp['l_d'] * mp['p'], 0],
[ mp['p'], 0, 0, 0, 0, 0, 0],
])
self._model_constants[self.I_SD_IDX] = self._model_constants[self.I_SD_IDX] / mp['l_d']
self._model_constants[self.I_SQ_IDX] = self._model_constants[self.I_SQ_IDX] / mp['l_q']
def _torque_limit(self):
# Docstring of superclass
mp = self._motor_parameter
if mp['l_d'] == mp['l_q']:
return self.torque([0, self._limits['i_sq'], 0])
else:
i_n = self.nominal_values['i']
_p = mp['psi_p'] / (2 * (mp['l_d'] - mp['l_q']))
_q = - i_n ** 2 / 2
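            # MTPA operating point: maximize 1.5 * p * (psi_p + (l_d - l_q) * i_d) * i_q
            # subject to i_d^2 + i_q^2 = i_n^2; d(torque)/d(i_d) = 0 yields the quadratic
            # i_d^2 + _p * i_d + _q = 0, whose negative root is taken below.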
            i_d_opt = -_p / 2 - np.sqrt((_p / 2) ** 2 - _q)
i_q_opt = np.sqrt(i_n ** 2 - i_d_opt ** 2)
return self.torque([i_d_opt, i_q_opt, 0])
def torque(self, currents):
# Docstring of superclass
mp = self._motor_parameter
return 1.5 * mp['p'] * (mp['psi_p'] + (mp['l_d'] - mp['l_q']) * currents[self.I_SD_IDX]) * currents[self.I_SQ_IDX]
def electrical_jacobian(self, state, u_in, omega, *args):
mp = self._motor_parameter
return (
np.array([ # dx'/dx
[-mp['r_s'] / mp['l_d'], mp['l_q']/mp['l_d'] * omega * mp['p'], 0],
[-mp['l_d'] / mp['l_q'] * omega * mp['p'], - mp['r_s'] / mp['l_q'], 0],
[0, 0, 0]
]),
np.array([ # dx'/dw
mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX],
- mp['p'] * mp['l_d'] / mp['l_q'] * state[self.I_SD_IDX] - mp['p'] * mp['psi_p'] / mp['l_q'],
mp['p']
]),
np.array([ # dT/dx
1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX],
1.5 * mp['p'] * (mp['psi_p'] + (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX]),
0
])
)
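# Hedged usage sketch (not part of the original module): evaluates torque and the
# electrical ODE of the permanent magnet synchronous motor at a made-up operating
# point. The state layout is [i_sd, i_sq, epsilon]; the voltages and omega are assumptions.
def _pmsm_operating_point_sketch():
    pmsm = PermanentMagnetSynchronousMotor()
    state = np.array([0.0, 100.0, 0.0])                  # i_sd = 0 A, i_sq = 100 A
    torque = pmsm.torque(state)                          # reduces to 1.5 * p * psi_p * i_sq here
    d_state = pmsm.electrical_ode(state, [0.0, 50.0], omega=100.0)
    return torque, d_state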
class InductionMotor(ThreePhaseMotor):
"""
The InductionMotor and its subclasses implement the technical system of a three phase induction motor.
This includes the system equations, the motor parameters of the equivalent circuit diagram,
as well as limits and bandwidth.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 2.9338 Stator resistance
r_r Ohm 1.355 Rotor resistance
l_m H 143.75e-3 Main inductance
l_sigs H 5.87e-3 Stator-side stray inductance
l_sigr H 5.87e-3 Rotor-side stray inductance
p 1 2 Pole pair number
j_rotor kg/m^2 0.0011 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_sa A Current through branch a
i_sb A Current through branch b
i_sc A Current through branch c
i_salpha A Current in alpha axis
i_sbeta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_sa V Voltage through branch a
u_sb V Voltage through branch b
u_sc V Voltage through branch c
u_salpha V Voltage in alpha axis
u_sbeta V Voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_sa Current in phase a
i_sb Current in phase b
i_sc Current in phase c
i_salpha Current in alpha axis
i_sbeta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
torque Motor generated torque
u_sa Voltage in phase a
u_sb Voltage in phase b
u_sc Voltage in phase c
u_salpha Voltage in alpha axis
u_sbeta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
I_SALPHA_IDX = 0
I_SBETA_IDX = 1
PSI_RALPHA_IDX = 2
PSI_RBETA_IDX = 3
EPSILON_IDX = 4
CURRENTS_IDX = [0, 1]
FLUX_IDX = [2, 3]
CURRENTS = ['i_salpha', 'i_sbeta']
FLUXES = ['psi_ralpha', 'psi_rbeta']
STATOR_VOLTAGES = ['u_salpha', 'u_sbeta']
IO_VOLTAGES = ['u_sa', 'u_sb', 'u_sc', 'u_salpha', 'u_sbeta', 'u_sd',
'u_sq']
IO_CURRENTS = ['i_sa', 'i_sb', 'i_sc', 'i_salpha', 'i_sbeta', 'i_sd',
'i_sq']
HAS_JACOBIAN = True
#### Parameters taken from DOI: 10.1109/EPEPEMC.2018.8522008 (<NAME>, <NAME>, <NAME>)
_default_motor_parameter = {
'p': 2,
'l_m': 143.75e-3,
'l_sigs': 5.87e-3,
'l_sigr': 5.87e-3,
'j_rotor': 1.1e-3,
'r_s': 2.9338,
'r_r': 1.355,
}
_default_limits = dict(omega=4e3 * np.pi / 30, torque=0.0, i=5.5, epsilon=math.pi, u=560)
_default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=3.9, epsilon=math.pi, u=560)
_model_constants = None
_default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
_initializer = None
@property
def motor_parameter(self):
# Docstring of superclass
return self._motor_parameter
@property
def initializer(self):
# Docstring of superclass
return self._initializer
def __init__(self, motor_parameter=None, nominal_values=None,
limit_values=None, motor_initializer=None, initial_limits=None,
**__):
# Docstring of superclass
# convert placeholder i and u to actual IO quantities
_nominal_values = self._default_nominal_values.copy()
_nominal_values.update({u: _nominal_values['u'] for u in self.IO_VOLTAGES})
_nominal_values.update({i: _nominal_values['i'] for i in self.IO_CURRENTS})
del _nominal_values['u'], _nominal_values['i']
_nominal_values.update(nominal_values or {})
# same for limits
_limit_values = self._default_limits.copy()
_limit_values.update({u: _limit_values['u'] for u in self.IO_VOLTAGES})
_limit_values.update({i: _limit_values['i'] for i in self.IO_CURRENTS})
del _limit_values['u'], _limit_values['i']
_limit_values.update(limit_values or {})
super().__init__(motor_parameter, nominal_values,
limit_values, motor_initializer, initial_limits)
self._update_model()
self._update_limits(_limit_values, _nominal_values)
def reset(self,
state_space,
state_positions,
omega=None):
# Docstring of superclass
if self._initializer and self._initializer['states']:
self._update_initial_limits(omega=omega)
self.initialize(state_space, state_positions)
return np.asarray(list(self._initial_states.values()))
else:
return np.zeros(len(self.CURRENTS) + len(self.FLUXES) + 1)
def electrical_ode(self, state, u_sr_alphabeta, omega, *args):
"""
The differential equation of the Induction Motor.
Args:
state: The momentary state of the motor. [i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon]
            omega: The mechanical angular velocity of the rotor (provided by the load)
u_sr_alphabeta: The input voltages [u_salpha, u_sbeta, u_ralpha, u_rbeta]
Returns:
The derivatives of the state vector d/dt( [i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon])
"""
return np.matmul(self._model_constants, np.array([
# omega, i_alpha, i_beta, psi_ralpha, psi_rbeta, omega * psi_ralpha, omega * psi_rbeta, u_salpha, u_sbeta, u_ralpha, u_rbeta,
omega,
state[self.I_SALPHA_IDX],
state[self.I_SBETA_IDX],
state[self.PSI_RALPHA_IDX],
state[self.PSI_RBETA_IDX],
omega * state[self.PSI_RALPHA_IDX],
omega * state[self.PSI_RBETA_IDX],
u_sr_alphabeta[0, 0],
u_sr_alphabeta[0, 1],
u_sr_alphabeta[1, 0],
u_sr_alphabeta[1, 1],
]))
def i_in(self, state):
# Docstring of superclass
return state[self.CURRENTS_IDX]
def _torque_limit(self):
# Docstring of superclass
mp = self._motor_parameter
return 1.5 * mp['p'] * mp['l_m'] ** 2/(mp['l_m']+mp['l_sigr']) * self._limits['i_sd'] * self._limits['i_sq'] / 2
def torque(self, states):
# Docstring of superclass
mp = self._motor_parameter
return 1.5 * mp['p'] * mp['l_m']/(mp['l_m'] + mp['l_sigr']) * (states[self.PSI_RALPHA_IDX] * states[self.I_SBETA_IDX] - states[self.PSI_RBETA_IDX] * states[self.I_SALPHA_IDX])
def _flux_limit(self, omega=0, eps_mag=0, u_q_max=0.0, u_rq_max=0.0):
"""
        Calculate flux limits for a given current and magnetic field angle
Args:
omega(float): speed given by mechanical load
eps_mag(float): magnetic field angle
            u_q_max(float): maximal stator voltage in q-system
            u_rq_max(float): maximal rotor voltage in q-system
        Returns:
maximal flux values(list) in alpha-beta-system
"""
mp = self.motor_parameter
l_s = mp['l_m'] + mp['l_sigs']
l_r = mp['l_m'] + mp['l_sigr']
l_mr = mp['l_m'] / l_r
sigma = (l_s * l_r - mp['l_m'] ** 2) / (l_s * l_r)
# limiting flux for a low omega
if omega == 0:
psi_d_max = mp['l_m'] * self._nominal_values['i_sd']
else:
i_d, i_q = self.q_inv([self._initial_states['i_salpha'],
self._initial_states['i_sbeta']],
eps_mag)
psi_d_max = mp['p'] * omega * sigma * l_s * i_d + \
(mp['r_s'] + mp['r_r'] * l_mr**2) * i_q + \
u_q_max + \
l_mr * u_rq_max
psi_d_max /= - mp['p'] * omega * l_mr
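            # Editor's note (interpretation, not from the original docs): this appears to
            # solve the steady-state q-axis stator voltage balance in rotor-flux
            # coordinates for the d-flux that just exhausts the available voltage
            # u_q_max (plus the referred rotor voltage l_mr * u_rq_max).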
# clipping flux and setting nominal limit
psi_d_max = 0.9 * np.clip(psi_d_max, a_min=0, a_max=np.abs(mp['l_m'] * i_d))
# returning flux in alpha, beta system
return self.q([psi_d_max, 0], eps_mag)
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
l_s = mp['l_m']+mp['l_sigs']
l_r = mp['l_m']+mp['l_sigr']
sigma = (l_s*l_r-mp['l_m']**2) /(l_s*l_r)
tau_r = l_r / mp['r_r']
tau_sig = sigma * l_s / (
mp['r_s'] + mp['r_r'] * (mp['l_m'] ** 2) / (l_r ** 2))
self._model_constants = np.array([
# omega, i_alpha, i_beta, psi_ralpha, psi_rbeta, omega * psi_ralpha, omega * psi_rbeta, u_salpha, u_sbeta, u_ralpha, u_rbeta,
[0, -1 / tau_sig, 0,mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2), 0, 0,
+mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 1 / (sigma * l_s), 0,
-mp['l_m'] / (sigma * l_r * l_s), 0, ], # i_ralpha_dot
[0, 0, -1 / tau_sig, 0,
mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2),
-mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 0, 0,
1 / (sigma * l_s), 0, -mp['l_m'] / (sigma * l_r * l_s), ],
# i_rbeta_dot
[0, mp['l_m'] / tau_r, 0, -1 / tau_r, 0, 0, -mp['p'], 0, 0, 1,
0, ], # psi_ralpha_dot
[0, 0, mp['l_m'] / tau_r, 0, -1 / tau_r, mp['p'], 0, 0, 0, 0, 1, ],
# psi_rbeta_dot
[mp['p'], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], # epsilon_dot
])
def electrical_jacobian(self, state, u_in, omega, *args):
mp = self._motor_parameter
l_s = mp['l_m'] + mp['l_sigs']
l_r = mp['l_m'] + mp['l_sigr']
sigma = (l_s * l_r - mp['l_m'] ** 2) / (l_s * l_r)
tau_r = l_r / mp['r_r']
tau_sig = sigma * l_s / (
mp['r_s'] + mp['r_r'] * (mp['l_m'] ** 2) / (l_r ** 2))
return (
np.array([ # dx'/dx
# i_alpha i_beta psi_alpha psi_beta epsilon
[-1 / tau_sig, 0,
mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2),
omega * mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 0],
[0, - 1 / tau_sig,
- omega * mp['l_m'] * mp['p'] / (sigma * l_r * l_s),
mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2), 0],
[mp['l_m'] / tau_r, 0, - 1 / tau_r, - omega * mp['p'], 0],
[0, mp['l_m'] / tau_r, omega * mp['p'], - 1 / tau_r, 0],
[0, 0, 0, 0, 0]
]),
np.array([ # dx'/dw
mp['l_m'] * mp['p'] / (sigma * l_r * l_s) * state[
self.PSI_RBETA_IDX],
- mp['l_m'] * mp['p'] / (sigma * l_r * l_s) * state[
self.PSI_RALPHA_IDX],
- mp['p'] * state[self.PSI_RBETA_IDX],
mp['p'] * state[self.PSI_RALPHA_IDX],
mp['p']
]),
np.array([ # dT/dx
- state[self.PSI_RBETA_IDX] * 3 / 2 * mp['p'] * mp[
'l_m'] / l_r,
state[self.PSI_RALPHA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r,
state[self.I_SBETA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r,
- state[self.I_SALPHA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r,
0
])
)
class SquirrelCageInductionMotor(InductionMotor):
"""
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 2.9338 Stator resistance
r_r Ohm 1.355 Rotor resistance
l_m H 143.75e-3 Main inductance
l_sigs H 5.87e-3 Stator-side stray inductance
l_sigr H 5.87e-3 Rotor-side stray inductance
p 1 2 Pole pair number
j_rotor kg/m^2 0.0011 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_sa A Stator current through branch a
i_sb A Stator current through branch b
i_sc A Stator current through branch c
i_salpha A Stator current in alpha direction
i_sbeta A Stator current in beta direction
=============== ====== =============================================
=============== ====== =============================================
Rotor flux Unit Description
=============== ====== =============================================
psi_rd Vs Direct axis of the rotor oriented flux
psi_rq Vs Quadrature axis of the rotor oriented flux
psi_ra Vs Rotor oriented flux in branch a
psi_rb Vs Rotor oriented flux in branch b
psi_rc Vs Rotor oriented flux in branch c
psi_ralpha Vs Rotor oriented flux in alpha direction
psi_rbeta Vs Rotor oriented flux in beta direction
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_sa V Stator voltage through branch a
u_sb V Stator voltage through branch b
u_sc V Stator voltage through branch c
u_salpha V Stator voltage in alpha axis
u_sbeta V Stator voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_sa Current in phase a
i_sb Current in phase b
i_sc Current in phase c
i_salpha Current in alpha axis
i_sbeta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
torque Motor generated torque
u_sa Voltage in phase a
u_sb Voltage in phase b
u_sc Voltage in phase c
u_salpha Voltage in alpha axis
u_sbeta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
#### Parameters taken from DOI: 10.1109/EPEPEMC.2018.8522008 (<NAME>, <NAME>, <NAME>)
_default_motor_parameter = {
'p': 2,
'l_m': 143.75e-3,
'l_sigs': 5.87e-3,
'l_sigr': 5.87e-3,
'j_rotor': 1.1e-3,
'r_s': 2.9338,
'r_r': 1.355,
}
_default_limits = dict(omega=4e3 * np.pi / 30, torque=0.0, i=5.5, epsilon=math.pi, u=560)
_default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=3.9, epsilon=math.pi, u=560)
_default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
def electrical_ode(self, state, u_salphabeta, omega, *args):
"""
The differential equation of the SCIM.
Sets u_ralpha = u_rbeta = 0 before calling the respective super function.
"""
u_ralphabeta = np.zeros_like(u_salphabeta)
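        # Editor's note: stacking the stator voltages with a zero rotor row yields a
        # (2, 2) array, e.g. [[u_salpha, u_sbeta], [0., 0.]], matching the indexing
        # u_sr_alphabeta[0/1, :] used in InductionMotor.electrical_ode above.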
        u_sr_alphabeta = np.array([u_salphabeta, u_ralphabeta])
        return super().electrical_ode(state, u_sr_alphabeta, omega, *args)
def _update_limits(self, limit_values={}, nominal_values={}):
# Docstring of superclass
voltage_limit = 0.5 * self._limits['u']
voltage_nominal = 0.5 * self._nominal_values['u']
limits_agenda = {}
nominal_agenda = {}
for u, i in zip(self.IO_VOLTAGES, self.IO_CURRENTS):
limits_agenda[u] = voltage_limit
nominal_agenda[u] = voltage_nominal
limits_agenda[i] = self._limits.get('i', None) or \
self._limits[u] / self._motor_parameter['r_s']
nominal_agenda[i] = self._nominal_values.get('i', None) or \
self._nominal_values[u] / self._motor_parameter['r_s']
super()._update_limits(limits_agenda, nominal_agenda)
def _update_initial_limits(self, nominal_new={}, omega=None):
# Docstring of superclass
# draw a sample magnetic field angle from [-pi,pi]
eps_mag = 2 * np.pi * np.random.random_sample() - np.pi
flux_alphabeta_limits = self._flux_limit(omega=omega,
eps_mag=eps_mag,
u_q_max=self._nominal_values['u_sq'])
        # use absolute values, because the limits should describe an upper bound;
        # after the abs operation the norm of the alpha-beta flux is still equal
        # to the d-component of the flux
flux_alphabeta_limits = np.abs(flux_alphabeta_limits)
flux_nominal_limits = {state: value for state, value in
zip(self.FLUXES, flux_alphabeta_limits)}
flux_nominal_limits.update(nominal_new)
super()._update_initial_limits(flux_nominal_limits)
class DoublyFedInductionMotor(InductionMotor):
"""
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
    r_s                   Ohm        4.42          Stator resistance
    r_r                   Ohm        3.51          Rotor resistance
    l_m                   H          297.5e-3      Main inductance
    l_sigs                H          25.71e-3      Stator-side stray inductance
    l_sigr                H          25.71e-3      Rotor-side stray inductance
    p                     1          2             Pole pair number
    j_rotor               kg/m^2     13.695e-3     Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_sa A Current through branch a
i_sb A Current through branch b
i_sc A Current through branch c
i_salpha A Current in alpha axis
i_sbeta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Rotor flux Unit Description
=============== ====== =============================================
psi_rd Vs Direct axis of the rotor oriented flux
psi_rq Vs Quadrature axis of the rotor oriented flux
psi_ra Vs Rotor oriented flux in branch a
psi_rb Vs Rotor oriented flux in branch b
psi_rc Vs Rotor oriented flux in branch c
psi_ralpha Vs Rotor oriented flux in alpha direction
psi_rbeta Vs Rotor oriented flux in beta direction
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_sa V Stator voltage through branch a
u_sb V Stator voltage through branch b
u_sc V Stator voltage through branch c
u_salpha V Stator voltage in alpha axis
u_sbeta V Stator voltage in beta axis
u_ralpha V Rotor voltage in alpha axis
u_rbeta V Rotor voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_sa Current in phase a
i_sb Current in phase b
i_sc Current in phase c
i_salpha Current in alpha axis
i_sbeta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
torque Motor generated torque
u_sa Voltage in phase a
u_sb Voltage in phase b
u_sc Voltage in phase c
u_salpha Voltage in alpha axis
u_sbeta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
u_ralpha Rotor voltage in alpha axis
u_rbeta Rotor voltage in beta axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
ROTOR_VOLTAGES = ['u_ralpha', 'u_rbeta']
ROTOR_CURRENTS = ['i_ralpha', 'i_rbeta']
IO_ROTOR_VOLTAGES = ['u_ra', 'u_rb', 'u_rc', 'u_rd', 'u_rq']
IO_ROTOR_CURRENTS = ['i_ra', 'i_rb', 'i_rc', 'i_rd', 'i_rq']
#### Parameters taken from DOI: 10.1016/j.jestch.2016.01.015 (<NAME>, <NAME>, <NAME>)
_default_motor_parameter = {
'p': 2,
'l_m': 297.5e-3,
'l_sigs': 25.71e-3,
'l_sigr': 25.71e-3,
'j_rotor': 13.695e-3,
'r_s': 4.42,
'r_r': 3.51,
}
_default_limits = dict(omega=1800 * np.pi / 30, torque=0.0, i=9, epsilon=math.pi, u=720)
_default_nominal_values = dict(omega=1650 * np.pi / 30, torque=0.0, i=7.5, epsilon=math.pi, u=720)
_default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
'epsilon': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
def __init__(self, **kwargs):
self.IO_VOLTAGES += self.IO_ROTOR_VOLTAGES
self.IO_CURRENTS += self.IO_ROTOR_CURRENTS
super().__init__(**kwargs)
def _update_limits(self, limit_values={}, nominal_values={}):
# Docstring of superclass
voltage_limit = 0.5 * self._limits['u']
voltage_nominal = 0.5 * self._nominal_values['u']
limits_agenda = {}
nominal_agenda = {}
for u, i in zip(self.IO_VOLTAGES+self.ROTOR_VOLTAGES,
self.IO_CURRENTS+self.ROTOR_CURRENTS):
limits_agenda[u] = voltage_limit
nominal_agenda[u] = voltage_nominal
limits_agenda[i] = self._limits.get('i', None) or \
self._limits[u] / self._motor_parameter['r_r']
nominal_agenda[i] = self._nominal_values.get('i', None) or \
self._nominal_values[u] / \
self._motor_parameter['r_r']
super()._update_limits(limits_agenda, nominal_agenda)
def _update_initial_limits(self, nominal_new={}, omega=None):
# Docstring of superclass
# draw a sample magnetic field angle from [-pi,pi]
eps_mag = 2 * np.pi * np.random.random_sample() - np.pi
flux_alphabeta_limits = self._flux_limit(omega=omega,
eps_mag=eps_mag,
u_q_max=self._nominal_values['u_sq'],
u_rq_max=self._nominal_values['u_rq'])
flux_nominal_limits = {state: value for state, value in
zip(self.FLUXES, flux_alphabeta_limits)}
super()._update_initial_limits(flux_nominal_limits)
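if __name__ == '__main__':
    # Editor's illustrative sketch (not part of the original module): evaluate the
    # squirrel-cage torque expression directly from the class defaults at an assumed
    # operating point, without constructing a full drive system.
    _mp = SquirrelCageInductionMotor._default_motor_parameter
    _psi_ralpha, _psi_rbeta = 0.9, 0.0  # assumed rotor flux linkage in Vs
    _i_salpha, _i_sbeta = 1.0, 3.0      # assumed stator currents in A
    _torque = 1.5 * _mp['p'] * _mp['l_m'] / (_mp['l_m'] + _mp['l_sigr']) * (
        _psi_ralpha * _i_sbeta - _psi_rbeta * _i_salpha)
    print(f'illustrative torque: {_torque:.2f} Nm')  # ~7.78 Nm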
|
[
"numpy.abs",
"numpy.sqrt",
"numpy.random.random_sample",
"numpy.asarray",
"math.cos",
"numpy.array",
"numpy.matmul",
"math.sin",
"numpy.zeros_like",
"numpy.atleast_1d"
] |
[((18618, 18695), 'numpy.array', 'np.array', (["[[-mp['r_a'], 0, -mp['l_e_prime'], 1, 0], [0, -mp['r_e'], 0, 0, 1]]"], {}), "([[-mp['r_a'], 0, -mp['l_e_prime'], 1, 0], [0, -mp['r_e'], 0, 0, 1]])\n", (18626, 18695), True, 'import numpy as np\n'), ((29603, 29660), 'numpy.array', 'np.array', (["[[-mp['r_a'] - mp['r_e'], -mp['l_e_prime'], 1]]"], {}), "([[-mp['r_a'] - mp['r_e'], -mp['l_e_prime'], 1]])\n", (29611, 29660), True, 'import numpy as np\n'), ((34832, 34875), 'numpy.array', 'np.array', (["[[-mp['psi_e'], -mp['r_a'], 1.0]]"], {}), "([[-mp['psi_e'], -mp['r_a'], 1.0]])\n", (34840, 34875), True, 'import numpy as np\n'), ((35322, 35377), 'numpy.matmul', 'np.matmul', (['self._model_constants', 'self._ode_placeholder'], {}), '(self._model_constants, self._ode_placeholder)\n', (35331, 35377), True, 'import numpy as np\n'), ((38868, 38911), 'numpy.matmul', 'np.matmul', (['ThreePhaseMotor._t23', 'quantities'], {}), '(ThreePhaseMotor._t23, quantities)\n', (38877, 38911), True, 'import numpy as np\n'), ((39305, 39348), 'numpy.matmul', 'np.matmul', (['ThreePhaseMotor._t32', 'quantities'], {}), '(ThreePhaseMotor._t32, quantities)\n', (39314, 39348), True, 'import numpy as np\n'), ((39830, 39847), 'math.cos', 'math.cos', (['epsilon'], {}), '(epsilon)\n', (39838, 39847), False, 'import math\n'), ((39863, 39880), 'math.sin', 'math.sin', (['epsilon'], {}), '(epsilon)\n', (39871, 39880), False, 'import math\n'), ((55462, 55607), 'numpy.array', 'np.array', (["[[0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']], [0, 0, -mp['r_s'], 0, 1,\n -mp['l_d'] * mp['p'], 0], [mp['p'], 0, 0, 0, 0, 0, 0]]"], {}), "([[0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']], [0, 0, -mp[\n 'r_s'], 0, 1, -mp['l_d'] * mp['p'], 0], [mp['p'], 0, 0, 0, 0, 0, 0]])\n", (55470, 55607), True, 'import numpy as np\n'), ((62464, 62633), 'numpy.array', 'np.array', (["[[0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']], [-mp['psi_p'] * mp['p'],\n 0, -mp['r_s'], 0, 1, -mp['l_d'] * mp['p'], 0], [mp['p'], 0, 0, 0, 0, 0, 0]]"], {}), "([[0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']], [-mp['psi_p'] *\n mp['p'], 0, -mp['r_s'], 0, 1, -mp['l_d'] * mp['p'], 0], [mp['p'], 0, 0,\n 0, 0, 0, 0]])\n", (62472, 62633), True, 'import numpy as np\n'), ((76319, 76895), 'numpy.array', 'np.array', (["[[0, -1 / tau_sig, 0, mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2), 0, \n 0, +mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 1 / (sigma * l_s), 0, -\n mp['l_m'] / (sigma * l_r * l_s), 0], [0, 0, -1 / tau_sig, 0, mp['l_m'] *\n mp['r_r'] / (sigma * l_s * l_r ** 2), -mp['l_m'] * mp['p'] / (sigma *\n l_r * l_s), 0, 0, 1 / (sigma * l_s), 0, -mp['l_m'] / (sigma * l_r * l_s\n )], [0, mp['l_m'] / tau_r, 0, -1 / tau_r, 0, 0, -mp['p'], 0, 0, 1, 0],\n [0, 0, mp['l_m'] / tau_r, 0, -1 / tau_r, mp['p'], 0, 0, 0, 0, 1], [mp[\n 'p'], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]"], {}), "([[0, -1 / tau_sig, 0, mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r **\n 2), 0, 0, +mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 1 / (sigma * l_s),\n 0, -mp['l_m'] / (sigma * l_r * l_s), 0], [0, 0, -1 / tau_sig, 0, mp[\n 'l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2), -mp['l_m'] * mp['p'] / (\n sigma * l_r * l_s), 0, 0, 1 / (sigma * l_s), 0, -mp['l_m'] / (sigma *\n l_r * l_s)], [0, mp['l_m'] / tau_r, 0, -1 / tau_r, 0, 0, -mp['p'], 0, 0,\n 1, 0], [0, 0, mp['l_m'] / tau_r, 0, -1 / tau_r, mp['p'], 0, 0, 0, 0, 1],\n [mp['p'], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])\n", (76327, 76895), True, 'import numpy as np\n'), ((85548, 85575), 'numpy.zeros_like', 'np.zeros_like', (['u_salphabeta'], {}), '(u_salphabeta)\n', (85561, 85575), True, 
'import numpy as np\n'), ((85601, 85639), 'numpy.array', 'np.array', (['[u_salphabeta, u_ralphabeta]'], {}), '([u_salphabeta, u_ralphabeta])\n', (85609, 85639), True, 'import numpy as np\n'), ((87168, 87197), 'numpy.abs', 'np.abs', (['flux_alphabeta_limits'], {}), '(flux_alphabeta_limits)\n', (87174, 87197), True, 'import numpy as np\n'), ((8728, 8758), 'numpy.array', 'np.array', (['[-1, -1, -1, -1, -1]'], {}), '([-1, -1, -1, -1, -1])\n', (8736, 8758), True, 'import numpy as np\n'), ((9333, 9373), 'numpy.asarray', 'np.asarray', (['nominal_values_'], {'dtype': 'float'}), '(nominal_values_, dtype=float)\n', (9343, 9373), True, 'import numpy as np\n'), ((19456, 19560), 'numpy.array', 'np.array', (['[state[self.I_A_IDX], state[self.I_E_IDX], omega * state[self.I_E_IDX],\n u_in[0], u_in[1]]'], {}), '([state[self.I_A_IDX], state[self.I_E_IDX], omega * state[self.\n I_E_IDX], u_in[0], u_in[1]])\n', (19464, 19560), True, 'import numpy as np\n'), ((24653, 24761), 'numpy.array', 'np.array', (["[[-mp['r_a'] / mp['l_a'], -mp['l_e_prime'] / mp['l_a'] * omega], [0, -mp[\n 'r_e'] / mp['l_e']]]"], {}), "([[-mp['r_a'] / mp['l_a'], -mp['l_e_prime'] / mp['l_a'] * omega], [\n 0, -mp['r_e'] / mp['l_e']]])\n", (24661, 24761), True, 'import numpy as np\n'), ((24820, 24885), 'numpy.array', 'np.array', (["[-mp['l_e_prime'] * state[self.I_E_IDX] / mp['l_a'], 0]"], {}), "([-mp['l_e_prime'] * state[self.I_E_IDX] / mp['l_a'], 0])\n", (24828, 24885), True, 'import numpy as np\n'), ((24900, 24993), 'numpy.array', 'np.array', (["[mp['l_e_prime'] * state[self.I_E_IDX], mp['l_e_prime'] * state[self.I_A_IDX]]"], {}), "([mp['l_e_prime'] * state[self.I_E_IDX], mp['l_e_prime'] * state[\n self.I_A_IDX]])\n", (24908, 24993), True, 'import numpy as np\n'), ((30209, 30274), 'numpy.array', 'np.array', (['[state[self.I_IDX], omega * state[self.I_IDX], u_in[0]]'], {}), '([state[self.I_IDX], omega * state[self.I_IDX], u_in[0]])\n', (30217, 30274), True, 'import numpy as np\n'), ((31472, 31566), 'numpy.array', 'np.array', (["[[-(mp['r_a'] + mp['r_e'] + mp['l_e_prime'] * omega) / (mp['l_a'] + mp['l_e'])]\n ]"], {}), "([[-(mp['r_a'] + mp['r_e'] + mp['l_e_prime'] * omega) / (mp['l_a'] +\n mp['l_e'])]])\n", (31480, 31566), True, 'import numpy as np\n'), ((31599, 31673), 'numpy.array', 'np.array', (["[-mp['l_e_prime'] * state[self.I_IDX] / (mp['l_a'] + mp['l_e'])]"], {}), "([-mp['l_e_prime'] * state[self.I_IDX] / (mp['l_a'] + mp['l_e'])])\n", (31607, 31673), True, 'import numpy as np\n'), ((31710, 31761), 'numpy.array', 'np.array', (["[2 * mp['l_e_prime'] * state[self.I_IDX]]"], {}), "([2 * mp['l_e_prime'] * state[self.I_IDX]])\n", (31718, 31761), True, 'import numpy as np\n'), ((35507, 35543), 'numpy.array', 'np.array', (["[[-mp['r_a'] / mp['l_a']]]"], {}), "([[-mp['r_a'] / mp['l_a']]])\n", (35515, 35543), True, 'import numpy as np\n'), ((35558, 35594), 'numpy.array', 'np.array', (["[-mp['psi_e'] / mp['l_a']]"], {}), "([-mp['psi_e'] / mp['l_a']])\n", (35566, 35594), True, 'import numpy as np\n'), ((35609, 35632), 'numpy.array', 'np.array', (["[mp['psi_e']]"], {}), "([mp['psi_e']])\n", (35617, 35632), True, 'import numpy as np\n'), ((36804, 36912), 'numpy.array', 'np.array', (["[[-mp['r_a'] / mp['l_a'], -mp['l_e_prime'] / mp['l_a'] * omega], [0, -mp[\n 'r_e'] / mp['l_e']]]"], {}), "([[-mp['r_a'] / mp['l_a'], -mp['l_e_prime'] / mp['l_a'] * omega], [\n 0, -mp['r_e'] / mp['l_e']]])\n", (36812, 36912), True, 'import numpy as np\n'), ((36971, 37036), 'numpy.array', 'np.array', (["[-mp['l_e_prime'] * state[self.I_E_IDX] / mp['l_a'], 0]"], {}), 
"([-mp['l_e_prime'] * state[self.I_E_IDX] / mp['l_a'], 0])\n", (36979, 37036), True, 'import numpy as np\n'), ((37051, 37144), 'numpy.array', 'np.array', (["[mp['l_e_prime'] * state[self.I_E_IDX], mp['l_e_prime'] * state[self.I_A_IDX]]"], {}), "([mp['l_e_prime'] * state[self.I_E_IDX], mp['l_e_prime'] * state[\n self.I_A_IDX]])\n", (37059, 37144), True, 'import numpy as np\n'), ((48965, 49109), 'numpy.array', 'np.array', (['[omega, state[self.I_SD_IDX], state[self.I_SQ_IDX], u_dq[0], u_dq[1], omega *\n state[self.I_SD_IDX], omega * state[self.I_SQ_IDX]]'], {}), '([omega, state[self.I_SD_IDX], state[self.I_SQ_IDX], u_dq[0], u_dq[\n 1], omega * state[self.I_SD_IDX], omega * state[self.I_SQ_IDX]])\n', (48973, 49109), True, 'import numpy as np\n'), ((56653, 56824), 'numpy.array', 'np.array', (["[[-mp['r_s'] / mp['l_d'], mp['l_q'] / mp['l_d'] * mp['p'] * omega, 0], [-mp\n ['l_d'] / mp['l_q'] * mp['p'] * omega, -mp['r_s'] / mp['l_q'], 0], [0, \n 0, 0]]"], {}), "([[-mp['r_s'] / mp['l_d'], mp['l_q'] / mp['l_d'] * mp['p'] * omega,\n 0], [-mp['l_d'] / mp['l_q'] * mp['p'] * omega, -mp['r_s'] / mp['l_q'], \n 0], [0, 0, 0]])\n", (56661, 56824), True, 'import numpy as np\n'), ((56896, 57032), 'numpy.array', 'np.array', (["[mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX], -mp['p'] * mp[\n 'l_d'] / mp['l_q'] * state[self.I_SD_IDX], mp['p']]"], {}), "([mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX], -mp['p'] *\n mp['l_d'] / mp['l_q'] * state[self.I_SD_IDX], mp['p']])\n", (56904, 57032), True, 'import numpy as np\n'), ((57110, 57256), 'numpy.array', 'np.array', (["[1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX], 1.5 * mp[\n 'p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX], 0]"], {}), "([1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX], \n 1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX], 0])\n", (57118, 57256), True, 'import numpy as np\n'), ((63587, 63619), 'numpy.sqrt', 'np.sqrt', (['(i_n ** 2 - i_d_opt ** 2)'], {}), '(i_n ** 2 - i_d_opt ** 2)\n', (63594, 63619), True, 'import numpy as np\n'), ((64039, 64210), 'numpy.array', 'np.array', (["[[-mp['r_s'] / mp['l_d'], mp['l_q'] / mp['l_d'] * omega * mp['p'], 0], [-mp\n ['l_d'] / mp['l_q'] * omega * mp['p'], -mp['r_s'] / mp['l_q'], 0], [0, \n 0, 0]]"], {}), "([[-mp['r_s'] / mp['l_d'], mp['l_q'] / mp['l_d'] * omega * mp['p'],\n 0], [-mp['l_d'] / mp['l_q'] * omega * mp['p'], -mp['r_s'] / mp['l_q'], \n 0], [0, 0, 0]])\n", (64047, 64210), True, 'import numpy as np\n'), ((64290, 64466), 'numpy.array', 'np.array', (["[mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX], -mp['p'] * mp[\n 'l_d'] / mp['l_q'] * state[self.I_SD_IDX] - mp['p'] * mp['psi_p'] / mp[\n 'l_q'], mp['p']]"], {}), "([mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX], -mp['p'] *\n mp['l_d'] / mp['l_q'] * state[self.I_SD_IDX] - mp['p'] * mp['psi_p'] /\n mp['l_q'], mp['p']])\n", (64298, 64466), True, 'import numpy as np\n'), ((64549, 64716), 'numpy.array', 'np.array', (["[1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX], 1.5 * mp[\n 'p'] * (mp['psi_p'] + (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX]), 0]"], {}), "([1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX], \n 1.5 * mp['p'] * (mp['psi_p'] + (mp['l_d'] - mp['l_q']) * state[self.\n I_SD_IDX]), 0])\n", (64557, 64716), True, 'import numpy as np\n'), ((73165, 73462), 'numpy.array', 'np.array', (['[omega, state[self.I_SALPHA_IDX], state[self.I_SBETA_IDX], state[self.\n PSI_RALPHA_IDX], state[self.PSI_RBETA_IDX], omega * state[self.\n 
PSI_RALPHA_IDX], omega * state[self.PSI_RBETA_IDX], u_sr_alphabeta[0, 0\n ], u_sr_alphabeta[0, 1], u_sr_alphabeta[1, 0], u_sr_alphabeta[1, 1]]'], {}), '([omega, state[self.I_SALPHA_IDX], state[self.I_SBETA_IDX], state[\n self.PSI_RALPHA_IDX], state[self.PSI_RBETA_IDX], omega * state[self.\n PSI_RALPHA_IDX], omega * state[self.PSI_RBETA_IDX], u_sr_alphabeta[0, 0\n ], u_sr_alphabeta[0, 1], u_sr_alphabeta[1, 0], u_sr_alphabeta[1, 1]])\n', (73173, 73462), True, 'import numpy as np\n'), ((77841, 78249), 'numpy.array', 'np.array', (["[[-1 / tau_sig, 0, mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2), omega *\n mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 0], [0, -1 / tau_sig, -omega *\n mp['l_m'] * mp['p'] / (sigma * l_r * l_s), mp['l_m'] * mp['r_r'] / (\n sigma * l_s * l_r ** 2), 0], [mp['l_m'] / tau_r, 0, -1 / tau_r, -omega *\n mp['p'], 0], [0, mp['l_m'] / tau_r, omega * mp['p'], -1 / tau_r, 0], [0,\n 0, 0, 0, 0]]"], {}), "([[-1 / tau_sig, 0, mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2\n ), omega * mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 0], [0, -1 /\n tau_sig, -omega * mp['l_m'] * mp['p'] / (sigma * l_r * l_s), mp['l_m'] *\n mp['r_r'] / (sigma * l_s * l_r ** 2), 0], [mp['l_m'] / tau_r, 0, -1 /\n tau_r, -omega * mp['p'], 0], [0, mp['l_m'] / tau_r, omega * mp['p'], -1 /\n tau_r, 0], [0, 0, 0, 0, 0]])\n", (77849, 78249), True, 'import numpy as np\n'), ((78583, 78837), 'numpy.array', 'np.array', (["[mp['l_m'] * mp['p'] / (sigma * l_r * l_s) * state[self.PSI_RBETA_IDX], -mp\n ['l_m'] * mp['p'] / (sigma * l_r * l_s) * state[self.PSI_RALPHA_IDX], -\n mp['p'] * state[self.PSI_RBETA_IDX], mp['p'] * state[self.\n PSI_RALPHA_IDX], mp['p']]"], {}), "([mp['l_m'] * mp['p'] / (sigma * l_r * l_s) * state[self.\n PSI_RBETA_IDX], -mp['l_m'] * mp['p'] / (sigma * l_r * l_s) * state[self\n .PSI_RALPHA_IDX], -mp['p'] * state[self.PSI_RBETA_IDX], mp['p'] * state\n [self.PSI_RALPHA_IDX], mp['p']])\n", (78591, 78837), True, 'import numpy as np\n'), ((78993, 79273), 'numpy.array', 'np.array', (["[-state[self.PSI_RBETA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r, state[self\n .PSI_RALPHA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r, state[self.\n I_SBETA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r, -state[self.\n I_SALPHA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r, 0]"], {}), "([-state[self.PSI_RBETA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r, \n state[self.PSI_RALPHA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r, state[\n self.I_SBETA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r, -state[self.\n I_SALPHA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r, 0])\n", (79001, 79273), True, 'import numpy as np\n'), ((8437, 8460), 'numpy.abs', 'np.abs', (['nominal_values_'], {}), '(nominal_values_)\n', (8443, 8460), True, 'import numpy as np\n'), ((9064, 9091), 'numpy.asarray', 'np.asarray', (['nominal_values_'], {}), '(nominal_values_)\n', (9074, 9091), True, 'import numpy as np\n'), ((9146, 9178), 'numpy.asarray', 'np.asarray', (['self._nominal_values'], {}), '(self._nominal_values)\n', (9156, 9178), True, 'import numpy as np\n'), ((63535, 63562), 'numpy.sqrt', 'np.sqrt', (['((_p / 2) ** 2 - _q)'], {}), '((_p / 2) ** 2 - _q)\n', (63542, 63562), True, 'import numpy as np\n'), ((86707, 86732), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (86730, 86732), True, 'import numpy as np\n'), ((95012, 95037), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (95035, 95037), True, 'import numpy as np\n'), ((9444, 9484), 'numpy.asarray', 'np.asarray', (['state_space.low'], {'dtype': 'float'}), 
'(state_space.low, dtype=float)\n', (9454, 9484), True, 'import numpy as np\n'), ((38419, 38429), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (38426, 38429), True, 'import numpy as np\n'), ((38455, 38465), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (38462, 38465), True, 'import numpy as np\n'), ((56221, 56231), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (56228, 56231), True, 'import numpy as np\n'), ((56256, 56266), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (56263, 56266), True, 'import numpy as np\n'), ((35200, 35232), 'numpy.atleast_1d', 'np.atleast_1d', (['state[self.I_IDX]'], {}), '(state[self.I_IDX])\n', (35213, 35232), True, 'import numpy as np\n'), ((38249, 38259), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (38256, 38259), True, 'import numpy as np\n'), ((38268, 38278), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (38275, 38278), True, 'import numpy as np\n'), ((75793, 75816), 'numpy.abs', 'np.abs', (["(mp['l_m'] * i_d)"], {}), "(mp['l_m'] * i_d)\n", (75799, 75816), True, 'import numpy as np\n'), ((9712, 9745), 'numpy.asarray', 'np.asarray', (['interval'], {'dtype': 'float'}), '(interval, dtype=float)\n', (9722, 9745), True, 'import numpy as np\n'), ((9971, 10004), 'numpy.asarray', 'np.asarray', (['interval'], {'dtype': 'float'}), '(interval, dtype=float)\n', (9981, 10004), True, 'import numpy as np\n')]
|
import os
import sys
sys.path.append('.')
import argparse
import numpy as np
import os.path as osp
from multiprocessing import Process, Pool
from glob import glob
from tqdm import tqdm
import tensorflow as tf
from PIL import Image
from lib.core.config import INSTA_DIR, INSTA_IMG_DIR
def process_single_record(fname, outdir, split):
sess = tf.Session()
#print(fname)
record_name = fname.split('/')[-1]
for vid_idx, serialized_ex in enumerate(tf.python_io.tf_record_iterator(fname)):
#print(vid_idx)
os.makedirs(osp.join(outdir, split, record_name, str(vid_idx)), exist_ok=True)
example = tf.train.Example()
example.ParseFromString(serialized_ex)
N = int(example.features.feature['meta/N'].int64_list.value[0])
images_data = example.features.feature[
'image/encoded'].bytes_list.value
for i in range(N):
image = np.expand_dims(sess.run(tf.image.decode_jpeg(images_data[i], channels=3)), axis=0)
#video.append(image)
image = Image.fromarray(np.squeeze(image, axis=0))
            image.save(osp.join(outdir, split, record_name, str(vid_idx), str(i)+".jpg"))
    sess.close()  # release the TF1 session once the record has been processed
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--inp_dir', type=str, help='tfrecords file path', default=INSTA_DIR)
parser.add_argument('--n', type=int, help='total num of workers')
parser.add_argument('--i', type=int, help='current index of worker (from 0 to n-1)')
parser.add_argument('--split', type=str, help='train or test')
parser.add_argument('--out_dir', type=str, help='output images path', default=INSTA_IMG_DIR)
args = parser.parse_args()
fpaths = glob(f'{args.inp_dir}/{args.split}/*.tfrecord')
fpaths = sorted(fpaths)
total = len(fpaths)
fpaths = fpaths[args.i*total//args.n : (args.i+1)*total//args.n]
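    # Editor's note: worker i of n gets the contiguous slice fpaths[i*total//n : (i+1)*total//n];
    # e.g. with total=10 records and n=4 workers the shard sizes are 2, 3, 2, 3 and the
    # shards together cover every tfrecord exactly once.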
#print(fpaths)
#print(len(fpaths))
os.makedirs(args.out_dir, exist_ok=True)
for idx, fp in enumerate(fpaths):
process_single_record(fp, args.out_dir, args.split)
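    # Editor's usage sketch (script name assumed): launch one process per shard, e.g.
    #   python insta_img_extract.py --split train --n 4 --i 0   # and --i 1 ... --i 3
    # so that the n workers together cover all tfrecord files of the split.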
|
[
"tensorflow.train.Example",
"os.makedirs",
"argparse.ArgumentParser",
"tensorflow.python_io.tf_record_iterator",
"tensorflow.Session",
"numpy.squeeze",
"sys.path.append",
"glob.glob",
"tensorflow.image.decode_jpeg"
] |
[((21, 41), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (36, 41), False, 'import sys\n'), ((348, 360), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (358, 360), True, 'import tensorflow as tf\n'), ((1226, 1251), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1249, 1251), False, 'import argparse\n'), ((1714, 1761), 'glob.glob', 'glob', (['f"""{args.inp_dir}/{args.split}/*.tfrecord"""'], {}), "(f'{args.inp_dir}/{args.split}/*.tfrecord')\n", (1718, 1761), False, 'from glob import glob\n'), ((1937, 1977), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {'exist_ok': '(True)'}), '(args.out_dir, exist_ok=True)\n', (1948, 1977), False, 'import os\n'), ((462, 500), 'tensorflow.python_io.tf_record_iterator', 'tf.python_io.tf_record_iterator', (['fname'], {}), '(fname)\n', (493, 500), True, 'import tensorflow as tf\n'), ((632, 650), 'tensorflow.train.Example', 'tf.train.Example', ([], {}), '()\n', (648, 650), True, 'import tensorflow as tf\n'), ((1067, 1092), 'numpy.squeeze', 'np.squeeze', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1077, 1092), True, 'import numpy as np\n'), ((939, 987), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['images_data[i]'], {'channels': '(3)'}), '(images_data[i], channels=3)\n', (959, 987), True, 'import tensorflow as tf\n')]
|
from pathlib import Path
import sys
path = str(Path(Path(__file__).parent.absolute()).parent.absolute())
sys.path.insert(0, path)
from mnist_utils.util import _x, _y_int
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import accuracy_score, adjusted_rand_score
import numpy as np
from fast_pytorch_kmeans import KMeans
import torch
from tabulate import tabulate
#global vars
kmeans_main = None
cluster_ids_x = None
def classify_clusters(l1, l2):
ref_labels = {}
for i in range(len(np.unique(l1))):
index = np.where(l1 == i,1,0)
ref_labels[i] = np.bincount(l2[index==1]).argmax()
decimal_labels = np.zeros(len(l1))
for i in range(len(l1)):
decimal_labels[i] = ref_labels[l1[i]]
return decimal_labels
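# Editor's illustrative note (not part of the original script): every cluster id is mapped
# to the most frequent true label inside that cluster, e.g. for
#   l1 = np.array([0, 0, 1, 1, 1]) and l2 = np.array([7, 7, 2, 2, 9])
# cluster 0 -> 7 and cluster 1 -> 2, so classify_clusters returns [7., 7., 2., 2., 2.].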
def init_clustring_scikit(cluster_count=10):
global kmeans_main
kmeans_main = MiniBatchKMeans(n_clusters=cluster_count, verbose=False)
kmeans_main.fit(_x)
def test_accuracy_scikit():
global kmeans_main
decimal_labels = classify_clusters(kmeans_main.labels_, _y_int)
print("predicted labels:\t", decimal_labels[:16].astype('int'))
print("true labels:\t\t",_y_int[:16])
print(60 * '_')
AP = accuracy_score(decimal_labels,_y_int)
RI = adjusted_rand_score(decimal_labels,_y_int)
print("Accuracy (PURITY):" , AP)
print("Accuracy (RAND INDEX):" , RI)
return AP, RI
def init_clustring_torch(cluster_count=10):
    global cluster_ids_x
_kmeans = KMeans(n_clusters=cluster_count, mode='euclidean', verbose=1)
x = torch.from_numpy(_x)
cluster_ids_x = _kmeans.fit_predict(x)
def test_accuracy_torch():
global cluster_ids_x
decimal_labels = classify_clusters(cluster_ids_x.cpu().detach().numpy(), _y_int)
print("predicted labels:\t", decimal_labels[:16].astype('int'))
print("true labels:\t\t",_y_int[:16])
print(60 * '_')
AP = accuracy_score(decimal_labels,_y_int)
RI = adjusted_rand_score(decimal_labels,_y_int)
print("Accuracy (PURITY):" , AP)
print("Accuracy (RAND INDEX):" , RI)
return AP, RI
def pipeline(lib="torch", cluster_count_max=300, coefficient=2):
cluster_count = len(np.unique(_y_int))
result = []
if lib == "torch":
while cluster_count <= cluster_count_max:
print(10 * "*" + "TRYING WITH " + str(cluster_count) + 10 * "*")
init_clustring_torch(cluster_count)
AP, RI = test_accuracy_torch()
result.append([cluster_count, AP, RI])
cluster_count *= coefficient
cluster_count = int(cluster_count)
elif lib == "scikit":
while cluster_count <= cluster_count_max:
print(10 * "*" + "TRYING WITH " + str(cluster_count) + 10 * "*")
init_clustring_scikit(cluster_count)
AP, RI = test_accuracy_scikit()
result.append([cluster_count, AP, RI])
cluster_count *= coefficient
cluster_count = int(cluster_count)
else:
print("LIB NOT SUPPORTED")
print(tabulate(result, headers=['K', 'AP', 'RI']))
pipeline(cluster_count_max=200, coefficient=3, lib="scikit")
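# Editor's note: with cluster_count_max=200 and coefficient=3 the sweep above evaluates
# K = 10, 30 and 90 (starting from the 10 distinct digit labels, stopping once 270 exceeds
# the maximum) using the scikit-learn MiniBatchKMeans backend, and tabulates purity (AP)
# and adjusted Rand index (RI) for each K.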
|
[
"fast_pytorch_kmeans.KMeans",
"tabulate.tabulate",
"sys.path.insert",
"numpy.unique",
"pathlib.Path",
"numpy.where",
"sklearn.cluster.MiniBatchKMeans",
"sklearn.metrics.adjusted_rand_score",
"torch.from_numpy",
"numpy.bincount",
"sklearn.metrics.accuracy_score"
] |
[((105, 129), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (120, 129), False, 'import sys\n'), ((849, 905), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'cluster_count', 'verbose': '(False)'}), '(n_clusters=cluster_count, verbose=False)\n', (864, 905), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((1189, 1227), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['decimal_labels', '_y_int'], {}), '(decimal_labels, _y_int)\n', (1203, 1227), False, 'from sklearn.metrics import accuracy_score, adjusted_rand_score\n'), ((1236, 1279), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['decimal_labels', '_y_int'], {}), '(decimal_labels, _y_int)\n', (1255, 1279), False, 'from sklearn.metrics import accuracy_score, adjusted_rand_score\n'), ((1481, 1542), 'fast_pytorch_kmeans.KMeans', 'KMeans', ([], {'n_clusters': 'cluster_count', 'mode': '"""euclidean"""', 'verbose': '(1)'}), "(n_clusters=cluster_count, mode='euclidean', verbose=1)\n", (1487, 1542), False, 'from fast_pytorch_kmeans import KMeans\n'), ((1551, 1571), 'torch.from_numpy', 'torch.from_numpy', (['_x'], {}), '(_x)\n', (1567, 1571), False, 'import torch\n'), ((1893, 1931), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['decimal_labels', '_y_int'], {}), '(decimal_labels, _y_int)\n', (1907, 1931), False, 'from sklearn.metrics import accuracy_score, adjusted_rand_score\n'), ((1940, 1983), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['decimal_labels', '_y_int'], {}), '(decimal_labels, _y_int)\n', (1959, 1983), False, 'from sklearn.metrics import accuracy_score, adjusted_rand_score\n'), ((541, 564), 'numpy.where', 'np.where', (['(l1 == i)', '(1)', '(0)'], {}), '(l1 == i, 1, 0)\n', (549, 564), True, 'import numpy as np\n'), ((2169, 2186), 'numpy.unique', 'np.unique', (['_y_int'], {}), '(_y_int)\n', (2178, 2186), True, 'import numpy as np\n'), ((3027, 3070), 'tabulate.tabulate', 'tabulate', (['result'], {'headers': "['K', 'AP', 'RI']"}), "(result, headers=['K', 'AP', 'RI'])\n", (3035, 3070), False, 'from tabulate import tabulate\n'), ((508, 521), 'numpy.unique', 'np.unique', (['l1'], {}), '(l1)\n', (517, 521), True, 'import numpy as np\n'), ((587, 614), 'numpy.bincount', 'np.bincount', (['l2[index == 1]'], {}), '(l2[index == 1])\n', (598, 614), True, 'import numpy as np\n'), ((52, 66), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (56, 66), False, 'from pathlib import Path\n')]
|
import numpy as np
from JacobiPolynomials import *
import math
# 1D - LINE
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
def NormalisedJacobi1D(C,x):
p = np.zeros(C+2)
for i in range(0,C+2):
p[i] = JacobiPolynomials(i,x,0,0)[-1]*np.sqrt((2.*i+1.)/2.)
return p
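# Editor's note: JacobiPolynomials(i, x, 0, 0) is the Legendre polynomial P_i, and the
# factor sqrt((2i+1)/2) makes each entry unit-norm in L2 on [-1, 1] because
# int_{-1}^{1} P_i(x)^2 dx = 2 / (2i + 1).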
# 2D - TRI
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
def NormalisedJacobi2D(C,x):
""" Computes the orthogonal base of 2D polynomials of degree less
or equal to C+1 at the point x=(r,s) in [-1,1]^2 (i.e. on the reference quad)
"""
N = int( (C+2.)*(C+3.)/2. )
p = np.zeros(N)
r = x[0]; s = x[1]
    # Ordering: 1st increasing the degree and 2nd lexicographic order
ncount = 0 # counter for the polynomials order
# Loop on degree
for nDeg in range(0,C+2):
# Loop by increasing i
for i in range(0,nDeg+1):
if i==0:
p_i = 1.; q_i = 1.
else:
p_i = JacobiPolynomials(i,r,0.,0.)[-1]; q_i = q_i*(1.-s)/2.
# Value for j
j = nDeg-i
if j==0:
p_j = 1.
else:
p_j = JacobiPolynomials(j,s,2.*i+1.,0.)[-1]
# factor = np.sqrt( (2.*i+1.)*(i+j+1.)/2. )
factor = math.sqrt( (2.*i+1.)*(i+j+1.)/2. )
p[ncount] = ( p_i*q_i*p_j )*factor
ncount += 1
return p
def NormalisedJacobiTri(C,x):
""" Computes the orthogonal base of 2D polynomials of degree less
or equal to n at the point x=(xi,eta) in the reference triangle
"""
xi = x[0]; eta = x[1]
if eta==1:
r = -1.; s=1.;
else:
r = 2.*(1+xi)/(1.-eta)-1.
s = eta
return NormalisedJacobi2D(C,np.array([r,s]))
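# Editor's note: the triangle point (xi, eta) is mapped to collapsed (Duffy-type)
# coordinates r = 2*(1+xi)/(1-eta) - 1, s = eta on the quad [-1, 1]^2 before the tensor
# basis is evaluated; the vertex eta == 1 is treated separately because the map is
# singular there.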
def GradNormalisedJacobiTri(C,x,EvalOpt=0):
""" Computes the orthogonal base of 2D polynomials of degree less
or equal to n at the point x=(xi,eta) in the reference triangle
"""
N = int((C+2.)*(C+3.)/2.)
p = np.zeros(N);
dp_dxi = np.zeros(N)
dp_deta = np.zeros(N)
r = x[0]; s = x[1]
# THIS MAY RUIN THE CONVERGENCE, BUT FOR POST PROCESSING ITS FINE
if EvalOpt==1:
if s==1:
s=0.99999999999999
xi = (1.+r)*(1.-s)/2.-1
eta = s
dr_dxi = 2./(1.-eta)
dr_deta = 2.*(1.+xi)/(1.-eta)**2
# Derivative of s is not needed because s=eta
    # Ordering: 1st increasing the degree and 2nd lexicographic order
ncount = 0
# Loop on degree
for nDeg in range(0,C+2):
# Loop increasing i
for i in range(0,nDeg+1):
if i==0:
p_i = 1; q_i = 1; dp_i = 0; dq_i = 0
else:
p_i = JacobiPolynomials(i,r,0.,0.)[-1]; dp_i = JacobiPolynomials(i-1,r,1.,1.)[-1]*(i+1.)/2.
q_i = q_i*(1.-s)/2.; dq_i = 1.*q_i*(-i)/(1-s)
# Value for j
j = nDeg-i
if j==0:
p_j = 1; dp_j = 0
else:
p_j = JacobiPolynomials(j,s,2.*i+1.,0.)[-1]; dp_j = JacobiPolynomials(j-1,s,2.*i+2.,1.)[-1]*(j+2.*i+2.)/2.
factor = math.sqrt( (2.*i+1.)*(i+j+1.)/2. )
# Normalized polynomial
p[ncount] = ( p_i*q_i*p_j )*factor
# Derivatives with respect to (r,s)
dp_dr = ( (dp_i)*q_i*p_j )*factor
dp_ds = ( p_i*(dq_i*p_j+q_i*dp_j) )*factor
# Derivatives with respect to (xi,eta)
dp_dxi[ncount] = dp_dr*dr_dxi
dp_deta[ncount] = dp_dr*dr_deta + dp_ds
ncount += 1
return p,dp_dxi,dp_deta
# 3D - TET
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
#------------------------------------------------------------------------------------------------------------------#
def NormalisedJacobi3D(C,x):
"""Computes the orthogonal base of 3D polynomials of degree less
or equal to n at the point x=(r,s,t) in [-1,1]^3
"""
N = int((C+2)*(C+3)*(C+4)/6.)
p = np.zeros(N)
r = x[0]; s = x[1]; t = x[2]
    # Ordering: 1st increasing the degree and 2nd lexicographic order
ncount = 0
# Loop on degree
for nDeg in range(0,C+2):
# Loop increasing i
for i in range(0,nDeg+1):
if i==0:
p_i = 1; q_i = 1
else:
p_i = JacobiPolynomials(i,r,0.,0.)[-1]; q_i = q_i*(1.-s)/2.
# Loop increasing j
for j in range(0,nDeg-i+1):
if j==0:
p_j = 1; q_j = ((1.-t)/2.)**i
else:
p_j = JacobiPolynomials(j,s,2.*i+1.,0.)[-1]; q_j = q_j*(1.-t)/2.
# Value for k
k = nDeg-(i+j)
if k==0:
p_k = 1.
else:
p_k = JacobiPolynomials(k,t,2.*(i+j)+2.,0.)[-1]
factor = math.sqrt( (2.*i+1.)*(i+j+1.)*(2.*(i+j+k)+3.)/4. )
p[ncount] = ( p_i*q_i*p_j*q_j*p_k )*factor
ncount += 1
return p
def NormalisedJacobiTet(C,x):
"""Computes the orthogonal base of 3D polynomials of degree less
or equal to n at the point x=(r,s,t) in [-1,1]^3
"""
xi = x[0]; eta = x[1]; zeta = x[2]
if (eta+zeta)==0:
r = -1; s=1
elif zeta==1:
r = -1; s=1 # or s=-1 (check that nothing changes)
else:
r = -2.*(1+xi)/(eta+zeta)-1.;
s = 2.*(1+eta)/(1-zeta)-1.;
t = zeta
return NormalisedJacobi3D(C,[r,s,t])
# return NormalisedJacobi3D_Native(C,[r,s,t])
def GradNormalisedJacobiTet(C,x,EvalOpt=0):
"""Computes the orthogonal base of 3D polynomials of degree less
or equal to n at the point x=(r,s,t) in [-1,1]^3
"""
N = int((C+2)*(C+3)*(C+4)/6.)
p = np.zeros(N)
dp_dxi = np.zeros(N)
dp_deta = np.zeros(N)
dp_dzeta = np.zeros(N)
r = x[0]; s = x[1]; t = x[2]
# THIS MAY RUIN THE CONVERGENCE, BUT FOR POST PROCESSING ITS FINE
if EvalOpt==1:
if t==1.:
t=0.999999999999
if np.isclose(s,1.):
s=0.999999999999
if np.isclose(s,1.):
s=0.99999999999999
eta = (1./2.)*(s-s*t-1.-t)
xi = -(1./2.)*(r+1)*(eta+t)-1.
zeta = 1.0*t
# THIS MAY RUIN THE CONVERGENCE, BUT FOR POST PROCESSING ITS FINE
if eta == 0. and zeta == 0.:
eta = 1.0e-14
zeta = 1e-14
eta_zeta = eta+zeta
if np.isclose(eta_zeta,0.):
eta_zeta = 0.000000001
dr_dxi = -2./eta_zeta
dr_deta = 2.*(1.+xi)/eta_zeta**2
dr_dzeta = dr_deta
ds_deta = 2./(1.-zeta)
ds_dzeta = 2.*(1.+eta)/(1.-zeta)**2
# Derivative of t is not needed because t=zeta
#--------------------------------------------------------
# if np.allclose(eta+zeta,0):
# dr_dxi = -2./(0.001)**2
# dr_deta = 2.*(1.+xi)/(0.001)**2
# else:
# dr_dxi = -2./(eta+zeta)
# dr_deta = 2.*(1.+xi)/(eta+zeta)**2
# dr_dzeta = dr_deta
# if np.allclose(eta+zeta,0):
# ds_deta = 2./(0.001)
# ds_dzeta = 2.*(1.+eta)/(0.001)**2
# else:
# ds_deta = 2./(1.-zeta)
# ds_dzeta = 2.*(1.+eta)/(1.-zeta)**2
#--------------------------------------------------------
    # Ordering: 1st increasing the degree and 2nd lexicographic order
ncount = 0
# Loop on degree
for nDeg in range(0,C+2):
# Loop increasing i
for i in range(0,nDeg+1):
if i==0:
p_i = 1.; q_i = 1.; dp_i = 0.; dq_i = 0.
else:
p_i = JacobiPolynomials(i,r,0.,0.)[-1]; dp_i = JacobiPolynomials(i-1,r,1.,1.)[-1]*(i+1.)/2.
q_i = q_i*(1.-s)/2.; dq_i = q_i*(-i)/(1.-s)
# Loop increasing j
for j in range(0,nDeg-i+1):
if j==0:
p_j = 1; q_j = ((1.-t)/2.)**i; dp_j = 0; dq_j = q_j*(-(i+j))/(1.-t);
else:
p_j = JacobiPolynomials(j,s,2.*i+1.,0.)[-1]; dp_j = JacobiPolynomials(j-1,s,2.*i+2.,1.)[-1]*(j+2.*i+2.)/2.
q_j = q_j*(1.-t)/2.; dq_j = q_j*(-(i+j))/(1.-t)
# Value for k
k = nDeg-(i+j);
if k==0:
p_k = 1.; dp_k = 0.;
else:
p_k = JacobiPolynomials(k,t,2.*(i+j)+2.,0.)[-1]; dp_k = JacobiPolynomials(k-1,t,2.*(i+j)+3.,1.)[-1]*(k+2.*i+2.*j+3.)/2.
factor = math.sqrt( (2.*i+1.)*(i+j+1.)*(2.*(i+j+k)+3.)/4. )
# Normalized polynomial
p[ncount] = ( p_i*q_i*p_j*q_j*p_k )*factor
# Derivatives with respect to (r,s,t)
dp_dr = ( (dp_i)*q_i*p_j*q_j*p_k )*factor
dp_ds = ( p_i*(dq_i*p_j+q_i*dp_j)*q_j*p_k )*factor
dp_dt = ( p_i*q_i*p_j*(dq_j*p_k+q_j*dp_k) )*factor
# Derivatives with respect to (xi,eta,zeta)
dp_dxi[ncount] = dp_dr*dr_dxi
dp_deta[ncount] = dp_dr*dr_deta + dp_ds*ds_deta
dp_dzeta[ncount] = dp_dr*dr_dzeta + dp_ds*ds_dzeta + dp_dt
ncount += 1
return p,dp_dxi,dp_deta,dp_dzeta
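if __name__ == "__main__":
    # Editor's illustrative check (not part of the original module): for C = 1 the 2D basis
    # on the reference quad has (C+2)*(C+3)/2 = 6 members, and its first entry is the
    # constant 1/sqrt(2) ~ 0.7071 regardless of the evaluation point.
    vals = NormalisedJacobi2D(1, np.array([0.2, -0.4]))
    print(len(vals), vals[0])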
|
[
"numpy.sqrt",
"numpy.isclose",
"math.sqrt",
"numpy.array",
"numpy.zeros"
] |
[((464, 479), 'numpy.zeros', 'np.zeros', (['(C + 2)'], {}), '(C + 2)\n', (472, 479), True, 'import numpy as np\n'), ((1188, 1199), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1196, 1199), True, 'import numpy as np\n'), ((2575, 2586), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2583, 2586), True, 'import numpy as np\n'), ((2602, 2613), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2610, 2613), True, 'import numpy as np\n'), ((2628, 2639), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2636, 2639), True, 'import numpy as np\n'), ((4724, 4735), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (4732, 4735), True, 'import numpy as np\n'), ((6497, 6508), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (6505, 6508), True, 'import numpy as np\n'), ((6525, 6536), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (6533, 6536), True, 'import numpy as np\n'), ((6552, 6563), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (6560, 6563), True, 'import numpy as np\n'), ((6579, 6590), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (6587, 6590), True, 'import numpy as np\n'), ((6828, 6846), 'numpy.isclose', 'np.isclose', (['s', '(1.0)'], {}), '(s, 1.0)\n', (6838, 6846), True, 'import numpy as np\n'), ((7140, 7165), 'numpy.isclose', 'np.isclose', (['eta_zeta', '(0.0)'], {}), '(eta_zeta, 0.0)\n', (7150, 7165), True, 'import numpy as np\n'), ((2322, 2338), 'numpy.array', 'np.array', (['[r, s]'], {}), '([r, s])\n', (2330, 2338), True, 'import numpy as np\n'), ((6773, 6791), 'numpy.isclose', 'np.isclose', (['s', '(1.0)'], {}), '(s, 1.0)\n', (6783, 6791), True, 'import numpy as np\n'), ((552, 582), 'numpy.sqrt', 'np.sqrt', (['((2.0 * i + 1.0) / 2.0)'], {}), '((2.0 * i + 1.0) / 2.0)\n', (559, 582), True, 'import numpy as np\n'), ((1862, 1910), 'math.sqrt', 'math.sqrt', (['((2.0 * i + 1.0) * (i + j + 1.0) / 2.0)'], {}), '((2.0 * i + 1.0) * (i + j + 1.0) / 2.0)\n', (1871, 1910), False, 'import math\n'), ((3685, 3733), 'math.sqrt', 'math.sqrt', (['((2.0 * i + 1.0) * (i + j + 1.0) / 2.0)'], {}), '((2.0 * i + 1.0) * (i + j + 1.0) / 2.0)\n', (3694, 3733), False, 'import math\n'), ((5606, 5682), 'math.sqrt', 'math.sqrt', (['((2.0 * i + 1.0) * (i + j + 1.0) * (2.0 * (i + j + k) + 3.0) / 4.0)'], {}), '((2.0 * i + 1.0) * (i + j + 1.0) * (2.0 * (i + j + k) + 3.0) / 4.0)\n', (5615, 5682), False, 'import math\n'), ((9156, 9232), 'math.sqrt', 'math.sqrt', (['((2.0 * i + 1.0) * (i + j + 1.0) * (2.0 * (i + j + k) + 3.0) / 4.0)'], {}), '((2.0 * i + 1.0) * (i + j + 1.0) * (2.0 * (i + j + k) + 3.0) / 4.0)\n', (9165, 9232), False, 'import math\n')]
|
#!/usr/bin/python3
from tools import *
from sys import argv
from os.path import join
import h5py
import matplotlib.pylab as plt
from matplotlib.patches import Wedge
import numpy as np
if len(argv) > 1:
pathToSimFolder = argv[1]
else:
pathToSimFolder = "../data/"
parameters, electrodes = readParameters(pathToSimFolder)
electrodeNumber = len(electrodes)
acceptorPos = np.zeros((int(parameters["acceptorNumber"]), 2))
try:
donorPos = np.zeros((int(parameters["donorNumber"]), 2))
except KeyError:
donorPos = np.zeros(
(int(parameters["acceptorNumber"] * parameters["compensationFactor"]), 2)
)
with open(join(pathToSimFolder, "device.txt")) as deviceFile:
line = next(deviceFile)
line = next(deviceFile)
for i in range(acceptorPos.shape[0]):
acceptorPos[i] = next(deviceFile).split(" ")
line = next(deviceFile)
line = next(deviceFile)
for i in range(donorPos.shape[0]):
donorPos[i] = next(deviceFile).split(" ")
# print(acceptorPos)
# print(donorPos)
electrodePositions = np.empty((len(electrodes), 2))
for i in range(len(electrodes)):
if parameters["geometry"] == "rect":
if electrodes[i][1] == 0:
electrodePositions[i] = [0, electrodes[i][0] * parameters["lenY"]]
if electrodes[i][1] == 1:
electrodePositions[i] = [
parameters["lenX"],
electrodes[i][0] * parameters["lenY"],
]
if electrodes[i][1] == 2:
electrodePositions[i] = [electrodes[i][0] * parameters["lenX"], 0]
if electrodes[i][1] == 3:
electrodePositions[i] = [
electrodes[i][0] * parameters["lenX"],
parameters["lenY"],
]
elif parameters["geometry"] == "circle":
electrodePositions[i] = [
parameters["radius"] * np.cos(electrodes[i][0] / 360 * 2 * np.pi),
parameters["radius"] * np.sin(electrodes[i][0] / 360 * 2 * np.pi),
]
# print(electrodePositions)
def colorMaker(x):
from matplotlib import colors
from scipy.interpolate import interp1d
cols = ["darkred", "darkgreen"]
rgbaData = np.array([colors.to_rgba(c) for c in cols])
rInterpolater = interp1d(np.linspace(0, 1, len(cols)), rgbaData[:, 0])
gInterpolater = interp1d(np.linspace(0, 1, len(cols)), rgbaData[:, 1])
bInterpolater = interp1d(np.linspace(0, 1, len(cols)), rgbaData[:, 2])
return np.array([rInterpolater(x), gInterpolater(x), bInterpolater(x), 1])
inp = ["0_0", "0_1", "1_0", "1_1"]
for fileNumber in [1, 2, 3, 4]:
print(inp[fileNumber - 1])
# for fileNumber in [1]:
data = np.genfromtxt(
join(pathToSimFolder, f"swapTrackFile{fileNumber}.txt"),
delimiter=";",
dtype=int,
)
trajectoriesSortedByStartEnd = [
[[] for j in range(len(electrodes))] for i in range(len(electrodes))
]
trajectories = []
hops = 20000
IDs = {}
hitID = 0
for i in range(hops):
hoppingSite1 = data[i, 0]
hoppingSite2 = data[i, 1]
# print("hoppingSite1",hoppingSite1,"hoppingSite2",hoppingSite2)
if hoppingSite1 in IDs:
ID = IDs[hoppingSite1]
del IDs[hoppingSite1]
# print("found ID",ID)
else:
ID = hitID
hitID += 1
trajectories.append([])
# print("new ID", ID)
if hoppingSite2 < parameters["acceptorNumber"]:
IDs[hoppingSite2] = ID
trajectories[ID].append([hoppingSite1, hoppingSite2])
# sort trajectories
for i in range(len(trajectories)):
if trajectories[i][0][0] >= parameters["acceptorNumber"]:
if trajectories[i][-1][1] >= parameters["acceptorNumber"]:
trajectoriesSortedByStartEnd[
trajectories[i][0][0] - int(parameters["acceptorNumber"])
][trajectories[i][-1][1] - int(parameters["acceptorNumber"])].append(
trajectories[i]
)
# print(trajectories[i][0][0], trajectories[i][-1][1])
for k in range(len(electrodes)):
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
electodePlotWidth = 8
for i in range(len(electrodes)):
if i == parameters["outputElectrode"]:
col = "blue"
elif i == parameters["inputElectrode1"]:
if fileNumber in [3, 4]:
col = "red"
else:
col = "rosybrown"
elif i == parameters["inputElectrode2"]:
if fileNumber in [2, 4]:
col = "red"
else:
col = "rosybrown"
else:
col = "green"
if parameters["geometry"] == "rect":
if electrodes[i][1] == 0:
angle = 0
xy = (
0 - electodePlotWidth / 2,
electrodes[i][0] * parameters["lenY"]
- parameters["electrodeWidth"] / 2,
)
elif electrodes[i][1] == 1:
angle = 0
xy = (
parameters["lenX"] - electodePlotWidth / 2,
electrodes[i][0] * parameters["lenY"]
- parameters["electrodeWidth"] / 2,
)
elif electrodes[i][1] == 2:
angle = 90
xy = (
electrodes[i][0] * parameters["lenX"]
+ parameters["electrodeWidth"] / 2,
0 - electodePlotWidth / 2,
)
elif electrodes[i][1] == 3:
angle = 90
xy = (
electrodes[i][0] * parameters["lenX"]
+ parameters["electrodeWidth"] / 2,
parameters["lenY"] - electodePlotWidth / 2,
)
ax.add_artist(
plt.Rectangle(
xy,
electodePlotWidth,
parameters["electrodeWidth"],
angle=angle,
fc=col,
ec=col,
zorder=-1,
)
)
elif parameters["geometry"] == "circle":
electrodeWidth = (
parameters["electrodeWidth"]
/ (parameters["radius"] * 2 * np.pi)
* 360
) # in degrees
ax.add_artist(
Wedge(
(0, 0),
parameters["radius"] + electodePlotWidth / 2,
electrodes[i][0] - electrodeWidth / 2,
electrodes[i][0] + electrodeWidth / 2,
width=electodePlotWidth,
fc=col,
ec=col,
zorder=-1,
)
)
ax.scatter(acceptorPos[:, 0], acceptorPos[:, 1], c="k", marker=".", s=20)
ax.scatter(donorPos[:, 0], donorPos[:, 1], c="k", marker="x", s=20)
for l in range(len(electrodes)):
trajectories = trajectoriesSortedByStartEnd[k][l]
for i in range(len(trajectories)):
for j in range(len(trajectories[i])):
hoppingSite1 = trajectories[i][j][0]
hoppingSite2 = trajectories[i][j][1]
if hoppingSite1 >= parameters["acceptorNumber"]:
x1, y1 = (
electrodePositions[
hoppingSite1 - int(parameters["acceptorNumber"])
][0],
electrodePositions[
hoppingSite1 - int(parameters["acceptorNumber"])
][1],
)
else:
x1, y1 = (
acceptorPos[hoppingSite1, 0],
acceptorPos[hoppingSite1, 1],
)
if hoppingSite2 >= parameters["acceptorNumber"]:
x2, y2 = (
electrodePositions[
hoppingSite2 - int(parameters["acceptorNumber"])
][0],
electrodePositions[
hoppingSite2 - int(parameters["acceptorNumber"])
][1],
)
else:
x2, y2 = (
acceptorPos[hoppingSite2, 0],
acceptorPos[hoppingSite2, 1],
)
# ax.plot([x1,x2],[y1,y2],"-",alpha=0.05,color="k",linewidth=2)
ax.plot(
[x1, x2],
[y1, y2],
"-",
alpha=0.05,
color=color(l, len(electrodes)),
linewidth=2,
)
# if currentRatio>0.5:
# ax.arrow((x2+x1)/2,(y2+y1)/2,(x2-x1)*0.001,(y2-y1)*0.001,color=colorMaker(abs(currentRatio-0.5)*2),ec=None,alpha=absBins[i,j],linewidth=0,head_width=(currentRatio-0.5)*20)
ax.axis("off")
if parameters["geometry"] == "circle":
ax.add_artist(
plt.Circle((0, 0), parameters["radius"], fc="none", ec="k", zorder=-2)
)
elif parameters["geometry"] == "rect":
ax.add_artist(
plt.Rectangle(
(0, 0),
parameters["lenX"],
parameters["lenY"],
fc="none",
ec="k",
zorder=-2,
)
)
if parameters["geometry"] == "rect":
ax.set_xlim(
-electodePlotWidth / 2, parameters["lenX"] + electodePlotWidth / 2
)
ax.set_ylim(
-electodePlotWidth / 2, parameters["lenY"] + electodePlotWidth / 2
)
elif parameters["geometry"] == "circle":
ax.set_xlim(
-parameters["radius"] - electodePlotWidth,
parameters["radius"] + electodePlotWidth,
)
ax.set_ylim(
-parameters["radius"] - electodePlotWidth,
parameters["radius"] + electodePlotWidth,
)
ax.set_aspect("equal")
plt.savefig(
join(pathToSimFolder, f"trajectory_fromEl_{k}_{inp[fileNumber-1]}.png"),
bbox_inches="tight",
dpi=300,
)
# plt.show()
plt.close(fig)
for k in range(len(electrodes)):
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
electodePlotWidth = 8
for i in range(len(electrodes)):
if i == parameters["outputElectrode"]:
col = "blue"
elif i == parameters["inputElectrode1"]:
if fileNumber in [3, 4]:
col = "red"
else:
col = "rosybrown"
elif i == parameters["inputElectrode2"]:
if fileNumber in [2, 4]:
col = "red"
else:
col = "rosybrown"
else:
col = "green"
if parameters["geometry"] == "rect":
if electrodes[i][1] == 0:
angle = 0
xy = (
0 - electodePlotWidth / 2,
electrodes[i][0] * parameters["lenY"]
- parameters["electrodeWidth"] / 2,
)
elif electrodes[i][1] == 1:
angle = 0
xy = (
parameters["lenX"] - electodePlotWidth / 2,
electrodes[i][0] * parameters["lenY"]
- parameters["electrodeWidth"] / 2,
)
elif electrodes[i][1] == 2:
angle = 90
xy = (
electrodes[i][0] * parameters["lenX"]
+ parameters["electrodeWidth"] / 2,
0 - electodePlotWidth / 2,
)
elif electrodes[i][1] == 3:
angle = 90
xy = (
electrodes[i][0] * parameters["lenX"]
+ parameters["electrodeWidth"] / 2,
parameters["lenY"] - electodePlotWidth / 2,
)
ax.add_artist(
plt.Rectangle(
xy,
electodePlotWidth,
parameters["electrodeWidth"],
angle=angle,
fc=col,
ec=col,
zorder=-1,
)
)
elif parameters["geometry"] == "circle":
electrodeWidth = (
parameters["electrodeWidth"]
/ (parameters["radius"] * 2 * np.pi)
* 360
) # in degrees
ax.add_artist(
Wedge(
(0, 0),
parameters["radius"] + electodePlotWidth / 2,
electrodes[i][0] - electrodeWidth / 2,
electrodes[i][0] + electrodeWidth / 2,
width=electodePlotWidth,
fc=col,
ec=col,
zorder=-1,
)
)
ax.scatter(acceptorPos[:, 0], acceptorPos[:, 1], c="k", marker=".", s=20)
ax.scatter(donorPos[:, 0], donorPos[:, 1], c="k", marker="x", s=20)
for l in range(len(electrodes)):
trajectories = trajectoriesSortedByStartEnd[l][k]
for i in range(len(trajectories)):
for j in range(len(trajectories[i])):
hoppingSite1 = trajectories[i][j][0]
hoppingSite2 = trajectories[i][j][1]
if hoppingSite1 >= parameters["acceptorNumber"]:
x1, y1 = (
electrodePositions[
hoppingSite1 - int(parameters["acceptorNumber"])
][0],
electrodePositions[
hoppingSite1 - int(parameters["acceptorNumber"])
][1],
)
else:
x1, y1 = (
acceptorPos[hoppingSite1, 0],
acceptorPos[hoppingSite1, 1],
)
if hoppingSite2 >= parameters["acceptorNumber"]:
x2, y2 = (
electrodePositions[
hoppingSite2 - int(parameters["acceptorNumber"])
][0],
electrodePositions[
hoppingSite2 - int(parameters["acceptorNumber"])
][1],
)
else:
x2, y2 = (
acceptorPos[hoppingSite2, 0],
acceptorPos[hoppingSite2, 1],
)
# ax.plot([x1,x2],[y1,y2],"-",alpha=0.05,color="k",linewidth=2)
ax.plot(
[x1, x2],
[y1, y2],
"-",
alpha=0.05,
color=color(l, len(electrodes)),
linewidth=2,
)
# if currentRatio>0.5:
# ax.arrow((x2+x1)/2,(y2+y1)/2,(x2-x1)*0.001,(y2-y1)*0.001,color=colorMaker(abs(currentRatio-0.5)*2),ec=None,alpha=absBins[i,j],linewidth=0,head_width=(currentRatio-0.5)*20)
ax.axis("off")
if parameters["geometry"] == "circle":
ax.add_artist(
plt.Circle((0, 0), parameters["radius"], fc="none", ec="k", zorder=-2)
)
elif parameters["geometry"] == "rect":
ax.add_artist(
plt.Rectangle(
(0, 0),
parameters["lenX"],
parameters["lenY"],
fc="none",
ec="k",
zorder=-2,
)
)
if parameters["geometry"] == "rect":
ax.set_xlim(
-electodePlotWidth / 2, parameters["lenX"] + electodePlotWidth / 2
)
ax.set_ylim(
-electodePlotWidth / 2, parameters["lenY"] + electodePlotWidth / 2
)
elif parameters["geometry"] == "circle":
ax.set_xlim(
-parameters["radius"] - electodePlotWidth,
parameters["radius"] + electodePlotWidth,
)
ax.set_ylim(
-parameters["radius"] - electodePlotWidth,
parameters["radius"] + electodePlotWidth,
)
ax.set_aspect("equal")
plt.savefig(
join(pathToSimFolder, f"trajectory_toEl_{k}_{inp[fileNumber-1]}.png"),
bbox_inches="tight",
dpi=300,
)
# plt.show()
plt.close(fig)
|
[
"matplotlib.pylab.subplots",
"matplotlib.pylab.Circle",
"matplotlib.patches.Wedge",
"matplotlib.colors.to_rgba",
"os.path.join",
"matplotlib.pylab.Rectangle",
"numpy.cos",
"numpy.sin",
"matplotlib.pylab.close"
] |
[((637, 672), 'os.path.join', 'join', (['pathToSimFolder', '"""device.txt"""'], {}), "(pathToSimFolder, 'device.txt')\n", (641, 672), False, 'from os.path import join\n'), ((2671, 2726), 'os.path.join', 'join', (['pathToSimFolder', 'f"""swapTrackFile{fileNumber}.txt"""'], {}), "(pathToSimFolder, f'swapTrackFile{fileNumber}.txt')\n", (2675, 2726), False, 'from os.path import join\n'), ((4144, 4196), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(4.980614173228346, 3.2)'}), '(1, 1, figsize=(4.980614173228346, 3.2))\n', (4156, 4196), True, 'import matplotlib.pylab as plt\n'), ((10953, 10967), 'matplotlib.pylab.close', 'plt.close', (['fig'], {}), '(fig)\n', (10962, 10967), True, 'import matplotlib.pylab as plt\n'), ((11025, 11077), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(4.980614173228346, 3.2)'}), '(1, 1, figsize=(4.980614173228346, 3.2))\n', (11037, 11077), True, 'import matplotlib.pylab as plt\n'), ((17832, 17846), 'matplotlib.pylab.close', 'plt.close', (['fig'], {}), '(fig)\n', (17841, 17846), True, 'import matplotlib.pylab as plt\n'), ((2169, 2186), 'matplotlib.colors.to_rgba', 'colors.to_rgba', (['c'], {}), '(c)\n', (2183, 2186), False, 'from matplotlib import colors\n'), ((10786, 10859), 'os.path.join', 'join', (['pathToSimFolder', 'f"""trajectory_fromEl_{k}_{inp[fileNumber - 1]}.png"""'], {}), "(pathToSimFolder, f'trajectory_fromEl_{k}_{inp[fileNumber - 1]}.png')\n", (10790, 10859), False, 'from os.path import join\n'), ((17667, 17738), 'os.path.join', 'join', (['pathToSimFolder', 'f"""trajectory_toEl_{k}_{inp[fileNumber - 1]}.png"""'], {}), "(pathToSimFolder, f'trajectory_toEl_{k}_{inp[fileNumber - 1]}.png')\n", (17671, 17738), False, 'from os.path import join\n'), ((9649, 9719), 'matplotlib.pylab.Circle', 'plt.Circle', (['(0, 0)', "parameters['radius']"], {'fc': '"""none"""', 'ec': '"""k"""', 'zorder': '(-2)'}), "((0, 0), parameters['radius'], fc='none', ec='k', zorder=-2)\n", (9659, 9719), True, 'import matplotlib.pylab as plt\n'), ((16530, 16600), 'matplotlib.pylab.Circle', 'plt.Circle', (['(0, 0)', "parameters['radius']"], {'fc': '"""none"""', 'ec': '"""k"""', 'zorder': '(-2)'}), "((0, 0), parameters['radius'], fc='none', ec='k', zorder=-2)\n", (16540, 16600), True, 'import matplotlib.pylab as plt\n'), ((1846, 1888), 'numpy.cos', 'np.cos', (['(electrodes[i][0] / 360 * 2 * np.pi)'], {}), '(electrodes[i][0] / 360 * 2 * np.pi)\n', (1852, 1888), True, 'import numpy as np\n'), ((1925, 1967), 'numpy.sin', 'np.sin', (['(electrodes[i][0] / 360 * 2 * np.pi)'], {}), '(electrodes[i][0] / 360 * 2 * np.pi)\n', (1931, 1967), True, 'import numpy as np\n'), ((6088, 6199), 'matplotlib.pylab.Rectangle', 'plt.Rectangle', (['xy', 'electodePlotWidth', "parameters['electrodeWidth']"], {'angle': 'angle', 'fc': 'col', 'ec': 'col', 'zorder': '(-1)'}), "(xy, electodePlotWidth, parameters['electrodeWidth'], angle=\n angle, fc=col, ec=col, zorder=-1)\n", (6101, 6199), True, 'import matplotlib.pylab as plt\n'), ((9824, 9920), 'matplotlib.pylab.Rectangle', 'plt.Rectangle', (['(0, 0)', "parameters['lenX']", "parameters['lenY']"], {'fc': '"""none"""', 'ec': '"""k"""', 'zorder': '(-2)'}), "((0, 0), parameters['lenX'], parameters['lenY'], fc='none', ec\n ='k', zorder=-2)\n", (9837, 9920), True, 'import matplotlib.pylab as plt\n'), ((12969, 13080), 'matplotlib.pylab.Rectangle', 'plt.Rectangle', (['xy', 'electodePlotWidth', "parameters['electrodeWidth']"], {'angle': 'angle', 'fc': 'col', 'ec': 'col', 'zorder': '(-1)'}), "(xy, electodePlotWidth, 
parameters['electrodeWidth'], angle=\n angle, fc=col, ec=col, zorder=-1)\n", (12982, 13080), True, 'import matplotlib.pylab as plt\n'), ((16705, 16801), 'matplotlib.pylab.Rectangle', 'plt.Rectangle', (['(0, 0)', "parameters['lenX']", "parameters['lenY']"], {'fc': '"""none"""', 'ec': '"""k"""', 'zorder': '(-2)'}), "((0, 0), parameters['lenX'], parameters['lenY'], fc='none', ec\n ='k', zorder=-2)\n", (16718, 16801), True, 'import matplotlib.pylab as plt\n'), ((6707, 6906), 'matplotlib.patches.Wedge', 'Wedge', (['(0, 0)', "(parameters['radius'] + electodePlotWidth / 2)", '(electrodes[i][0] - electrodeWidth / 2)', '(electrodes[i][0] + electrodeWidth / 2)'], {'width': 'electodePlotWidth', 'fc': 'col', 'ec': 'col', 'zorder': '(-1)'}), "((0, 0), parameters['radius'] + electodePlotWidth / 2, electrodes[i][0\n ] - electrodeWidth / 2, electrodes[i][0] + electrodeWidth / 2, width=\n electodePlotWidth, fc=col, ec=col, zorder=-1)\n", (6712, 6906), False, 'from matplotlib.patches import Wedge\n'), ((13588, 13787), 'matplotlib.patches.Wedge', 'Wedge', (['(0, 0)', "(parameters['radius'] + electodePlotWidth / 2)", '(electrodes[i][0] - electrodeWidth / 2)', '(electrodes[i][0] + electrodeWidth / 2)'], {'width': 'electodePlotWidth', 'fc': 'col', 'ec': 'col', 'zorder': '(-1)'}), "((0, 0), parameters['radius'] + electodePlotWidth / 2, electrodes[i][0\n ] - electrodeWidth / 2, electrodes[i][0] + electrodeWidth / 2, width=\n electodePlotWidth, fc=col, ec=col, zorder=-1)\n", (13593, 13787), False, 'from matplotlib.patches import Wedge\n')]
|
# coding: utf-8
"""
2018-03-19.
Maximum number of screenshots in 1 second, converting BGRA raw values to RGB.
GNU/Linux
pil_frombytes 139
mss_rgb 119
pil_frombytes_rgb 51
numpy_flip 31
numpy_slice 29
macOS
pil_frombytes 209
mss_rgb 174
pil_frombytes_rgb 113
numpy_flip 39
numpy_slice 36
Windows
pil_frombytes 81
mss_rgb 66
pil_frombytes_rgb 42
numpy_flip 25
numpy_slice 22
"""
from __future__ import print_function
import time
import numpy
from PIL import Image
import mss
def mss_rgb(im):
return im.rgb
def numpy_flip(im):
frame = numpy.array(im, dtype=numpy.uint8)
return numpy.flip(frame[:, :, :3], 2).tobytes()
def numpy_slice(im):
return numpy.array(im, dtype=numpy.uint8)[..., [2, 1, 0]].tobytes()
def pil_frombytes_rgb(im):
return Image.frombytes('RGB', im.size, im.rgb).tobytes()
def pil_frombytes(im):
return Image.frombytes('RGB', im.size, im.bgra, 'raw', 'BGRX').tobytes()
def benchmark():
with mss.mss() as sct:
im = sct.grab(sct.monitors[0])
for func in (pil_frombytes,
mss_rgb,
pil_frombytes_rgb,
numpy_flip,
numpy_slice):
count = 0
start = time.time()
while (time.time() - start) <= 1:
func(im)
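                # Reset the cached .rgb value on the ScreenShot (name-mangled
                # private attribute) so each iteration re-does the conversion.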
im._ScreenShot__rgb = None
count += 1
print(func.__name__.ljust(17), count)
benchmark()
|
[
"numpy.flip",
"mss.mss",
"numpy.array",
"PIL.Image.frombytes",
"time.time"
] |
[((655, 689), 'numpy.array', 'numpy.array', (['im'], {'dtype': 'numpy.uint8'}), '(im, dtype=numpy.uint8)\n', (666, 689), False, 'import numpy\n'), ((1057, 1066), 'mss.mss', 'mss.mss', ([], {}), '()\n', (1064, 1066), False, 'import mss\n'), ((701, 731), 'numpy.flip', 'numpy.flip', (['frame[:, :, :3]', '(2)'], {}), '(frame[:, :, :3], 2)\n', (711, 731), False, 'import numpy\n'), ((877, 916), 'PIL.Image.frombytes', 'Image.frombytes', (['"""RGB"""', 'im.size', 'im.rgb'], {}), "('RGB', im.size, im.rgb)\n", (892, 916), False, 'from PIL import Image\n'), ((963, 1018), 'PIL.Image.frombytes', 'Image.frombytes', (['"""RGB"""', 'im.size', 'im.bgra', '"""raw"""', '"""BGRX"""'], {}), "('RGB', im.size, im.bgra, 'raw', 'BGRX')\n", (978, 1018), False, 'from PIL import Image\n'), ((1330, 1341), 'time.time', 'time.time', ([], {}), '()\n', (1339, 1341), False, 'import time\n'), ((776, 810), 'numpy.array', 'numpy.array', (['im'], {'dtype': 'numpy.uint8'}), '(im, dtype=numpy.uint8)\n', (787, 810), False, 'import numpy\n'), ((1361, 1372), 'time.time', 'time.time', ([], {}), '()\n', (1370, 1372), False, 'import time\n')]
|
import os
import random
import argparse
import multiprocessing
import numpy as np
import torch
from torchvision import models, transforms
from torch.utils.data import DataLoader, Dataset
from pathlib import Path
from PIL import Image
from utils import Bar, config, mkdir_p, AverageMeter
from datetime import datetime
from tensorboardX import SummaryWriter
from byol_pytorch import BYOL
# arguments
parser = argparse.ArgumentParser(description='byol-lightning-test')
# Architecture & hyper-parameter
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet',
                    help='model architecture [resnet, ...] (default: resnet)')
parser.add_argument('--depth', type=int, default=18, help='Model depth.')
parser.add_argument('-c', '--checkpoint', default='../checkpoints', type=str, metavar='PATH',
                    help='path to save checkpoints (default: ../checkpoints)')
parser.add_argument('--epoch', type=int, default=100, help='Epoch')
parser.add_argument('--batch-size', type=int, default=32, help='Batch size')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
# Device options
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('--gpu-id', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
# Paths
parser.add_argument('-d', '--dataset', default='neu', type=str)
parser.add_argument('--image_folder', type=str, required=True,
help='path to your folder of images for self-supervised learning')
parser.add_argument('--board-path', '--bp', default='../board', type=str,
help='tensorboardx path')
parser.add_argument('--board-tag', '--tg', default='byol', type=str,
help='tensorboardx writer tag')
args = parser.parse_args()
# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
# Torch Seed
# Random seed
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
# Random Lib Seed
random.seed(args.manualSeed)
# Numpy Seed
np.random.seed(args.manualSeed)
if use_cuda:
torch.cuda.manual_seed_all(args.manualSeed)
# constants
args.image_size = 256
NUM_GPUS = 1
IMAGE_EXTS = ['.jpg', '.png', '.jpeg', '.bmp']
NUM_WORKERS = multiprocessing.cpu_count()
# task_time = datetime.now().isoformat()
# args.checkpoint = os.path.join(args.checkpoint, args.dataset, "{}-{}{:d}-bs{:d}-lr{:.5f}-{}".format(args.arch,
# args.depth,
# args.batch_size,
# args.lr,
# args.board_tag),
# task_time)
# if not os.path.isdir(args.checkpoint):
# mkdir_p(args.checkpoint)
# config.save_config(args, os.path.join(args.checkpoint, "config.txt"))
#
# writer_train = SummaryWriter(
# log_dir=os.path.join(args.board_path, args.dataset, "{}-{}{:d}-bs{:d}-lr{:.5f}-{}".format(args.arch,
# args.depth,
# args.batch_size,
# args.lr,
# args.board_tag),
# task_time, "train"))
args.task_time = datetime.now().isoformat()
output_name = "{}{:d}-bs{:d}-lr{:.5f}-{}".format(args.arch,
args.depth,
args.batch_size,
args.lr,
args.board_tag)
args.checkpoint = os.path.join(args.checkpoint, args.dataset, output_name, args.task_time)
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
config.save_config(args, os.path.join(args.checkpoint, "config.txt"))
writer_train = SummaryWriter(
log_dir=os.path.join(args.board_path, args.dataset, output_name, args.task_time))
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def expand_greyscale(t):
return t.expand(3, -1, -1)
class ImagesDataset(Dataset):
def __init__(self, folder, image_size):
super().__init__()
self.folder = folder
self.paths = []
for path in Path(f'{folder}').glob('**/*'):
_, ext = os.path.splitext(path)
if ext.lower() in IMAGE_EXTS:
self.paths.append(path)
print(f'{len(self.paths)} images found')
self.transform = transforms.Compose([
transforms.Resize(args.image_size),
transforms.RandomSizedCrop((args.image_size, args.image_size)),
transforms.ColorJitter(0.4, 0.4, 0.4),
transforms.ToTensor(),
# normalize
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
img = img.convert('RGB')
return self.transform(img)
if args.arch is "resnet":
if args.depth == 18:
model = models.resnet18(pretrained=False).cuda()
elif args.depth == 34:
model = models.resnet34(pretrained=False).cuda()
elif args.depth == 50:
model = models.resnet50(pretrained=False).cuda()
elif args.depth == 101:
model = models.resnet101(pretrained=False).cuda()
else:
assert ("Not supported Depth")
learner = BYOL(
model,
image_size=args.image_size,
hidden_layer='avgpool',
projection_size=256,
projection_hidden_size=4096,
moving_average_decay=0.99,
use_momentum=False # turn off momentum in the target encoder
)
opt = torch.optim.Adam(learner.parameters(), lr=args.lr)
ds = ImagesDataset(args.image_folder, args.image_size)
trainloader = DataLoader(ds, batch_size=args.batch_size, num_workers=NUM_WORKERS, shuffle=True)
losses = AverageMeter()
for epoch in range(args.epoch):
bar = Bar('Processing', max=len(trainloader))
for batch_idx, inputs in enumerate(trainloader):
loss = learner(inputs.cuda())
losses.update(loss.data.item(), inputs.size(0))
opt.zero_grad()
loss.backward()
opt.step()
# plot progress
bar.suffix = 'Epoch {epoch} - ({batch}/{size}) | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} '.format(
epoch=epoch,
batch=batch_idx + 1,
size=len(trainloader),
total=bar.elapsed_td,
eta=bar.eta_td,
loss=loss.item(),
)
n_iter = epoch * len(trainloader) + batch_idx + 1
writer_train.add_scalar('Train/loss', loss.data.item(), n_iter)
bar.next()
writer_train.add_scalar('Avg.loss', losses.avg, epoch)
bar.finish()
# save your improved network
torch.save(model.state_dict(), os.path.join(args.checkpoint, 'byol.pt'))
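# Hedged usage sketch (not part of the original script): the saved state dict
# can later be restored into a matching backbone for downstream evaluation,
# assuming the same --depth used here (18 in this example):
#
#   backbone = models.resnet18(pretrained=False)
#   backbone.load_state_dict(torch.load(os.path.join(args.checkpoint, 'byol.pt')))
#   backbone.eval()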
|
[
"utils.mkdir_p",
"torchvision.models.resnet18",
"multiprocessing.cpu_count",
"torchvision.transforms.ColorJitter",
"torch.cuda.is_available",
"argparse.ArgumentParser",
"pathlib.Path",
"os.path.isdir",
"numpy.random.seed",
"torchvision.transforms.ToTensor",
"random.randint",
"torchvision.models.resnet50",
"torchvision.transforms.RandomSizedCrop",
"os.path.splitext",
"torchvision.models.resnet101",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torch.cuda.manual_seed_all",
"PIL.Image.open",
"os.path.join",
"random.seed",
"datetime.datetime.now",
"torchvision.models.resnet34",
"torch.utils.data.DataLoader",
"utils.AverageMeter",
"byol_pytorch.BYOL"
] |
[((409, 467), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""byol-lightning-test"""'}), "(description='byol-lightning-test')\n", (432, 467), False, 'import argparse\n'), ((1935, 1960), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1958, 1960), False, 'import torch\n'), ((2082, 2110), 'random.seed', 'random.seed', (['args.manualSeed'], {}), '(args.manualSeed)\n', (2093, 2110), False, 'import random\n'), ((2125, 2156), 'numpy.random.seed', 'np.random.seed', (['args.manualSeed'], {}), '(args.manualSeed)\n', (2139, 2156), True, 'import numpy as np\n'), ((2327, 2354), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (2352, 2354), False, 'import multiprocessing\n'), ((4161, 4233), 'os.path.join', 'os.path.join', (['args.checkpoint', 'args.dataset', 'output_name', 'args.task_time'], {}), '(args.checkpoint, args.dataset, output_name, args.task_time)\n', (4173, 4233), False, 'import os\n'), ((4502, 4577), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (4522, 4577), False, 'from torchvision import models, transforms\n'), ((5992, 6161), 'byol_pytorch.BYOL', 'BYOL', (['model'], {'image_size': 'args.image_size', 'hidden_layer': '"""avgpool"""', 'projection_size': '(256)', 'projection_hidden_size': '(4096)', 'moving_average_decay': '(0.99)', 'use_momentum': '(False)'}), "(model, image_size=args.image_size, hidden_layer='avgpool',\n projection_size=256, projection_hidden_size=4096, moving_average_decay=\n 0.99, use_momentum=False)\n", (5996, 6161), False, 'from byol_pytorch import BYOL\n'), ((6353, 6439), 'torch.utils.data.DataLoader', 'DataLoader', (['ds'], {'batch_size': 'args.batch_size', 'num_workers': 'NUM_WORKERS', 'shuffle': '(True)'}), '(ds, batch_size=args.batch_size, num_workers=NUM_WORKERS, shuffle\n =True)\n', (6363, 6439), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((6445, 6459), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (6457, 6459), False, 'from utils import Bar, config, mkdir_p, AverageMeter\n'), ((2038, 2062), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (2052, 2062), False, 'import random\n'), ((2174, 2217), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.manualSeed'], {}), '(args.manualSeed)\n', (2200, 2217), False, 'import torch\n'), ((4241, 4271), 'os.path.isdir', 'os.path.isdir', (['args.checkpoint'], {}), '(args.checkpoint)\n', (4254, 4271), False, 'import os\n'), ((4277, 4301), 'utils.mkdir_p', 'mkdir_p', (['args.checkpoint'], {}), '(args.checkpoint)\n', (4284, 4301), False, 'from utils import Bar, config, mkdir_p, AverageMeter\n'), ((4327, 4370), 'os.path.join', 'os.path.join', (['args.checkpoint', '"""config.txt"""'], {}), "(args.checkpoint, 'config.txt')\n", (4339, 4370), False, 'import os\n'), ((7379, 7419), 'os.path.join', 'os.path.join', (['args.checkpoint', '"""byol.pt"""'], {}), "(args.checkpoint, 'byol.pt')\n", (7391, 7419), False, 'import os\n'), ((3806, 3820), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3818, 3820), False, 'from datetime import datetime\n'), ((4415, 4487), 'os.path.join', 'os.path.join', (['args.board_path', 'args.dataset', 'output_name', 'args.task_time'], {}), '(args.board_path, args.dataset, output_name, args.task_time)\n', (4427, 4487), False, 'import os\n'), ((5483, 5499), 'PIL.Image.open', 'Image.open', (['path'], {}), 
'(path)\n', (5493, 5499), False, 'from PIL import Image\n'), ((4899, 4921), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (4915, 4921), False, 'import os\n'), ((4846, 4863), 'pathlib.Path', 'Path', (['f"""{folder}"""'], {}), "(f'{folder}')\n", (4850, 4863), False, 'from pathlib import Path\n'), ((5113, 5147), 'torchvision.transforms.Resize', 'transforms.Resize', (['args.image_size'], {}), '(args.image_size)\n', (5130, 5147), False, 'from torchvision import models, transforms\n'), ((5161, 5223), 'torchvision.transforms.RandomSizedCrop', 'transforms.RandomSizedCrop', (['(args.image_size, args.image_size)'], {}), '((args.image_size, args.image_size))\n', (5187, 5223), False, 'from torchvision import models, transforms\n'), ((5237, 5274), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.4)', '(0.4)', '(0.4)'], {}), '(0.4, 0.4, 0.4)\n', (5259, 5274), False, 'from torchvision import models, transforms\n'), ((5288, 5309), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5307, 5309), False, 'from torchvision import models, transforms\n'), ((5637, 5670), 'torchvision.models.resnet18', 'models.resnet18', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (5652, 5670), False, 'from torchvision import models, transforms\n'), ((5721, 5754), 'torchvision.models.resnet34', 'models.resnet34', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (5736, 5754), False, 'from torchvision import models, transforms\n'), ((5805, 5838), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (5820, 5838), False, 'from torchvision import models, transforms\n'), ((5890, 5924), 'torchvision.models.resnet101', 'models.resnet101', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (5906, 5924), False, 'from torchvision import models, transforms\n')]
|
import cv2
import pickle
import numpy as np
from flag import Flag
flag = Flag()
with open('assets/colors.h5', 'rb') as f:
colors = pickle.loads(f.read())
with open('label.txt', 'r') as f:
classes = f.readlines()
def detector(image, label):
image = np.asarray(image * 255., np.uint8)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
indices = np.squeeze(np.max(np.max(label, axis=0, keepdims=True), axis=1, keepdims=True))
indices = np.where(indices > 0.5)[0]
for i in indices:
output = np.asarray(label[:, :, i], dtype=np.float)
output[output > flag.threshold] = 255.
output[output <= flag.threshold] = 0.
output = np.asarray(output, dtype=np.uint8)
kernel = np.ones((2, 2), np.float32) / 4
output = cv2.filter2D(output, -1, kernel)
# cv2.imshow('out', cv2.resize(output, (256, 256)))
# cv2.waitKey(0)
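        # Note: the three-value unpacking below matches the OpenCV 3.x API;
        # OpenCV 4.x returns only (contours, hierarchy) from findContours.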
_, contours, _ = cv2.findContours(output, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_L1)
for contour in contours:
# print(contour)
col_wise = contour[:, :, 0]
row_wise = contour[:, :, 1]
x1 = min(col_wise)[0] / flag.y_size * flag.x_size
y1 = min(row_wise)[0] / flag.y_size * flag.x_size
x2 = max(col_wise)[0] / flag.y_size * flag.x_size
y2 = max(row_wise)[0] / flag.y_size * flag.x_size
# print(x1, y1, x2, y2)
c = colors[i]
image = cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (int(c[0]), int(c[1]), int(c[2])), 2)
# print('class =', classes[i-1])
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image, classes[i - 1][:-1], (int(x1), int(y1)), font, .8, (int(c[0]), int(c[1]), int(c[2])), 2,
cv2.LINE_AA)
return image
if __name__ == '__main__':
flag = Flag()
images = np.load('dataset/valid_x.npy')
labels = np.load('dataset/valid_y.npy')
# print(images.shape)
image = images[100]
label = labels[100]
image = detector(image, label)
cv2.imshow('image', image)
cv2.waitKey(0)
|
[
"numpy.ones",
"numpy.where",
"numpy.asarray",
"cv2.filter2D",
"cv2.imshow",
"numpy.max",
"cv2.cvtColor",
"cv2.findContours",
"numpy.load",
"cv2.waitKey",
"flag.Flag"
] |
[((74, 80), 'flag.Flag', 'Flag', ([], {}), '()\n', (78, 80), False, 'from flag import Flag\n'), ((264, 299), 'numpy.asarray', 'np.asarray', (['(image * 255.0)', 'np.uint8'], {}), '(image * 255.0, np.uint8)\n', (274, 299), True, 'import numpy as np\n'), ((311, 349), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (323, 349), False, 'import cv2\n'), ((1865, 1871), 'flag.Flag', 'Flag', ([], {}), '()\n', (1869, 1871), False, 'from flag import Flag\n'), ((1885, 1915), 'numpy.load', 'np.load', (['"""dataset/valid_x.npy"""'], {}), "('dataset/valid_x.npy')\n", (1892, 1915), True, 'import numpy as np\n'), ((1929, 1959), 'numpy.load', 'np.load', (['"""dataset/valid_y.npy"""'], {}), "('dataset/valid_y.npy')\n", (1936, 1959), True, 'import numpy as np\n'), ((2073, 2099), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (2083, 2099), False, 'import cv2\n'), ((2104, 2118), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2115, 2118), False, 'import cv2\n'), ((458, 481), 'numpy.where', 'np.where', (['(indices > 0.5)'], {}), '(indices > 0.5)\n', (466, 481), True, 'import numpy as np\n'), ((525, 567), 'numpy.asarray', 'np.asarray', (['label[:, :, i]'], {'dtype': 'np.float'}), '(label[:, :, i], dtype=np.float)\n', (535, 567), True, 'import numpy as np\n'), ((678, 712), 'numpy.asarray', 'np.asarray', (['output'], {'dtype': 'np.uint8'}), '(output, dtype=np.uint8)\n', (688, 712), True, 'import numpy as np\n'), ((779, 811), 'cv2.filter2D', 'cv2.filter2D', (['output', '(-1)', 'kernel'], {}), '(output, -1, kernel)\n', (791, 811), False, 'import cv2\n'), ((922, 987), 'cv2.findContours', 'cv2.findContours', (['output', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_TC89_L1'], {}), '(output, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_L1)\n', (938, 987), False, 'import cv2\n'), ((382, 418), 'numpy.max', 'np.max', (['label'], {'axis': '(0)', 'keepdims': '(True)'}), '(label, axis=0, keepdims=True)\n', (388, 418), True, 'import numpy as np\n'), ((730, 757), 'numpy.ones', 'np.ones', (['(2, 2)', 'np.float32'], {}), '((2, 2), np.float32)\n', (737, 757), True, 'import numpy as np\n')]
|
import requests
import io
import dask.dataframe
from bs4 import BeautifulSoup as BS
import nltk
import pandas
import numpy as np
def News(ticker):
B = BS(requests.get(f"https://www.wsj.com/market-data/quotes/{ticker}", headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'}).content, features="html.parser")
News = B.find('ul', {'id': "newsSummary_c"})
News = [a.getText() for a in News.find_all('a')]
News = [nltk.word_tokenize(h) for h in News]
return dask.dataframe.from_array(np.asarray(News))
api_key = '<KEY>'
def daily(ticker, outputsize = 'compact'):
csv = pandas.read_csv(io.StringIO(requests.get(f'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&&symbol={ticker}&apikey={api_key}&outputsize={outputsize}&datatype=csv').content.decode('utf-8')))
return csv
def intraday_data(ticker, time='1min', outputsize = 'compact'):
return pandas.read_csv(io.StringIO(requests.get(f'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol={ticker}&interval={time}&apikey={api_key}&outputsize={outputsize}&datatype=csv').content.decode('utf-8')))
def tickers():
return pandas.read_csv("NYSE_TICKERS.csv").iloc[:,0]
|
[
"requests.get",
"numpy.asarray",
"nltk.word_tokenize",
"pandas.read_csv"
] |
[((504, 525), 'nltk.word_tokenize', 'nltk.word_tokenize', (['h'], {}), '(h)\n', (522, 525), False, 'import nltk\n'), ((578, 594), 'numpy.asarray', 'np.asarray', (['News'], {}), '(News)\n', (588, 594), True, 'import numpy as np\n'), ((150, 370), 'requests.get', 'requests.get', (['f"""https://www.wsj.com/market-data/quotes/{ticker}"""'], {'headers': "{'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }"}), "(f'https://www.wsj.com/market-data/quotes/{ticker}', headers={\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n })\n", (162, 370), False, 'import requests\n'), ((1247, 1282), 'pandas.read_csv', 'pandas.read_csv', (['"""NYSE_TICKERS.csv"""'], {}), "('NYSE_TICKERS.csv')\n", (1262, 1282), False, 'import pandas\n'), ((717, 884), 'requests.get', 'requests.get', (['f"""https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&&symbol={ticker}&apikey={api_key}&outputsize={outputsize}&datatype=csv"""'], {}), "(\n f'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&&symbol={ticker}&apikey={api_key}&outputsize={outputsize}&datatype=csv'\n )\n", (729, 884), False, 'import requests\n'), ((1026, 1202), 'requests.get', 'requests.get', (['f"""https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol={ticker}&interval={time}&apikey={api_key}&outputsize={outputsize}&datatype=csv"""'], {}), "(\n f'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol={ticker}&interval={time}&apikey={api_key}&outputsize={outputsize}&datatype=csv'\n )\n", (1038, 1202), False, 'import requests\n')]
|
from abc import ABCMeta, abstractmethod
from functools import partial
from typing import Tuple, Union
import numexpr
import numpy as np
from scipy import sparse, special
from tabmat import MatrixBase, StandardizedMatrix
from ._functions import (
binomial_logit_eta_mu_deviance,
binomial_logit_rowwise_gradient_hessian,
gamma_deviance,
gamma_log_eta_mu_deviance,
gamma_log_likelihood,
gamma_log_rowwise_gradient_hessian,
normal_deviance,
normal_identity_eta_mu_deviance,
normal_identity_rowwise_gradient_hessian,
normal_log_likelihood,
poisson_deviance,
poisson_log_eta_mu_deviance,
poisson_log_likelihood,
poisson_log_rowwise_gradient_hessian,
tweedie_deviance,
tweedie_log_eta_mu_deviance,
tweedie_log_likelihood,
tweedie_log_rowwise_gradient_hessian,
)
from ._link import IdentityLink, Link, LogitLink, LogLink
from ._util import _safe_lin_pred, _safe_sandwich_dot
class ExponentialDispersionModel(metaclass=ABCMeta):
r"""Base class for reproductive Exponential Dispersion Models (EDM).
The PDF of :math:`Y \sim \mathrm{EDM}(\mu, \phi)` is given by
.. math::
p(y \mid \theta, \phi)
&= c(y, \phi) \exp((\theta y - A(\theta)_ / \phi) \\
&= \tilde{c}(y, \phi) \exp(-d(y, \mu) / (2\phi))
with mean :math:`\mathrm{E}(Y) = A'(\theta) = \mu`, variance
:math:`\mathrm{var}(Y) = \phi \cdot v(\mu)`, unit variance
:math:`v(\mu)` and unit deviance :math:`d(y, \mu)`.
Properties
----------
lower_bound
upper_bound
include_lower_bound
include_upper_bound
Methods
-------
in_y_range
unit_variance
unit_variance_derivative
variance
variance_derivative
unit_deviance
unit_deviance_derivative
deviance
deviance_derivative
starting_mu
_mu_deviance_derivative
eta_mu_deviance
gradient_hessian
References
----------
https://en.wikipedia.org/wiki/Exponential_dispersion_model.
"""
@property
@abstractmethod
def lower_bound(self) -> float:
"""Get the lower bound of values for the EDM."""
pass
@property
@abstractmethod
def upper_bound(self) -> float:
"""Get the upper bound of values for the EDM."""
pass
@property
def include_lower_bound(self) -> bool:
"""Return whether ``lower_bound`` is allowed as a value of ``y``."""
pass
@property
def include_upper_bound(self) -> bool:
"""Return whether ``upper_bound`` is allowed as a value of ``y``."""
pass
def in_y_range(self, x) -> np.ndarray:
"""Return ``True`` if ``x`` is in the valid range of the EDM.
Parameters
----------
x : array-like, shape (n_samples,)
Target values.
Returns
-------
np.ndarray
"""
if self.include_lower_bound:
if self.include_upper_bound:
return np.logical_and(
np.greater_equal(x, self.lower_bound),
np.less_equal(x, self.upper_bound),
)
else:
return np.logical_and(
np.greater_equal(x, self.lower_bound), np.less(x, self.upper_bound)
)
else:
if self.include_upper_bound:
return np.logical_and(
np.greater(x, self.lower_bound), np.less_equal(x, self.upper_bound)
)
else:
return np.logical_and(
np.greater(x, self.lower_bound), np.less(x, self.upper_bound)
)
@abstractmethod
def unit_variance(self, mu):
r"""Compute the unit variance function.
The unit variance :math:`v(\mu)` determines the variance as a function
of the mean :math:`\mu` by
:math:`\mathrm{var}(y_i) = (\phi / s_i) \times v(\mu_i)`. It can
also be derived from the unit deviance :math:`d(y, \mu)` as
.. math::
v(\mu) = \frac{2}{\frac{\partial^2 d(y, \mu)}{\partial\mu^2}}\big|_{y=\mu}.
See also :func:`variance`.
Parameters
----------
mu : array-like, shape (n_samples,)
Predicted mean.
"""
pass
@abstractmethod
def unit_variance_derivative(self, mu):
r"""Compute the derivative of the unit variance with respect to ``mu``.
Return :math:`v'(\mu)`.
Parameters
----------
mu : array-like, shape (n_samples,)
Predicted mean.
"""
pass
def variance(self, mu: np.ndarray, dispersion=1, sample_weight=1) -> np.ndarray:
r"""Compute the variance function.
The variance of :math:`Y_i \sim \mathrm{EDM}(\mu_i, \phi / s_i)` is
:math:`\mathrm{var}(Y_i) = (\phi / s_i) * v(\mu_i)`, with unit variance
:math:`v(\mu)` and weights :math:`s_i`.
Parameters
----------
mu : array-like, shape (n_samples,)
Predicted mean.
dispersion : float, optional (default=1)
Dispersion parameter :math:`\phi`.
sample_weight : array-like, shape (n_samples,), optional (default=1)
Weights or exposure to which variance is inverse proportional.
Returns
-------
array-like, shape (n_samples,)
"""
return self.unit_variance(mu) * dispersion / sample_weight
def variance_derivative(self, mu, dispersion=1, sample_weight=1):
r"""Compute the derivative of the variance with respect to ``mu``.
The derivative of the variance is equal to
:math:`(\phi / s_i) * v'(\mu_i)`, where :math:`v(\mu)` is the unit
variance and :math:`s_i` are weights.
Parameters
----------
mu : array-like, shape (n_samples,)
Predicted mean.
dispersion : float, optional (default=1)
Dispersion parameter :math:`\phi`.
sample_weight : array-like, shape (n_samples,), optional (default=1)
Weights or exposure to which variance is inverse proportional.
Returns
-------
array-like, shape (n_samples,)
"""
return self.unit_variance_derivative(mu) * dispersion / sample_weight
@abstractmethod
def unit_deviance(self, y, mu):
r"""Compute the unit deviance.
In terms of the log likelihood :math:`L`, the unit deviance is
:math:`-2\phi\times [L(y, \mu, \phi) - L(y, y, \phi)].`
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
"""
pass
def unit_deviance_derivative(self, y, mu):
r"""Compute the derivative of the unit deviance with respect to ``mu``.
The derivative of the unit deviance is given by
:math:`-2 \times (y - \mu) / v(\mu)`, where :math:`v(\mu)` is the unit
variance.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
Returns
-------
array-like, shape (n_samples,)
"""
return -2 * (y - mu) / self.unit_variance(mu)
def deviance(self, y, mu, sample_weight=1):
r"""Compute the deviance.
The deviance is a weighted sum of the unit deviances,
:math:`\sum_i s_i \times d(y_i, \mu_i)`, where :math:`d(y, \mu)` is the
unit deviance and :math:`s` are weights. In terms of the log likelihood,
it is :math:`-2\phi \times [L(y, \mu, \phi / s) - L(y, y, \phi / s)]`.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=1)
Weights or exposure to which variance is inversely proportional.
Returns
-------
float
"""
if sample_weight is None:
return np.sum(self.unit_deviance(y, mu))
else:
return np.sum(self.unit_deviance(y, mu) * sample_weight)
def deviance_derivative(self, y, mu, sample_weight=1):
r"""Compute the derivative of the deviance with respect to ``mu``.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,) (default=1)
Weights or exposure to which variance is inverse proportional.
Returns
-------
array-like, shape (n_samples,)
"""
return sample_weight * self.unit_deviance_derivative(y, mu)
def _mu_deviance_derivative(
self,
coef: np.ndarray,
X,
y: np.ndarray,
sample_weight: np.ndarray,
link: Link,
offset: np.ndarray = None,
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute ``mu`` and the derivative of the deviance \
with respect to coefficients."""
lin_pred = _safe_lin_pred(X, coef, offset)
mu = link.inverse(lin_pred)
d1 = link.inverse_derivative(lin_pred)
temp = d1 * self.deviance_derivative(y, mu, sample_weight)
if coef.size == X.shape[1] + 1:
devp = np.concatenate(([temp.sum()], temp @ X))
else:
devp = temp @ X # same as X.T @ temp
return mu, devp
def eta_mu_deviance(
self,
link: Link,
factor: float,
cur_eta: np.ndarray,
X_dot_d: np.ndarray,
y: np.ndarray,
sample_weight: np.ndarray,
):
"""
Compute ``eta``, ``mu`` and the deviance.
Compute:
* the linear predictor, ``eta``, as ``cur_eta + factor * X_dot_d``;
* the link-function-transformed prediction, ``mu``;
* the deviance.
Returns
-------
numpy.ndarray, shape (X.shape[0],)
The linear predictor, ``eta``.
numpy.ndarray, shape (X.shape[0],)
The link-function-transformed prediction, ``mu``.
float
The deviance.
"""
# eta_out and mu_out are filled inside self._eta_mu_deviance,
# avoiding allocating new arrays for every line search loop
eta_out = np.empty_like(cur_eta)
mu_out = np.empty_like(cur_eta)
deviance = self._eta_mu_deviance(
link, factor, cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out
)
return eta_out, mu_out, deviance
def _eta_mu_deviance(
self,
link: Link,
factor: float,
cur_eta: np.ndarray,
X_dot_d: np.ndarray,
y: np.ndarray,
sample_weight: np.ndarray,
eta_out: np.ndarray,
mu_out: np.ndarray,
):
"""
Update ``eta`` and ``mu`` and compute the deviance.
This is a default implementation that should work for all valid
distributions and link functions. To implement a custom optimized
version for a specific distribution and link function, please override
this function in the subclass.
Returns
-------
float
"""
eta_out[:] = cur_eta + factor * X_dot_d
mu_out[:] = link.inverse(eta_out)
return self.deviance(y, mu_out, sample_weight=sample_weight)
def rowwise_gradient_hessian(
self,
link: Link,
coef: np.ndarray,
dispersion,
X: Union[MatrixBase, StandardizedMatrix],
y: np.ndarray,
sample_weight: np.ndarray,
eta: np.ndarray,
mu: np.ndarray,
offset: np.ndarray = None,
):
"""
Compute the gradient and negative Hessian of the log likelihood row-wise.
Returns
-------
numpy.ndarray, shape (X.shape[0],)
The gradient of the log likelihood, row-wise.
numpy.ndarray, shape (X.shape[0],)
The negative Hessian of the log likelihood, row-wise.
"""
gradient_rows = np.empty_like(mu)
hessian_rows = np.empty_like(mu)
self._rowwise_gradient_hessian(
link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
)
# To form the full Hessian matrix from the IRLS sample_weight:
# hessian_matrix = _safe_sandwich_dot(X, hessian_rows, intercept=intercept)
return gradient_rows, hessian_rows
def _rowwise_gradient_hessian(
self, link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
):
"""
Update ``gradient_rows`` and ``hessian_rows`` in place.
This is a default implementation that should work for all valid
distributions and link functions. To implement a custom optimized
version for a specific distribution and link function, please override
this function in the subclass.
"""
# FOR TWEEDIE: sigma_inv = weights / (mu ** p) during optimization bc phi = 1
sigma_inv = get_one_over_variance(self, link, mu, eta, 1.0, sample_weight)
d1 = link.inverse_derivative(eta) # = h'(eta)
# Alternatively:
# h'(eta) = h'(g(mu)) = 1/g'(mu), note that h is inverse of g
# d1 = 1./link.derivative(mu)
d1_sigma_inv = d1 * sigma_inv
gradient_rows[:] = d1_sigma_inv * (y - mu)
hessian_rows[:] = d1 * d1_sigma_inv
def _fisher_information(
self, link, X, y, mu, sample_weight, dispersion, fit_intercept
):
"""Compute the expected information matrix.
Parameters
----------
link : Link
A link function (i.e. an instance of :class:`~glum._link.Link`).
X : array-like
Training data.
y : array-like
Target values.
mu : array-like
Predicted mean.
sample_weight : array-like
Weights or exposure to which variance is inversely proportional.
dispersion : float
The dispersion parameter.
fit_intercept : bool
Whether the model has an intercept.
"""
W = (link.inverse_derivative(link.link(mu)) ** 2) * get_one_over_variance(
self, link, mu, link.inverse(mu), dispersion, sample_weight
)
return _safe_sandwich_dot(X, W, intercept=fit_intercept)
def _observed_information(
self, link, X, y, mu, sample_weight, dispersion, fit_intercept
):
"""Compute the observed information matrix.
Parameters
----------
X : array-like
Training data.
y : array-like
Target values.
mu : array-like
Predicted mean.
sample_weight : array-like
Weights or exposure to which variance is inversely proportional.
dispersion : float
The dispersion parameter.
fit_intercept : bool
Whether the model has an intercept.
"""
linpred = link.link(mu)
W = (
-link.inverse_derivative2(linpred) * (y - mu)
+ (link.inverse_derivative(linpred) ** 2)
* (
1
+ (y - mu) * self.unit_variance_derivative(mu) / self.unit_variance(mu)
)
) * get_one_over_variance(self, link, mu, linpred, dispersion, sample_weight)
return _safe_sandwich_dot(X, W, intercept=fit_intercept)
def _score_matrix(self, link, X, y, mu, sample_weight, dispersion, fit_intercept):
"""Compute the score.
Parameters
----------
X : array-like
Training data.
y : array-like
Target values.
mu : array-like
Predicted mean.
sample_weight : array-like
Weights or exposure to which variance is inversely proportional.
dispersion : float
The dispersion parameter.
fit_intercept : bool
Whether the model has an intercept.
"""
linpred = link.link(mu)
W = (
get_one_over_variance(self, link, mu, linpred, dispersion, sample_weight)
* link.inverse_derivative(linpred)
* (y - mu)
).reshape(-1, 1)
if fit_intercept:
if sparse.issparse(X):
return sparse.hstack((W, X.multiply(W)))
else:
return np.hstack((W, np.multiply(X, W)))
else:
if sparse.issparse(X):
return X.multiply(W)
else:
return np.multiply(X, W)
def dispersion(self, y, mu, sample_weight=None, ddof=1, method="pearson") -> float:
r"""Estimate the dispersion parameter :math:`\phi`.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=1)
Weights or exposure to which variance is inversely proportional.
ddof : int, optional (default=1)
Degrees of freedom consumed by the model for ``mu``.
method = {'pearson', 'deviance'}, optional (default='pearson')
Whether to base the estimate on the Pearson residuals or the deviance.
Returns
-------
float
"""
y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
if method == "pearson":
pearson_residuals = ((y - mu) ** 2) / self.unit_variance(mu)
if sample_weight is None:
numerator = pearson_residuals.sum()
else:
numerator = np.dot(pearson_residuals, sample_weight)
elif method == "deviance":
numerator = self.deviance(y, mu, sample_weight)
else:
raise NotImplementedError(f"Method {method} hasn't been implemented.")
if sample_weight is None:
return numerator / (len(y) - ddof)
else:
return numerator / (sample_weight.sum() - ddof)
class TweedieDistribution(ExponentialDispersionModel):
r"""A class for the Tweedie distribution.
A Tweedie distribution with mean :math:`\mu = \mathrm{E}(Y)` is uniquely
defined by its mean-variance relationship
:math:`\mathrm{var}(Y) \propto \mu^{\mathrm{power}}`.
Special cases are:
====== ================
Power Distribution
====== ================
0 Normal
1 Poisson
(1, 2) Compound Poisson
2 Gamma
3 Inverse Gaussian
====== ================
Parameters
----------
power : float, optional (default=0)
The variance power of the `unit_variance`
:math:`v(\mu) = \mu^{\mathrm{power}}`. For
:math:`0 < \mathrm{power} < 1`, no distribution exists.
"""
upper_bound = np.Inf
include_upper_bound = False
def __init__(self, power=0):
# validate power and set _upper_bound, _include_upper_bound attrs
self.power = power
@property
def lower_bound(self) -> Union[float, int]:
"""Return the lowest value of ``y`` allowed."""
if self.power <= 0:
return -np.Inf
if self.power >= 1:
return 0
raise ValueError
@property
def include_lower_bound(self) -> bool:
"""Return whether ``lower_bound`` is allowed as a value of ``y``."""
if self.power <= 0:
return False
if (self.power >= 1) and (self.power < 2):
return True
if self.power >= 2:
return False
raise ValueError
@property
def power(self) -> float:
"""Return the Tweedie power parameter."""
return self._power
@power.setter
def power(self, power):
if not isinstance(power, (int, float)):
raise TypeError(f"power must be an int or float, input was {power}")
if (power > 0) and (power < 1):
raise ValueError("For 0<power<1, no distribution exists.")
# Prevents upcasting when working with 32-bit data
self._power = power if isinstance(power, int) else np.float32(power)
def unit_variance(self, mu: np.ndarray) -> np.ndarray:
"""Compute the unit variance of a Tweedie distribution ``v(mu) = mu^power``.
Parameters
----------
mu : array-like, shape (n_samples,)
Predicted mean.
Returns
-------
numpy.ndarray, shape (n_samples,)
"""
p = self.power # noqa: F841
return numexpr.evaluate("mu ** p")
def unit_variance_derivative(self, mu: np.ndarray) -> np.ndarray:
r"""Compute the derivative of the unit variance of a Tweedie distribution.
Equation: :math:`v(\mu) = p \times \mu^{(p-1)}`.
Parameters
----------
mu : array-like, shape (n_samples,)
Predicted mean.
Returns
-------
numpy.ndarray, shape (n_samples,)
"""
p = self.power # noqa: F841
return numexpr.evaluate("p * mu ** (p - 1)")
def deviance(self, y, mu, sample_weight=None) -> float:
"""Compute the deviance.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=1)
Sample weights.
"""
p = self.power
y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
sample_weight = np.ones_like(y) if sample_weight is None else sample_weight
# NOTE: the dispersion parameter is only necessary to convey
# type information on account of a bug in Cython
if p == 0:
return normal_deviance(y, sample_weight, mu, dispersion=1.0)
if p == 1:
return poisson_deviance(y, sample_weight, mu, dispersion=1.0)
elif p == 2:
return gamma_deviance(y, sample_weight, mu, dispersion=1.0)
else:
return tweedie_deviance(y, sample_weight, mu, p=float(p))
def unit_deviance(self, y, mu):
"""Get the deviance of each observation."""
p = self.power
if p == 0: # Normal distribution
return (y - mu) ** 2
if p == 1: # Poisson distribution
return 2 * (special.xlogy(y, y / mu) - y + mu)
elif p == 2: # Gamma distribution
return 2 * (np.log(mu / y) + y / mu - 1)
else:
mu1mp = mu ** (1 - p)
return 2 * (
(np.maximum(y, 0) ** (2 - p)) / ((1 - p) * (2 - p))
- y * mu1mp / (1 - p)
+ mu * mu1mp / (2 - p)
)
def _rowwise_gradient_hessian(
self, link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
):
f = None
if self.power == 0 and isinstance(link, IdentityLink):
f = normal_identity_rowwise_gradient_hessian
elif self.power == 1 and isinstance(link, LogLink):
f = poisson_log_rowwise_gradient_hessian
elif self.power == 2 and isinstance(link, LogLink):
f = gamma_log_rowwise_gradient_hessian
elif 1 < self.power < 2 and isinstance(link, LogLink):
f = partial(tweedie_log_rowwise_gradient_hessian, p=self.power)
if f is not None:
return f(y, sample_weight, eta, mu, gradient_rows, hessian_rows)
return super()._rowwise_gradient_hessian(
link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
)
def _eta_mu_deviance(
self,
link: Link,
factor: float,
cur_eta: np.ndarray,
X_dot_d: np.ndarray,
y: np.ndarray,
sample_weight: np.ndarray,
eta_out: np.ndarray,
mu_out: np.ndarray,
):
f = None
if self.power == 0 and isinstance(link, IdentityLink):
f = normal_identity_eta_mu_deviance
elif self.power == 1 and isinstance(link, LogLink):
f = poisson_log_eta_mu_deviance
elif self.power == 2 and isinstance(link, LogLink):
f = gamma_log_eta_mu_deviance
elif 1 < self.power < 2 and isinstance(link, LogLink):
f = partial(tweedie_log_eta_mu_deviance, p=self.power)
if f is not None:
return f(cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out, factor)
return super()._eta_mu_deviance(
link, factor, cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out
)
def log_likelihood(self, y, mu, sample_weight=None, dispersion=None) -> float:
r"""Compute the log likelihood.
For ``1 < power < 2``, we use the series approximation by Dunn and Smyth
(2005) to compute the normalization term.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=1)
Sample weights.
dispersion : float, optional (default=None)
Dispersion parameter :math:`\phi`. Estimated if ``None``.
"""
p = self.power
y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
sample_weight = np.ones_like(y) if sample_weight is None else sample_weight
if (p != 1) and (dispersion is None):
dispersion = self.dispersion(y, mu, sample_weight)
if p == 0:
return normal_log_likelihood(y, sample_weight, mu, float(dispersion))
if p == 1:
# NOTE: the dispersion parameter is only necessary to convey
# type information on account of a bug in Cython
return poisson_log_likelihood(y, sample_weight, mu, 1.0)
elif p == 2:
return gamma_log_likelihood(y, sample_weight, mu, float(dispersion))
elif p < 2:
return tweedie_log_likelihood(
y, sample_weight, mu, float(p), float(dispersion)
)
else:
raise NotImplementedError
def dispersion(self, y, mu, sample_weight=None, ddof=1, method="pearson") -> float:
r"""Estimate the dispersion parameter :math:`\phi`.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights or exposure to which variance is inversely proportional.
ddof : int, optional (default=1)
Degrees of freedom consumed by the model for ``mu``.
        method : {'pearson', 'deviance'}, optional (default='pearson')
Whether to base the estimate on the Pearson residuals or the deviance.
Returns
-------
float
"""
p = self.power # noqa: F841
y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
if method == "pearson":
formula = "((y - mu) ** 2) / (mu ** p)"
if sample_weight is None:
return numexpr.evaluate(formula).sum() / (len(y) - ddof)
else:
formula = f"sample_weight * {formula}"
return numexpr.evaluate(formula).sum() / (sample_weight.sum() - ddof)
return super().dispersion(
y, mu, sample_weight=sample_weight, ddof=ddof, method=method
)
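# A minimal usage sketch (hypothetical values; assumes the constructor accepts a
# ``power`` keyword, as the subclasses below do): the compound Poisson-gamma case
# with power=1.5 permits exact zeros in y while keeping variance v(mu) = mu ** 1.5.
def _example_tweedie_usage():
    dist = TweedieDistribution(power=1.5)
    y = np.array([0.0, 1.0, 2.0, 5.0])
    mu = np.array([0.5, 1.0, 2.5, 4.0])
    return dist.unit_variance(mu), dist.deviance(y, mu)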
class NormalDistribution(TweedieDistribution):
"""Class for the Normal (a.k.a. Gaussian) distribution."""
def __init__(self):
super().__init__(power=0)
class PoissonDistribution(TweedieDistribution):
"""Class for the scaled Poisson distribution."""
def __init__(self):
super().__init__(power=1)
class GammaDistribution(TweedieDistribution):
"""Class for the Gamma distribution."""
def __init__(self):
super().__init__(power=2)
class InverseGaussianDistribution(TweedieDistribution):
"""Class for the scaled Inverse Gaussian distribution."""
def __init__(self):
super().__init__(power=3)
class GeneralizedHyperbolicSecant(ExponentialDispersionModel):
"""A class for the Generalized Hyperbolic Secant (GHS) distribution.
The GHS distribution is for targets ``y`` in ``(-∞, +∞)``.
"""
lower_bound = -np.Inf
upper_bound = np.Inf
include_lower_bound = False
include_upper_bound = False
def unit_variance(self, mu: np.ndarray) -> np.ndarray:
"""Get the unit-level expected variance.
See superclass documentation.
Parameters
----------
mu : array-like or float
Returns
-------
array-like
"""
return 1 + mu**2
def unit_variance_derivative(self, mu: np.ndarray) -> np.ndarray:
"""Get the derivative of the unit variance.
See superclass documentation.
Parameters
----------
mu : array-like or float
Returns
-------
array-like
"""
return 2 * mu
def unit_deviance(self, y: np.ndarray, mu: np.ndarray) -> np.ndarray:
"""Get the unit-level deviance.
See superclass documentation.
Parameters
----------
y : array-like
mu : array-like
Returns
-------
array-like
"""
return 2 * y * (np.arctan(y) - np.arctan(mu)) + np.log(
(1 + mu**2) / (1 + y**2)
)
class BinomialDistribution(ExponentialDispersionModel):
"""A class for the Binomial distribution.
The Binomial distribution is for targets ``y`` in ``[0, 1]``.
"""
lower_bound = 0
upper_bound = 1
include_lower_bound = True
include_upper_bound = True
def __init__(self):
return
def unit_variance(self, mu: np.ndarray) -> np.ndarray:
"""Get the unit-level expected variance.
See superclass documentation.
Parameters
----------
mu : array-like
Returns
-------
array-like
"""
return mu * (1 - mu)
def unit_variance_derivative(self, mu):
"""Get the derivative of the unit variance.
See superclass documentation.
Parameters
----------
mu : array-like or float
Returns
-------
array-like
"""
return 1 - 2 * mu
def unit_deviance(self, y: np.ndarray, mu: np.ndarray) -> np.ndarray:
"""Get the unit-level deviance.
See superclass documentation.
Parameters
----------
y : array-like
mu : array-like
Returns
-------
array-like
"""
# see Wooldridge and Papke (1996) for the fractional case
return -2 * (special.xlogy(y, mu) + special.xlogy(1 - y, 1 - mu))
def _rowwise_gradient_hessian(
self, link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
):
if isinstance(link, LogitLink):
return binomial_logit_rowwise_gradient_hessian(
y, sample_weight, eta, mu, gradient_rows, hessian_rows
)
return super()._rowwise_gradient_hessian(
link, y, sample_weight, eta, mu, gradient_rows, hessian_rows
)
def _eta_mu_deviance(
self,
link: Link,
factor: float,
cur_eta: np.ndarray,
X_dot_d: np.ndarray,
y: np.ndarray,
sample_weight: np.ndarray,
eta_out: np.ndarray,
mu_out: np.ndarray,
):
if isinstance(link, LogitLink):
return binomial_logit_eta_mu_deviance(
cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out, factor
)
return super()._eta_mu_deviance(
link, factor, cur_eta, X_dot_d, y, sample_weight, eta_out, mu_out
)
def log_likelihood(self, y, mu, sample_weight=None, dispersion=1) -> float:
"""Compute the log likelihood.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=1)
Sample weights.
dispersion : float, optional (default=1)
Ignored.
"""
ll = special.xlogy(y, mu) + special.xlogy(1 - y, 1 - mu)
return np.sum(ll) if sample_weight is None else np.dot(ll, sample_weight)
def dispersion(self, y, mu, sample_weight=None, ddof=1, method="pearson") -> float:
r"""Estimate the dispersion parameter :math:`\phi`.
Parameters
----------
y : array-like, shape (n_samples,)
Target values.
mu : array-like, shape (n_samples,)
Predicted mean.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights or exposure to which variance is inversely proportional.
ddof : int, optional (default=1)
Degrees of freedom consumed by the model for ``mu``.
        method : {'pearson', 'deviance'}, optional (default='pearson')
Whether to base the estimate on the Pearson residuals or the deviance.
Returns
-------
float
"""
y, mu, sample_weight = _as_float_arrays(y, mu, sample_weight)
if method == "pearson":
formula = "((y - mu) ** 2) / (mu * (1 - mu))"
if sample_weight is None:
return numexpr.evaluate(formula).sum() / (len(y) - ddof)
else:
formula = f"sample_weight * {formula}"
return numexpr.evaluate(formula).sum() / (sample_weight.sum() - ddof)
return super().dispersion(
y, mu, sample_weight=sample_weight, ddof=ddof, method=method
)
def guess_intercept(
y: np.ndarray,
sample_weight: np.ndarray,
link: Link,
distribution: ExponentialDispersionModel,
eta: Union[np.ndarray, float] = None,
):
"""
Say we want to find the scalar `b` that minimizes ``LL(eta + b)``, with \
``eta`` fixed.
An exact solution exists for Tweedie distributions with a log link and for
the normal distribution with identity link. An exact solution also exists
for the case of logit with no offset.
If the distribution and corresponding link are something else, we use the
Tweedie or normal solution, depending on the link function.
"""
avg_y = np.average(y, weights=sample_weight)
if isinstance(link, IdentityLink):
# This is only correct for normal. For other distributions, answer is unknown,
# but assume that we want sum(y) = sum(mu)
if eta is None:
return avg_y
avg_eta = eta if np.isscalar(eta) else np.average(eta, weights=sample_weight)
return avg_y - avg_eta
elif isinstance(link, LogLink):
# This is only correct for Tweedie
log_avg_y = np.log(avg_y)
assert np.isfinite(log_avg_y).all()
if eta is None:
return log_avg_y
mu = np.exp(eta)
if isinstance(distribution, TweedieDistribution):
p = distribution.power
else:
p = 1 # Like Poisson
if np.isscalar(mu):
first = np.log(y.dot(sample_weight) * mu ** (1 - p))
second = np.log(sample_weight.sum() * mu ** (2 - p))
else:
first = np.log((y * mu ** (1 - p)).dot(sample_weight))
second = np.log((mu ** (2 - p)).dot(sample_weight))
return first - second
elif isinstance(link, LogitLink):
log_odds = np.log(avg_y) - np.log(np.average(1 - y, weights=sample_weight))
if eta is None:
return log_odds
avg_eta = eta if np.isscalar(eta) else np.average(eta, weights=sample_weight)
return log_odds - avg_eta
else:
return link.link(y.dot(sample_weight))
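# A small sketch of the exact case described in the docstring (hypothetical inputs;
# assumes LogLink() and PoissonDistribution() can be constructed without arguments,
# as they are used elsewhere in this module): with a log link and no offset, the
# guess is simply the log of the weighted mean of y.
def _example_guess_intercept():
    y = np.array([1.0, 2.0, 4.0])
    w = np.ones_like(y)
    guess = guess_intercept(y, w, LogLink(), PoissonDistribution())
    return guess  # equals np.log(np.average(y, weights=w))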
def get_one_over_variance(
distribution: ExponentialDispersionModel,
link: Link,
mu: np.ndarray,
eta: np.ndarray,
dispersion,
sample_weight: np.ndarray,
):
"""
Get one over the variance.
For Tweedie: ``sigma_inv = sample_weight / (mu ** p)`` during optimization,
because ``phi = 1``.
For Binomial with Logit link: Simplifies to
``variance = phi / ( sample_weight * (exp(eta) + 2 + exp(-eta)))``.
More numerically accurate.
"""
if isinstance(distribution, BinomialDistribution) and isinstance(link, LogitLink):
max_float_for_exp = np.log(np.finfo(eta.dtype).max / 10)
if np.any(np.abs(eta) > max_float_for_exp):
eta = np.clip(eta, -max_float_for_exp, max_float_for_exp) # type: ignore
return sample_weight * (np.exp(eta) + 2 + np.exp(-eta)) / dispersion
return 1.0 / distribution.variance(
mu, dispersion=dispersion, sample_weight=sample_weight
)
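# A quick numerical check of the identity behind the binomial/logit shortcut in the
# docstring above: for mu = sigmoid(eta), mu * (1 - mu) equals
# 1 / (exp(eta) + 2 + exp(-eta)).
def _example_logit_variance_identity():
    eta = np.linspace(-4.0, 4.0, 9)
    mu = 1.0 / (1.0 + np.exp(-eta))
    return np.allclose(mu * (1.0 - mu), 1.0 / (np.exp(eta) + 2.0 + np.exp(-eta)))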
def _as_float_arrays(*args):
"""Convert to a float array, passing ``None`` through, and broadcast."""
never_broadcast = {} # type: ignore
maybe_broadcast = {}
always_broadcast = {}
for ix, arg in enumerate(args):
if isinstance(arg, (int, float)):
maybe_broadcast[ix] = np.array([arg], dtype="float")
elif arg is None:
never_broadcast[ix] = None
else:
always_broadcast[ix] = np.asanyarray(arg, dtype="float")
if always_broadcast and maybe_broadcast:
to_broadcast = {**always_broadcast, **maybe_broadcast}
_broadcast = np.broadcast_arrays(*to_broadcast.values())
broadcast = dict(zip(to_broadcast.keys(), _broadcast))
elif always_broadcast:
_broadcast = np.broadcast_arrays(*always_broadcast.values())
broadcast = dict(zip(always_broadcast.keys(), _broadcast))
else:
broadcast = maybe_broadcast # possibly `{}`
out = {**never_broadcast, **broadcast}
return [out[ix] for ix in range(len(args))]
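# A minimal sketch of the broadcasting behaviour of _as_float_arrays (hypothetical
# inputs): scalars are promoted to arrays and broadcast against array arguments,
# while None passes through untouched.
def _example_as_float_arrays():
    y, mu, w = _as_float_arrays(np.array([1.0, 2.0, 3.0]), 2.0, None)
    return y.shape, mu.shape, w  # (3,), (3,), None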
|
[
"numpy.clip",
"scipy.special.xlogy",
"numpy.less_equal",
"numpy.log",
"numpy.asanyarray",
"numpy.array",
"numpy.isfinite",
"numpy.greater_equal",
"numpy.multiply",
"numpy.less",
"numpy.greater",
"numpy.isscalar",
"numpy.exp",
"numpy.dot",
"numpy.maximum",
"numpy.arctan",
"numpy.abs",
"numpy.average",
"scipy.sparse.issparse",
"numpy.finfo",
"numpy.ones_like",
"numpy.sum",
"numpy.empty_like",
"functools.partial",
"numexpr.evaluate",
"numpy.float32"
] |
[((34790, 34826), 'numpy.average', 'np.average', (['y'], {'weights': 'sample_weight'}), '(y, weights=sample_weight)\n', (34800, 34826), True, 'import numpy as np\n'), ((10487, 10509), 'numpy.empty_like', 'np.empty_like', (['cur_eta'], {}), '(cur_eta)\n', (10500, 10509), True, 'import numpy as np\n'), ((10527, 10549), 'numpy.empty_like', 'np.empty_like', (['cur_eta'], {}), '(cur_eta)\n', (10540, 10549), True, 'import numpy as np\n'), ((12227, 12244), 'numpy.empty_like', 'np.empty_like', (['mu'], {}), '(mu)\n', (12240, 12244), True, 'import numpy as np\n'), ((12268, 12285), 'numpy.empty_like', 'np.empty_like', (['mu'], {}), '(mu)\n', (12281, 12285), True, 'import numpy as np\n'), ((20726, 20753), 'numexpr.evaluate', 'numexpr.evaluate', (['"""mu ** p"""'], {}), "('mu ** p')\n", (20742, 20753), False, 'import numexpr\n'), ((21216, 21253), 'numexpr.evaluate', 'numexpr.evaluate', (['"""p * mu ** (p - 1)"""'], {}), "('p * mu ** (p - 1)')\n", (21232, 21253), False, 'import numexpr\n'), ((16425, 16443), 'scipy.sparse.issparse', 'sparse.issparse', (['X'], {}), '(X)\n', (16440, 16443), False, 'from scipy import sparse, special\n'), ((16606, 16624), 'scipy.sparse.issparse', 'sparse.issparse', (['X'], {}), '(X)\n', (16621, 16624), False, 'from scipy import sparse, special\n'), ((20313, 20330), 'numpy.float32', 'np.float32', (['power'], {}), '(power)\n', (20323, 20330), True, 'import numpy as np\n'), ((21765, 21780), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (21777, 21780), True, 'import numpy as np\n'), ((25551, 25566), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (25563, 25566), True, 'import numpy as np\n'), ((29711, 29747), 'numpy.log', 'np.log', (['((1 + mu ** 2) / (1 + y ** 2))'], {}), '((1 + mu ** 2) / (1 + y ** 2))\n', (29717, 29747), True, 'import numpy as np\n'), ((32648, 32668), 'scipy.special.xlogy', 'special.xlogy', (['y', 'mu'], {}), '(y, mu)\n', (32661, 32668), False, 'from scipy import sparse, special\n'), ((32671, 32699), 'scipy.special.xlogy', 'special.xlogy', (['(1 - y)', '(1 - mu)'], {}), '(1 - y, 1 - mu)\n', (32684, 32699), False, 'from scipy import sparse, special\n'), ((32715, 32725), 'numpy.sum', 'np.sum', (['ll'], {}), '(ll)\n', (32721, 32725), True, 'import numpy as np\n'), ((32756, 32781), 'numpy.dot', 'np.dot', (['ll', 'sample_weight'], {}), '(ll, sample_weight)\n', (32762, 32781), True, 'import numpy as np\n'), ((35079, 35095), 'numpy.isscalar', 'np.isscalar', (['eta'], {}), '(eta)\n', (35090, 35095), True, 'import numpy as np\n'), ((35101, 35139), 'numpy.average', 'np.average', (['eta'], {'weights': 'sample_weight'}), '(eta, weights=sample_weight)\n', (35111, 35139), True, 'import numpy as np\n'), ((35270, 35283), 'numpy.log', 'np.log', (['avg_y'], {}), '(avg_y)\n', (35276, 35283), True, 'import numpy as np\n'), ((35395, 35406), 'numpy.exp', 'np.exp', (['eta'], {}), '(eta)\n', (35401, 35406), True, 'import numpy as np\n'), ((35559, 35574), 'numpy.isscalar', 'np.isscalar', (['mu'], {}), '(mu)\n', (35570, 35574), True, 'import numpy as np\n'), ((36941, 36992), 'numpy.clip', 'np.clip', (['eta', '(-max_float_for_exp)', 'max_float_for_exp'], {}), '(eta, -max_float_for_exp, max_float_for_exp)\n', (36948, 36992), True, 'import numpy as np\n'), ((37508, 37538), 'numpy.array', 'np.array', (['[arg]'], {'dtype': '"""float"""'}), "([arg], dtype='float')\n", (37516, 37538), True, 'import numpy as np\n'), ((16704, 16721), 'numpy.multiply', 'np.multiply', (['X', 'W'], {}), '(X, W)\n', (16715, 16721), True, 'import numpy as np\n'), ((17841, 17881), 'numpy.dot', 
'np.dot', (['pearson_residuals', 'sample_weight'], {}), '(pearson_residuals, sample_weight)\n', (17847, 17881), True, 'import numpy as np\n'), ((31081, 31101), 'scipy.special.xlogy', 'special.xlogy', (['y', 'mu'], {}), '(y, mu)\n', (31094, 31101), False, 'from scipy import sparse, special\n'), ((31104, 31132), 'scipy.special.xlogy', 'special.xlogy', (['(1 - y)', '(1 - mu)'], {}), '(1 - y, 1 - mu)\n', (31117, 31132), False, 'from scipy import sparse, special\n'), ((36889, 36900), 'numpy.abs', 'np.abs', (['eta'], {}), '(eta)\n', (36895, 36900), True, 'import numpy as np\n'), ((37653, 37686), 'numpy.asanyarray', 'np.asanyarray', (['arg'], {'dtype': '"""float"""'}), "(arg, dtype='float')\n", (37666, 37686), True, 'import numpy as np\n'), ((2996, 3033), 'numpy.greater_equal', 'np.greater_equal', (['x', 'self.lower_bound'], {}), '(x, self.lower_bound)\n', (3012, 3033), True, 'import numpy as np\n'), ((3055, 3089), 'numpy.less_equal', 'np.less_equal', (['x', 'self.upper_bound'], {}), '(x, self.upper_bound)\n', (3068, 3089), True, 'import numpy as np\n'), ((3186, 3223), 'numpy.greater_equal', 'np.greater_equal', (['x', 'self.lower_bound'], {}), '(x, self.lower_bound)\n', (3202, 3223), True, 'import numpy as np\n'), ((3225, 3253), 'numpy.less', 'np.less', (['x', 'self.upper_bound'], {}), '(x, self.upper_bound)\n', (3232, 3253), True, 'import numpy as np\n'), ((3386, 3417), 'numpy.greater', 'np.greater', (['x', 'self.lower_bound'], {}), '(x, self.lower_bound)\n', (3396, 3417), True, 'import numpy as np\n'), ((3419, 3453), 'numpy.less_equal', 'np.less_equal', (['x', 'self.upper_bound'], {}), '(x, self.upper_bound)\n', (3432, 3453), True, 'import numpy as np\n'), ((3549, 3580), 'numpy.greater', 'np.greater', (['x', 'self.lower_bound'], {}), '(x, self.lower_bound)\n', (3559, 3580), True, 'import numpy as np\n'), ((3582, 3610), 'numpy.less', 'np.less', (['x', 'self.upper_bound'], {}), '(x, self.upper_bound)\n', (3589, 3610), True, 'import numpy as np\n'), ((29679, 29691), 'numpy.arctan', 'np.arctan', (['y'], {}), '(y)\n', (29688, 29691), True, 'import numpy as np\n'), ((29694, 29707), 'numpy.arctan', 'np.arctan', (['mu'], {}), '(mu)\n', (29703, 29707), True, 'import numpy as np\n'), ((35299, 35321), 'numpy.isfinite', 'np.isfinite', (['log_avg_y'], {}), '(log_avg_y)\n', (35310, 35321), True, 'import numpy as np\n'), ((35938, 35951), 'numpy.log', 'np.log', (['avg_y'], {}), '(avg_y)\n', (35944, 35951), True, 'import numpy as np\n'), ((36080, 36096), 'numpy.isscalar', 'np.isscalar', (['eta'], {}), '(eta)\n', (36091, 36096), True, 'import numpy as np\n'), ((36102, 36140), 'numpy.average', 'np.average', (['eta'], {'weights': 'sample_weight'}), '(eta, weights=sample_weight)\n', (36112, 36140), True, 'import numpy as np\n'), ((36841, 36860), 'numpy.finfo', 'np.finfo', (['eta.dtype'], {}), '(eta.dtype)\n', (36849, 36860), True, 'import numpy as np\n'), ((37059, 37071), 'numpy.exp', 'np.exp', (['(-eta)'], {}), '(-eta)\n', (37065, 37071), True, 'import numpy as np\n'), ((16557, 16574), 'numpy.multiply', 'np.multiply', (['X', 'W'], {}), '(X, W)\n', (16568, 16574), True, 'import numpy as np\n'), ((22569, 22593), 'scipy.special.xlogy', 'special.xlogy', (['y', '(y / mu)'], {}), '(y, y / mu)\n', (22582, 22593), False, 'from scipy import sparse, special\n'), ((23490, 23549), 'functools.partial', 'partial', (['tweedie_log_rowwise_gradient_hessian'], {'p': 'self.power'}), '(tweedie_log_rowwise_gradient_hessian, p=self.power)\n', (23497, 23549), False, 'from functools import partial\n'), ((24465, 24515), 
'functools.partial', 'partial', (['tweedie_log_eta_mu_deviance'], {'p': 'self.power'}), '(tweedie_log_eta_mu_deviance, p=self.power)\n', (24472, 24515), False, 'from functools import partial\n'), ((35961, 36001), 'numpy.average', 'np.average', (['(1 - y)'], {'weights': 'sample_weight'}), '(1 - y, weights=sample_weight)\n', (35971, 36001), True, 'import numpy as np\n'), ((37041, 37052), 'numpy.exp', 'np.exp', (['eta'], {}), '(eta)\n', (37047, 37052), True, 'import numpy as np\n'), ((22671, 22685), 'numpy.log', 'np.log', (['(mu / y)'], {}), '(mu / y)\n', (22677, 22685), True, 'import numpy as np\n'), ((27405, 27430), 'numexpr.evaluate', 'numexpr.evaluate', (['formula'], {}), '(formula)\n', (27421, 27430), False, 'import numexpr\n'), ((27551, 27576), 'numexpr.evaluate', 'numexpr.evaluate', (['formula'], {}), '(formula)\n', (27567, 27576), False, 'import numexpr\n'), ((33814, 33839), 'numexpr.evaluate', 'numexpr.evaluate', (['formula'], {}), '(formula)\n', (33830, 33839), False, 'import numexpr\n'), ((33960, 33985), 'numexpr.evaluate', 'numexpr.evaluate', (['formula'], {}), '(formula)\n', (33976, 33985), False, 'import numexpr\n'), ((22790, 22806), 'numpy.maximum', 'np.maximum', (['y', '(0)'], {}), '(y, 0)\n', (22800, 22806), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import collections
# import itertools
import numpy as np
# from sklearn import linear_model as linear # for VAR
# from .utils import sliding_window as window
# from .utils.distance import kmeans, dists_sq
# from .utils import distance as dist
# from python import compress
# ================================================================ shifts lut
SHIFT_PAIRS_16 = [
(7, 1), # ~0 - .5 = ~-.5
(3, 1), # .125 - .5 = -.375
(2, 1), # .25 - .5 = -.25
# (4, 2), # .0625 - .25 = -.1875
    (3, 2),   # .125  - .25   = -.125
(4, 3), # .0625 - .125 = -.0625
(0, 0), # 1 - 1 = 0
(3, 4), # .125 - .0625 = .0625
    (2, 3),   # .25   - .125  = .125
(2, 4), # .25 - .0625 = .1875
(1, 2), # .5 - .25 = .25
(1, 3), # .5 - .125 = .375
(0, 1), # 1 - .5 = .5
(0, 2), # 1 - .25 = .75
(0, 3), # 1 - .125 = .875
(0, 4), # 1 - .0625 = .9375
(0, 7), # 1 - ~0 = ~1
]
# should be equivalent to `all_shifts(max_shift=5, omit_duplicates=True)`
# EDIT: wait, no, not true because we have shifts of 7 at the ends
SHIFT_PAIRS_26 = [
(7, 1), # ~0 - .5 = ~-.5
    (5, 1),   # .03125- .5    = -.46875   # added
(4, 1), # .0625 - .5 = -.4375 # added, max 4
(3, 1), # .125 - .5 = -.375
(2, 1), # .25 - .5 = -.25
(5, 2), # .03125- .25 = -.21875
(4, 2), # .0625 - .25 = -.1875 # added, max 4
(3, 2), # .125 - .25 = -.125
(5, 3), # .03125- .125 = -.09375 # added
(4, 3), # .0625 - .125 = -.0625
(5, 4), # .03125- .0625 = -.03125 # added
(0, 0), # 1 - 1 = 0
(4, 5), # .0625 - .03125= .03125
(3, 4), # .125 - .0625 = .0625
(3, 5), # .125 - .03125= .09375 # added
    (2, 3),   # .25   - .125  = .125
(2, 4), # .25 - .0625 = .1875
(2, 5), # .25 - .03125= .21875 # added
(1, 2), # .5 - .25 = .25
(1, 3), # .5 - .125 = .375
(1, 4), # .5 - .0625 = .4375 # added, max 4
(1, 5), # .5 - .03125= .46875 # added
(0, 1), # 1 - .5 = .5
(0, 2), # 1 - .25 = .75
(0, 3), # 1 - .125 = .875
(0, 4), # 1 - .0625 = .9375
(0, 5), # 1 - .03125= .96875 # added
(0, 7), # 1 - ~0 = ~1
]
def all_shifts(max_shift=-1, omit_duplicates=True):
vals = {}
nbits = 8
    x = 1 << nbits  # reference val; 256 for nbits = 8
if max_shift < 0:
max_shift = nbits - 1
if omit_duplicates:
vals[(0, 0)] = 0
for a in range(max_shift + 1):
for b in range(max_shift + 1):
if omit_duplicates and a == b:
continue
vals[(a, b)] = (x >> a) - (x >> b)
keys, coeffs = list(zip(*list(vals.items())))
keys = np.array(keys)
coeffs = np.array(coeffs)
order = np.argsort(coeffs)
# print "shift results:"
# print keys[order]
# print coeffs[order]
return keys[order], coeffs[order]
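# A minimal sketch (using the module's own helper): the returned shift pairs are
# ordered by their effective coefficient (256 >> a) - (256 >> b), so the
# coefficients come back sorted ascending.
def _example_all_shifts():
    keys, coeffs = all_shifts(max_shift=4)
    assert np.all(np.diff(coeffs) >= 0)
    return list(zip(map(tuple, keys), coeffs))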
# okay, looks like (according to test immediately below) these values are
# identical to what's in our existing LUT; this makes sense given that impls
# are basically identical
def _i16_for_shifts(pos_shift, neg_shift, nbits=8):
start_val = 1 << nbits # 256 for nbits = 8
return (start_val >> pos_shift) - (start_val >> neg_shift)
# TODO actual unit test
def _test_shift_coeffs(nbits=8):
shifts, shift_coeffs = all_shifts()
for (pos_shift, neg_shift), coeff in zip(shifts, shift_coeffs):
assert _i16_for_shifts(pos_shift, neg_shift) == coeff
for val in range(-128, 128):
two_shifts_val = (val >> pos_shift) - (val >> neg_shift)
# ya, this fails; multiply and rshift != using shifts directly
# assert (val * coeff) >> nbits == two_shifts_val
# this way works; requires two multiplies though...
pos_coef = 1 << (nbits - pos_shift)
neg_coef = 1 << (nbits - neg_shift)
pos = (val * pos_coef) >> nbits
neg = (val * neg_coef) >> nbits
assert pos - neg == two_shifts_val
# this way also fails
# pos = val * pos_coef
# neg = val * neg_coef
# assert (pos - neg) >> nbits == two_shifts_val
# def coeff_lut():
# """create lookup table `T` such that `T[coeff]` yields the two indices
# whose associated coefficients are immediately above and below `coeff`"""
# shifts, shift_coeffs = all_shifts()
SHIFTS, SHIFT_COEFFS = all_shifts()
# ================================================================ funcs
def binary_search(array, val):
M = len(array)
first = 0
middle = int(M / 2)
last = M - 1
while (first <= last):
middle_val = array[middle]
if middle_val < val:
first = middle + 1
elif middle_val == val:
return middle
else: # middle_val > val
last = middle - 1
middle = int((first + last) / 2)
return middle
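# A small usage sketch: binary_search is used below to map a regression coefficient
# onto (approximately) the nearest entry of the sorted SHIFT_COEFFS table; the
# returned index selects a (pos_shift, neg_shift) pair.
def _example_binary_search():
    idx = binary_search(SHIFT_COEFFS, 100)
    return SHIFTS[idx], SHIFT_COEFFS[idx]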
class OnlineRegressor(object):
def __init__(self, block_sz=8, verbose=0, method='linreg',
shifts=SHIFTS, shift_coeffs=SHIFT_COEFFS, numbits=8, ntaps=1):
# self.prev0 = 0
# self.prev1 = 0
# self.mod = 1 << nbits
# self.shift0 = 0
# self.shift1 = 1
self.block_sz = block_sz
self.verbose = verbose
self.method = method
self.shifts = shifts
self.shift_coeffs = shift_coeffs
self.numbits = numbits
self.ntaps = ntaps
self.last_val = 0
self.last_delta = 0
self.coef = 0
self.coef = 256
self.counter = 0
# self.counter = 256 << (1 + self.numbits - 8) # TODO indirect to learning rate, not just 1 # noqa
# self.counter = 8 << 1 # equivalent to adding 8 to round to nearest?
# self.counter = self.coef
self.t = 0
self.grad_counter = 0
self.offset = 0
self.offset_counter = 0
shift_by = (1 + self.numbits - 8)
self.coeffs = np.zeros(self.ntaps, dtype=np.int32) + 256
self.counters = np.zeros(self.ntaps, dtype=np.int32) + (256 << shift_by)
# self.approx_256_over_x = 1
self.Sxy = 0
self.Sxx = 0
self.errs = []
# print "using shifts, coeffs:"
# print shifts
# print shift_coeffs
# for logging
# self.best_idx_offset_counts = np.zeros(3, dtype=np.int64)
self.best_idx_counts = np.zeros(len(self.shifts), dtype=np.int64)
# counts_len = len(self.shifts) if method == 'linreg' else 512
# self.best_idx_counts = np.zeros(counts_len, dtype=np.int64)
self.best_coef_counts = collections.Counter()
self.best_offset_counts = collections.Counter()
def feed_group(self, group):
pass # TODO determine optimal filter here
# errhat = a*x0 - b*x0 - a*x1 + b*x1
# = a(x0 - x1) + b(x1 - x0)
# = c(x0 - x1), where c = (a - b)
#
# we should compute c, and find shifts (which correspond to a, b) that
# approximate it well; also note that errhat is prediction of the delta
#
# this is just linear regression between (x0 - x1) and new val, with
# some extra logic at the end to get shifts based on regression coeff
# deltas; these are our target variable
deltas = np.zeros(group.size, dtype=group.dtype)
deltas[1:] = group[1:] - group[:-1]
deltas[0] = group[0] - self.last_val
self.last_val = group[-1]
# deltas from previous time step; these are our indep variable
diffs = np.zeros(group.size, dtype=group.dtype)
diffs[1:] = deltas[:-1]
diffs[0] = self.last_delta
self.last_delta = deltas[-1]
x = diffs
y = deltas
# linear regression
if self.method == 'linreg':
Sxy = np.sum(x * y)
Sxx = np.sum(x * x)
# print "x, y dtypes: ", x.dtype, y.dtype
# print "Sxx, Sxy dtypes: ", Sxx.dtype, Sxy.dtype
coeff = (Sxy << 8) / Sxx # shift to mirror what we'll need to do in C
idx = binary_search(self.shift_coeffs, coeff)
def compute_errs(x, y, shifts):
predictions = (x >> shifts[0]) - (x >> shifts[1])
return y - predictions
# These are commented out because, empirically, they're
# *never* chosen
#
# best_idx_offset = 0
#
# def compute_total_cost(errs, block_sz=self.block_sz):
# raw_costs = compress.nbits_cost(errs)
# block_costs_rows = raw_costs.reshape(-1, block_sz)
# block_costs = np.max(block_costs_rows, axis=1)
# return np.sum(block_costs)
#
# cost = compute_total_cost(errs)
# if idx > 0:
# errs2 = compute_errs(x, y, SHIFTS[idx - 1])
# cost2 = compute_total_cost(errs)
# if cost2 < cost:
# ret = errs2
# best_idx_offset = -1
# if idx < (len(SHIFTS) - 1):
# errs3 = compute_errs(x, y, SHIFTS[idx + 1])
# cost3 = compute_total_cost(errs)
# if cost3 < cost:
# ret = errs3
# best_idx_offset = 1
# self.best_idx_offset_counts[best_idx_offset] += 1
errs = compute_errs(x, y, self.shifts[idx])
self.best_idx_counts[idx] += 1 # for logging
elif self.method == 'gradient':
# update coeffs using last entry in each block
# learning_rate_shift = 7 # learning rate of 2^(-learning_rate_shift)
# learning_rate_shift = 8 # learning rate of 2^(-learning_rate_shift)
# learning_rate_shift = 12 # learning rate of 2^(-learning_rate_shift)
# learning_rate_shift = 4 # learning rate of 2^(-learning_rate_shift)
# learning_rate_shift = 2 # learning rate of 2^(-learning_rate_shift)
predictions = (x * self.coef) >> int(min(self.numbits, 8))
for tap_idx in range(1, self.ntaps):
predictions[tap_idx:] += (x[:-tap_idx] * self.coeffs[tap_idx])
predictions += self.offset
errs = y - predictions
for b in range(8): # for each block
# only update based on a few values for efficiency
which_idxs = 8 * b + np.array([3, 7]) # downsample by 4
# which_idxs = 8 * b + np.array([1, 3, 5, 7]) # downsample by 2
grads = 0
# grads = np.zeros(self.ntaps)
# offsets = 0
for idx in which_idxs:
xval = x[idx]
# xval = x[idx] >> (self.numbits - 8)
# grad = int(-errs[idx] * x[idx]) >> 8
# grad = int(-errs[idx] * x[idx]) // 256
# y0 = np.abs(self.approx_256_over_x) * np.sign(xval)
# y0 = 1 + (256 - xval) >> 8
# y0 = 3 - ((3 * xval) >> 8)
# grad = int(-(errs[idx] << 8) / xval) if xval != 0 else 0 # works great
# self.counter -= grad # equivalent to above two lines
# if self.t % 100 == 0:
# print "grad:", grad
# continue
# # xabs = self.t # TODO rm
# xabs = np.abs(xval)
# if xabs == 0:
# lzcnt = self.numbits
# else:
# lzcnt = self.numbits - 1 - int(np.log2(xabs))
# lzcnt = max(0, lzcnt - 1) # round up to nearest power of 2
# # lzcnt = min(15, lzcnt + 1) # round up to nearest power of 2
# # numerator = 1 << self.numbits
# # recip = 1 << (lzcnt - 8) if lzcnt >= 8 else
# # recip = np.sign(xval) << (8 + lzcnt)
# shift_amt = max(0, lzcnt - (self.numbits - 8)) # usually 0, maybe 1 sometimes
# recip = (1 << shift_amt) * np.sign(xval)
# grad = int(-errs[idx] * recip)
# # grad = int(grad / len(which_idxs))
# normal grad descent
# grad = int(-errs[idx] * np.sign(xval)) # div by sqrt(hessian)
# grad = int(-errs[idx] * xval) >> self.numbits # true gradient
# approx newton step for log(nbits)
err = errs[idx]
# if False: # TODO rm
# if self.numbits > 8:
# grad = int(-(1 + err)) if err > 0 else int(-(err - 1))
# else:
# grad = int(-err) # don't add 1
# self.grad_counter += (grad - (self.grad_counter >> 8))
# wtf this works so well for 16b, despite ignoring sign of x...
# (when also only shifting counter by learning rate, not
# an additional 8)
# grad = -err
# grad = -(err + np.sign(err)) * np.sign(xval)
# grad = -err * np.sign(xval)
# these both seem to work pretty well; prolly need to directly
# compare them
# grad = -err * np.sign(xval)
# grad = -np.sign(err) * xval # significantly better than prev line
grad = np.sign(err) * xval # significantly better than prev line
# ^ duh; above is minimizer for L1 loss
# grad = -np.sign(err) * np.sign(xval) << (self.numbits - 8)
# sub_from = ((1 << self.numbits) - 1) * np.sign(xval)
# approx_recip_x = sub_from - xval
# grad = -np.sign(err) * approx_recip_x
grads += int(grad)
# grads += grad >> 1 # does this help with overflow?
# simulate int8 overflow, adjusted for fact that we do 8 blocks
# per group (so 1024, 2048 instead of 128, 256)
mod = int(1 << self.numbits)
offset = mod // 2
grads = ((grads + offset) % mod) - offset
# grads = ((grads + 1024) % 2048) - 1024 # wrecks accuracy
# grads = ((grads + 8192) % 16384) - 8192 # no effect
self.errs.append(err)
# offsets += np.sign(err) # optimize bias for l1 loss
# this is the other one we should actually consider doing
#
# grad = int(-errs[idx] * np.sign(xval))
# # approximation of what we'd end up doing with a LUT
# shift_to_just_4b = self.numbits - 4
# # y0 = ((xval >> shift_to_just_4b) + 1) << shift_to_just_4b
# shifted_xval = xval >> shift_to_just_4b
# if shifted_xval != 0:
# y0 = int(256. / shifted_xval) << shift_to_just_4b
# else:
# y0 = 16*np.sign(xval) << shift_to_just_4b
# # y0 = y0 * int(2 - (xval * y0 / 256)) # diverges
# y0 = int(256. / xval) if xval else 0
# y0 = (1 << int(8 - np.floor(np.log2(xval)))) * np.sign(xval)
# y0 = 4 * np.sign(xval)
# self.approx_256_over_x = int( y0*(2 - (int(xval*y0) >> 8)) ) # noqa # doesn't work
# grad = int(-errs[idx] * self.approx_256_over_x)
# grad = int(-errs[idx] * y0)
# grad = int(-errs[idx] * xval) # works
# grad = int(-errs[idx] * 2*np.sign(xval))
# this_best_coef = self.coef - grad
# self.counter += this_best_coef - self.coef
# self.counter -= grad # equivalent to above two lines
# self.counter -= grad >> learning_rate_shift
# if self.t < 8:
# if self.t % 50 == 0:
# if (self.t < 5 == 0) and (b == 0):
# if (self.t % 50 == 0) and (b == 0):
# # print "errs: ", errs[-7], errs[-5], errs[-3], errs[-1]
# print "t, b = ", self.t, b
# print "errs: ", errs[-10:]
# print "xs: ", x[-10:]
# # print "sum(|xs|)", np.sum(np.abs(x))
# print "grads: ", grads
# print "counter:", self.counter
# # print "grad counter:", self.grad_counter
# # # print "recip, grad: ", recip, grad
# self.coef = self.counter >> min(self.t, learning_rate_shift)
# self.coef = self.counter >> learning_rate_shift
learning_rate_shift = 1
# learning_rate_shift = 4
# grad_learning_shift = 1
# grad_learning_shift = 4
# offset_learning_shift = 4
# compute average gradient for batch
# grad = int(4 * grads / len(which_idxs)) # div by 16
grad = int(grads / len(which_idxs)) # div by 64
# grad = grads
# self.grad_counter += grad - (self.grad_counter >> grad_learning_shift)
# self.grad_counter += grad
#
# this is the pair of lines that we know works well for UCR
#
# self.counter -= grad
self.counter += grad
self.coef = self.counter >> (learning_rate_shift + (self.numbits - 8))
# self.coef = self.counter >> learning_rate_shift
# self.coef -= (self.grad_counter >> grad_learning_shift) >> learning_rate_shift
# learn_shift = int(min(learning_rate_shift, np.log2(self.t + 1)))
# self.coef = self.counter >> (learn_shift + (self.numbits - 8))
# self.coef = self.counter >> learn_shift # for use with l1 loss
# self.coef -= (self.grad_counter >> grad_learning_shift) >> learn_shift
# self.coef -= (self.grad_counter >> grad_learning_shift) >> learning_rate_shift
# self.coef = 192 # global soln for olive oil
# quantize coeff by rounding to nearest 16; this seems to help
# quite a bit, at least for stuff that really should be double
# delta coded (starlight curves, presumably timestamps)
# self.coef = ((self.coef + 8) >> 4) << 4
self.coef = (self.coef >> 4) << 4 # just round towards 0
# self.coef = (self.coef >> 5) << 5 # just round towards 0
# like above, but use sign since shift and unshift round towards 0
# EDIT: no apparent difference, though perhaps cuz almost nothing
# actually wants a negative coef
# self.coef = ((self.coef + 8 * np.sign(self.coef)) >> 4) << 4
# offset = int(offsets / len(which_idxs)) # div by 64
# self.offset_counter += offset
# # self.offset = self.offset_counter >> offset_learning_shift
# self.offset = 0 # offset doesn't seem to help at all
# self.coef = 0 # why are estimates biased? TODO rm
# self.coef = 256
# self.coef = self.counter
# self.coef = np.clip(self.coef, -256, 256) # apparently important
# self.coef = np.clip(self.coef, -128, 256) # apparently important
# if self.t < 8:
# if self.t % 100 == 0:
# print "----- t = {}".format(self.t)
# print "offset, offset counter: ", self.offset, self.offset_counter
# # print "grad, grads sum: ", grad, grads
# # print "learn shift: ", learn_shift
# # print "errs[:10]: ", errs[:16]
# # print "-grads[:10]: ", errs[:16] * x[:16]
# # print "signed errs[:10]: ", errs[:16] * np.sign(x[:16])
# print "new coeff, grad_counter, counter = ", self.coef, self.grad_counter, self.counter
# # print "new coeff, grad counter = ", self.coef, self.grad_counter
# self.best_idx_counts[self.coef] += 1 # for logging
self.best_coef_counts[self.coef] += 1
self.best_offset_counts[self.offset] += 1
# errs -= self.offset # do this at the end to not mess up training
elif self.method == 'exact':
# print "using exact method"
if self.numbits <= 8:
predictions = (x * self.coef) >> self.numbits
else:
predictions = ((x >> 8) * self.coef)
errs = y - predictions
learn_shift = 6
# shift = learn_shift + 2*self.numbits - 8
shift = learn_shift
# only update based on a few values for efficiency
start_idx = 0 if self.t > 0 else 8
for idx in np.arange(start_idx, len(x), 8):
# xval = x[idx] # >> (self.numbits - 8)
# yval = y[idx] # >> (self.numbits - 8)
xval = x[idx] >> (self.numbits - 8)
yval = y[idx] >> (self.numbits - 8)
# # this way works just like global one, or maybe better
# self.Sxx += xval * xval
# self.Sxy += xval * yval
# moving average way; seemingly works just as well
# Exx = self.Sxx >> learn_shift
# Exy = self.Sxy >> learn_shift
Exy = self.Sxy >> shift
Exx = self.Sxx >> shift
# adjust_shift = 2 *
diff_xx = (xval * xval) - Exx
diff_xy = (xval * yval) - Exy
self.Sxx += diff_xx
self.Sxy += diff_xy
# if min(self.Sxy, self.Sxx) >= 1024:
# self.Sxx /= 2
# self.Sxy /= 2
Exy = self.Sxy >> shift
Exx = self.Sxx >> shift
self.coef = int((Exy << 8) / Exx) # works really well
# none of this really works
# # print "Exy, Exx = ", Exy, Exx
# print "xval, yval: ", xval, yval
# print "diff_xx, diff_xy, Exy, Exx = ", diff_xx, diff_xy, Exy, Exx
# # numerator = 1 << (2 * self.numbits)
# numerator = 256
# nbits = int(min(4, np.log2(Exx))) if Exx > 1 else 1
# assert numerator >= np.abs(Exx)
# # print "nbits: ", nbits
# recip = int((numerator >> nbits) / (Exx >> nbits)) << nbits
# # recip = recip >> (2 * self.numbits - 8)
# print "numerator, recip: ", numerator, recip
# self.coef = int(Exy * recip)
self.best_coef_counts[self.coef] += 1
self.t += 1
return errs
# while (first <= last) {
# if (array[middle] < search)
# first = middle + 1;
# else if (array[middle] == search) {
# printf("%d found at location %d.\n", search, middle+1);
# break;
# }
# else
# last = middle - 1;
# middle = (first + last)/2;
# }
def sub_online_regress(blocks, verbose=0, group_sz_blocks=8, max_shift=4,
only_16_shifts=True, method='linreg', numbits=8,
drop_first_half=False, **sink):
# drop_first_half=True, **sink):
blocks = blocks.astype(np.int32)
if only_16_shifts:
shifts = SHIFT_PAIRS_16
shift_coeffs = [_i16_for_shifts(*pair) for pair in shifts]
else:
shifts, shift_coeffs = all_shifts(max_shift=max_shift)
encoder = OnlineRegressor(block_sz=blocks.shape[1], verbose=verbose,
shifts=shifts, shift_coeffs=shift_coeffs,
method=method, numbits=numbits)
# print "using group_sz_blocks: ", group_sz_blocks
# print "using method: ", method
# print "using nbits: ", numbits
out = np.empty(blocks.shape, dtype=np.int32)
if group_sz_blocks < 1:
group_sz_blocks = len(blocks) # global model
ngroups = int(len(blocks) / group_sz_blocks)
for g in range(ngroups):
# if verbose and (g > 0) and (g % 100 == 0):
# print "running on block ", g
start_idx = g * group_sz_blocks
end_idx = start_idx + group_sz_blocks
group = blocks[start_idx:end_idx]
errs = encoder.feed_group(group.ravel())
out[start_idx:end_idx] = errs.reshape(group.shape)
out[end_idx:] = blocks[end_idx:]
if verbose > 1:
if method == 'linreg':
if group_sz_blocks != len(blocks):
import hipsterplot as hp # pip install hipsterplot
# hp.plot(x_vals=encoder.shift_coeffs, y_vals=encoder.best_idx_counts,
hp.plot(encoder.best_idx_counts,
num_x_chars=len(encoder.shift_coeffs), num_y_chars=12)
else:
coef_idx = np.argmax(encoder.best_idx_counts)
coef = encoder.shift_coeffs[coef_idx]
print("global linreg coeff: ", coef)
else:
coeffs_counts = np.array(encoder.best_coef_counts.most_common())
print("min, max coeff: {}, {}".format(
coeffs_counts[:, 0].min(), coeffs_counts[:, 0].max()))
# print("most common (coeff, counts):\n", coeffs_counts[:16])
# bias_counts = np.array(encoder.best_offset_counts.most_common())
# print "most common (bias, counts):\n", bias_counts[:16]
errs = np.array(encoder.errs)
print("raw err mean, median, std, >0 frac: {}, {}, {}, {}".format(
errs.mean(), np.median(errs), errs.std(), np.mean(errs > 0)))
if drop_first_half and method == 'gradient':
keep_idx = len(out) // 2
out[:keep_idx] = out[keep_idx:(2*keep_idx)]
print("NOTE: duplicating second half of data into first half!!" \
" (blocks {}:)".format(keep_idx))
return out
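# A hypothetical invocation sketch (synthetic data, default 'linreg' method): the
# output has the same shape as the input blocks and holds the per-sample
# prediction errors.
def _example_sub_online_regress():
    rng = np.random.RandomState(0)
    blocks = rng.randint(-64, 64, size=(64, 8)).astype(np.int32)
    errs = sub_online_regress(blocks, verbose=0)
    return errs.shape  # (64, 8)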
def _test_moving_avg(x0=0):
# vals = np.zeros(5, dtype=np.int32) + 100
vals = np.zeros(5, dtype=np.int32) - 100
shft = 3
counter = x0 << shft
xhats = []
for v in vals:
xhat = counter >> shft
xhats.append(xhat)
counter += (v - xhat)
print("vals: ", vals)
print("xhats: ", xhats)
# ================================================================ main
def main():
np.set_printoptions(formatter={'float': lambda x: '{:.3f}'.format(x)})
# print "all shifts:\n", all_shifts()
# _test_shift_coeffs()
_test_moving_avg()
# print "shifts_16, coeffs"
# print SHIFT_PAIRS_16
# print [_i16_for_shifts(*pair) for pair in SHIFT_PAIRS_16]
# x = np.array([5], dtype=np.int32)
# print "shifting x left: ", x << 5
# blocks = np.arange(8 * 64, dtype=np.int32).reshape(-1, 8)
# sub_online_regress(blocks)
if __name__ == '__main__':
main()
|
[
"numpy.mean",
"numpy.median",
"numpy.argmax",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"collections.Counter",
"numpy.empty",
"numpy.sum",
"numpy.sign"
] |
[((3011, 3025), 'numpy.array', 'np.array', (['keys'], {}), '(keys)\n', (3019, 3025), True, 'import numpy as np\n'), ((3039, 3055), 'numpy.array', 'np.array', (['coeffs'], {}), '(coeffs)\n', (3047, 3055), True, 'import numpy as np\n'), ((3068, 3086), 'numpy.argsort', 'np.argsort', (['coeffs'], {}), '(coeffs)\n', (3078, 3086), True, 'import numpy as np\n'), ((24700, 24738), 'numpy.empty', 'np.empty', (['blocks.shape'], {'dtype': 'np.int32'}), '(blocks.shape, dtype=np.int32)\n', (24708, 24738), True, 'import numpy as np\n'), ((6931, 6952), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (6950, 6952), False, 'import collections\n'), ((6987, 7008), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (7006, 7008), False, 'import collections\n'), ((7622, 7661), 'numpy.zeros', 'np.zeros', (['group.size'], {'dtype': 'group.dtype'}), '(group.size, dtype=group.dtype)\n', (7630, 7661), True, 'import numpy as np\n'), ((7873, 7912), 'numpy.zeros', 'np.zeros', (['group.size'], {'dtype': 'group.dtype'}), '(group.size, dtype=group.dtype)\n', (7881, 7912), True, 'import numpy as np\n'), ((26835, 26862), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'np.int32'}), '(5, dtype=np.int32)\n', (26843, 26862), True, 'import numpy as np\n'), ((6271, 6307), 'numpy.zeros', 'np.zeros', (['self.ntaps'], {'dtype': 'np.int32'}), '(self.ntaps, dtype=np.int32)\n', (6279, 6307), True, 'import numpy as np\n'), ((6338, 6374), 'numpy.zeros', 'np.zeros', (['self.ntaps'], {'dtype': 'np.int32'}), '(self.ntaps, dtype=np.int32)\n', (6346, 6374), True, 'import numpy as np\n'), ((8139, 8152), 'numpy.sum', 'np.sum', (['(x * y)'], {}), '(x * y)\n', (8145, 8152), True, 'import numpy as np\n'), ((8171, 8184), 'numpy.sum', 'np.sum', (['(x * x)'], {}), '(x * x)\n', (8177, 8184), True, 'import numpy as np\n'), ((26294, 26316), 'numpy.array', 'np.array', (['encoder.errs'], {}), '(encoder.errs)\n', (26302, 26316), True, 'import numpy as np\n'), ((25696, 25730), 'numpy.argmax', 'np.argmax', (['encoder.best_idx_counts'], {}), '(encoder.best_idx_counts)\n', (25705, 25730), True, 'import numpy as np\n'), ((26425, 26440), 'numpy.median', 'np.median', (['errs'], {}), '(errs)\n', (26434, 26440), True, 'import numpy as np\n'), ((26454, 26471), 'numpy.mean', 'np.mean', (['(errs > 0)'], {}), '(errs > 0)\n', (26461, 26471), True, 'import numpy as np\n'), ((10758, 10774), 'numpy.array', 'np.array', (['[3, 7]'], {}), '([3, 7])\n', (10766, 10774), True, 'import numpy as np\n'), ((13866, 13878), 'numpy.sign', 'np.sign', (['err'], {}), '(err)\n', (13873, 13878), True, 'import numpy as np\n')]
|
import numpy as np
def mean_or_nan(xs):
"""Return its mean a non-empty sequence, numpy.nan for a empty one."""
return np.mean(xs) if xs else np.nan
|
[
"numpy.mean"
] |
[((128, 139), 'numpy.mean', 'np.mean', (['xs'], {}), '(xs)\n', (135, 139), True, 'import numpy as np\n')]
|
### based on https://github.com/kylemcdonald/Parametric-t-SNE/blob/master/Parametric%20t-SNE%20(Keras).ipynb
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.losses import categorical_crossentropy
from tqdm.autonotebook import tqdm
import tensorflow as tf
def Hbeta(D, beta):
"""Computes the Gaussian kernel values given a vector of
squared Euclidean distances, and the precision of the Gaussian kernel.
The function also computes the perplexity (P) of the distribution."""
P = np.exp(-D * beta)
sumP = np.sum(P)
H = np.log(sumP) + beta * np.sum(np.multiply(D, P)) / sumP
P = P / sumP
return H, P
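# A minimal numerical sketch: given squared distances D and precision beta, Hbeta
# returns the entropy H and the normalized kernel row P, so np.exp(H) is the
# effective perplexity and P sums to one.
def _example_hbeta():
    D = np.array([0.0, 1.0, 4.0, 9.0])
    H, P = Hbeta(D, 0.5)
    return np.exp(H), P.sum()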
def x2p(X, u=15, tol=1e-4, print_iter=500, max_tries=50, verbose=0):
"""
% X2P Identifies appropriate sigma's to get kk NNs up to some tolerance
%
% [P, beta] = x2p(xx, kk, tol)
%
% Identifies the required precision (= 1 / variance^2) to obtain a Gaussian
% kernel with a certain uncertainty for every datapoint. The desired
% uncertainty can be specified through the perplexity u (default = 15). The
% desired perplexity is obtained up to some tolerance that can be specified
% by tol (default = 1e-4).
% The function returns the final Gaussian kernel in P, as well as the
% employed precisions per instance in beta.
%
"""
# Initialize some variables
n = X.shape[0] # number of instances
P = np.zeros((n, n)) # empty probability matrix
beta = np.ones(n) # empty precision vector
logU = np.log(u) # log of perplexity (= entropy)
# Compute pairwise distances
if verbose > 0:
print("Computing pairwise distances...")
sum_X = np.sum(np.square(X), axis=1)
# note: translating sum_X' from matlab to numpy means using reshape to add a dimension
D = sum_X + sum_X[:, None] + -2 * X.dot(X.T)
# Run over all datapoints
if verbose > 0:
print("Computing P-values...")
for i in range(n):
if verbose > 1 and print_iter and i % print_iter == 0:
print("Computed P-values {} of {} datapoints...".format(i, n))
# Set minimum and maximum values for precision
betamin = float("-inf")
betamax = float("+inf")
# Compute the Gaussian kernel and entropy for the current precision
indices = np.concatenate((np.arange(0, i), np.arange(i + 1, n)))
Di = D[i, indices]
H, thisP = Hbeta(Di, beta[i])
# Evaluate whether the perplexity is within tolerance
Hdiff = H - logU
tries = 0
while abs(Hdiff) > tol and tries < max_tries:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta[i]
if np.isinf(betamax):
beta[i] *= 2
else:
beta[i] = (beta[i] + betamax) / 2
else:
betamax = beta[i]
if np.isinf(betamin):
beta[i] /= 2
else:
beta[i] = (beta[i] + betamin) / 2
# Recompute the values
H, thisP = Hbeta(Di, beta[i])
Hdiff = H - logU
tries += 1
# Set the final row of P
P[i, indices] = thisP
if verbose > 0:
print("Mean value of sigma: {}".format(np.mean(np.sqrt(1 / beta))))
print("Minimum value of sigma: {}".format(np.min(np.sqrt(1 / beta))))
print("Maximum value of sigma: {}".format(np.max(np.sqrt(1 / beta))))
return P, beta
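# A small usage sketch (synthetic data): x2p returns the row-wise Gaussian
# affinities P and the per-point precisions beta tuned to the requested
# perplexity u.
def _example_x2p():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    P, beta = x2p(X, u=5, tol=1e-4, verbose=0)
    return P.shape, beta.shape  # (20, 20), (20,)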
def compute_joint_probabilities(
samples, batch_size=5000, d=2, perplexity=30, tol=1e-5, verbose=0
):
""" This function computes the probababilities in X, split up into batches
% Gaussians employed in the high-dimensional space have the specified
% perplexity (default = 30). The number of degrees of freedom of the
% Student-t distribution may be specified through v (default = d - 1).
"""
v = d - 1
# Initialize some variables
n = samples.shape[0]
batch_size = min(batch_size, n)
# Precompute joint probabilities for all batches
if verbose > 0:
print("Precomputing P-values...")
batch_count = int(n / batch_size)
P = np.zeros((batch_count, batch_size, batch_size))
# for each batch of data
for i, start in enumerate(tqdm(range(0, n - batch_size + 1, batch_size))):
# select batch
curX = samples[start : start + batch_size]
# compute affinities using fixed perplexity
P[i], _ = x2p(curX, perplexity, tol, verbose=verbose)
# make sure we don't have NaN's
P[i][np.isnan(P[i])] = 0
# make symmetric
P[i] = P[i] + P[i].T # / 2
# obtain estimation of joint probabilities
P[i] = P[i] / P[i].sum()
P[i] = np.maximum(P[i], np.finfo(P[i].dtype).eps)
return P
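# A hypothetical end-to-end sketch (synthetic data): P holds one symmetric,
# normalized affinity matrix per training batch, ready to be passed as y_true to
# the t-SNE loss.
def _example_joint_probabilities():
    rng = np.random.RandomState(0)
    samples = rng.randn(100, 10)
    P = compute_joint_probabilities(samples, batch_size=50, perplexity=10, verbose=0)
    return P.shape  # (2, 50, 50)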
def z2p(z, d, n, eps=10e-15):
""" Computes the low dimensional probability
"""
v = d - 1
sum_act = tf.math.reduce_sum(tf.math.square(z), axis=1)
Q = K.reshape(sum_act, [-1, 1]) + -2 * tf.keras.backend.dot(z, tf.transpose(z))
Q = (sum_act + Q) / v
Q = tf.math.pow(1 + Q, -(v + 1) / 2)
Q *= 1 - np.eye(n)
Q /= tf.math.reduce_sum(Q)
Q = tf.math.maximum(Q, eps)
return Q
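# A quick numerical sketch of z2p, using double-precision inputs to match the
# numpy constants above: the Student-t similarities have a zero diagonal and sum
# to roughly one over the batch.
def _example_z2p():
    z = tf.constant(np.random.RandomState(0).randn(16, 2))
    Q = z2p(z, d=2, n=16)
    return float(tf.math.reduce_sum(Q))  # ~1.0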
def tsne_loss(d, batch_size, eps=10e-15):
# v = d - 1.0
def loss(P, Z):
""" KL divergence
P is the joint probabilities for this batch (Keras loss functions call this y_true)
Z is the low-dimensional output (Keras loss functions call this y_pred)
"""
Q = z2p(Z, d, n=batch_size, eps=eps)
return tf.math.reduce_sum(P * tf.math.log((P + eps) / (Q + eps)))
return loss
|
[
"tensorflow.math.pow",
"numpy.sqrt",
"tensorflow.transpose",
"tensorflow.math.log",
"numpy.log",
"numpy.arange",
"numpy.multiply",
"numpy.exp",
"numpy.isinf",
"numpy.eye",
"numpy.ones",
"tensorflow.keras.backend.reshape",
"numpy.square",
"tensorflow.math.maximum",
"numpy.isnan",
"numpy.finfo",
"tensorflow.math.square",
"numpy.sum",
"numpy.zeros",
"tensorflow.math.reduce_sum"
] |
[((532, 549), 'numpy.exp', 'np.exp', (['(-D * beta)'], {}), '(-D * beta)\n', (538, 549), True, 'import numpy as np\n'), ((561, 570), 'numpy.sum', 'np.sum', (['P'], {}), '(P)\n', (567, 570), True, 'import numpy as np\n'), ((1437, 1453), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (1445, 1453), True, 'import numpy as np\n'), ((1493, 1503), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1500, 1503), True, 'import numpy as np\n'), ((1541, 1550), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (1547, 1550), True, 'import numpy as np\n'), ((4228, 4275), 'numpy.zeros', 'np.zeros', (['(batch_count, batch_size, batch_size)'], {}), '((batch_count, batch_size, batch_size))\n', (4236, 4275), True, 'import numpy as np\n'), ((5143, 5175), 'tensorflow.math.pow', 'tf.math.pow', (['(1 + Q)', '(-(v + 1) / 2)'], {}), '(1 + Q, -(v + 1) / 2)\n', (5154, 5175), True, 'import tensorflow as tf\n'), ((5208, 5229), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['Q'], {}), '(Q)\n', (5226, 5229), True, 'import tensorflow as tf\n'), ((5238, 5261), 'tensorflow.math.maximum', 'tf.math.maximum', (['Q', 'eps'], {}), '(Q, eps)\n', (5253, 5261), True, 'import tensorflow as tf\n'), ((579, 591), 'numpy.log', 'np.log', (['sumP'], {}), '(sumP)\n', (585, 591), True, 'import numpy as np\n'), ((1706, 1718), 'numpy.square', 'np.square', (['X'], {}), '(X)\n', (1715, 1718), True, 'import numpy as np\n'), ((4998, 5015), 'tensorflow.math.square', 'tf.math.square', (['z'], {}), '(z)\n', (5012, 5015), True, 'import tensorflow as tf\n'), ((5033, 5060), 'tensorflow.keras.backend.reshape', 'K.reshape', (['sum_act', '[-1, 1]'], {}), '(sum_act, [-1, 1])\n', (5042, 5060), True, 'from tensorflow.keras import backend as K\n'), ((5189, 5198), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (5195, 5198), True, 'import numpy as np\n'), ((4625, 4639), 'numpy.isnan', 'np.isnan', (['P[i]'], {}), '(P[i])\n', (4633, 4639), True, 'import numpy as np\n'), ((2350, 2365), 'numpy.arange', 'np.arange', (['(0)', 'i'], {}), '(0, i)\n', (2359, 2365), True, 'import numpy as np\n'), ((2367, 2386), 'numpy.arange', 'np.arange', (['(i + 1)', 'n'], {}), '(i + 1, n)\n', (2376, 2386), True, 'import numpy as np\n'), ((2747, 2764), 'numpy.isinf', 'np.isinf', (['betamax'], {}), '(betamax)\n', (2755, 2764), True, 'import numpy as np\n'), ((2946, 2963), 'numpy.isinf', 'np.isinf', (['betamin'], {}), '(betamin)\n', (2954, 2963), True, 'import numpy as np\n'), ((4822, 4842), 'numpy.finfo', 'np.finfo', (['P[i].dtype'], {}), '(P[i].dtype)\n', (4830, 4842), True, 'import numpy as np\n'), ((5092, 5107), 'tensorflow.transpose', 'tf.transpose', (['z'], {}), '(z)\n', (5104, 5107), True, 'import tensorflow as tf\n'), ((5650, 5684), 'tensorflow.math.log', 'tf.math.log', (['((P + eps) / (Q + eps))'], {}), '((P + eps) / (Q + eps))\n', (5661, 5684), True, 'import tensorflow as tf\n'), ((608, 625), 'numpy.multiply', 'np.multiply', (['D', 'P'], {}), '(D, P)\n', (619, 625), True, 'import numpy as np\n'), ((3344, 3361), 'numpy.sqrt', 'np.sqrt', (['(1 / beta)'], {}), '(1 / beta)\n', (3351, 3361), True, 'import numpy as np\n'), ((3422, 3439), 'numpy.sqrt', 'np.sqrt', (['(1 / beta)'], {}), '(1 / beta)\n', (3429, 3439), True, 'import numpy as np\n'), ((3500, 3517), 'numpy.sqrt', 'np.sqrt', (['(1 / beta)'], {}), '(1 / beta)\n', (3507, 3517), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Compare to numpy data"""
import sys
import numpy as np
import multipletau
from test_correlate import get_sample_arrays_cplx
def test_corresponds_ac():
myframe = sys._getframe()
myname = myframe.f_code.co_name
print("running ", myname)
a = np.concatenate(get_sample_arrays_cplx()).real
m = 16
restau = multipletau.autocorrelate(a=1*a,
m=m,
copy=True,
normalize=True,
dtype=np.float_)
reslin = multipletau.correlate_numpy(a=1*a,
v=1*a,
copy=True,
normalize=True,
dtype=np.float_)
idx = np.array(restau[:, 0].real, dtype=int)[:m]
assert np.allclose(reslin[idx, 1], restau[:m, 1])
def test_corresponds_ac_first_loop():
"""
numpy correlation:
G_m = sum_i(a_i*a_{i+m})
multipletau correlation 2nd order:
    b_j = (a_{2i} + a_{2i+1}) / 2
G_m = sum_j(b_j*b_{j+1})
= 1/4*sum_i(a_{2i} * a_{2i+m} +
a_{2i} * a_{2i+m+1} +
a_{2i+1} * a_{2i+m} +
a_{2i+1} * a_{2i+m+1}
)
The values after the first m+1 lag times in the multipletau
correlation differ from the normal correlation, because the
traces are averaged over two consecutive items, effectively
halving the size of the trace. The multiple-tau correlation
can be compared to the regular correlation by using an even
sized sequence (here 222) in which the elements 2i and 2i+1
are equal, as is done in this test.
"""
myframe = sys._getframe()
myname = myframe.f_code.co_name
print("running ", myname)
a = [arr / np.average(arr) for arr in get_sample_arrays_cplx()]
a = np.concatenate(a)[:222]
# two consecutive elements are the same, so the multiple-tau method
# corresponds to the numpy correlation for the first loop.
a[::2] = a[1::2]
for m in [2, 4, 6, 8, 10, 12, 14, 16]:
restau = multipletau.correlate(a=a,
v=a.imag+1j*a.real,
m=m,
copy=True,
normalize=False,
dtype=np.complex_)
reslin = multipletau.correlate_numpy(a=a,
v=a.imag+1j*a.real,
copy=True,
normalize=False,
dtype=np.complex_)
idtau = np.where(restau[:, 0] == m+2)[0][0]
tau3 = restau[idtau, 1] # m+1 initial bins
idref = np.where(reslin[:, 0] == m+2)[0][0]
tau3ref = reslin[idref, 1]
assert np.allclose(tau3, tau3ref)
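# A numerical sketch of the averaging identity in the docstring above, written out
# for m = 2: binning consecutive pairs and correlating at lag 1 matches a quarter
# of the sum of the four raw lag products.
def _example_binning_identity():
    a = np.random.RandomState(0).rand(20)
    b = (a[::2] + a[1::2]) / 2  # b_j = (a_{2j} + a_{2j+1}) / 2
    i = np.arange(len(b) - 1)
    lhs = np.sum(b[:-1] * b[1:])
    rhs = 0.25 * np.sum(a[2*i] * a[2*i+2] + a[2*i] * a[2*i+3]
                         + a[2*i+1] * a[2*i+2] + a[2*i+1] * a[2*i+3])
    return np.allclose(lhs, rhs)  # True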
def test_corresponds_ac_nonormalize():
myframe = sys._getframe()
myname = myframe.f_code.co_name
print("running ", myname)
a = np.concatenate(get_sample_arrays_cplx()).real
m = 16
restau = multipletau.autocorrelate(a=1*a,
m=m,
copy=True,
normalize=False,
dtype=np.float_)
reslin = multipletau.correlate_numpy(a=1*a,
v=1*a,
copy=True,
normalize=False,
dtype=np.float_)
idx = np.array(restau[:, 0].real, dtype=int)[:m+1]
assert np.allclose(reslin[idx, 1], restau[:m+1, 1])
def test_corresponds_cc():
myframe = sys._getframe()
myname = myframe.f_code.co_name
print("running ", myname)
a = np.concatenate(get_sample_arrays_cplx())
m = 16
restau = multipletau.correlate(a=a,
v=a.imag+1j*a.real,
m=m,
copy=True,
normalize=True,
dtype=np.complex_)
reslin = multipletau.correlate_numpy(a=a,
v=a.imag+1j*a.real,
copy=True,
normalize=True,
dtype=np.complex_)
idx = np.array(restau[:, 0].real, dtype=int)[:m+1]
assert np.allclose(reslin[idx, 1], restau[:m+1, 1])
def test_corresponds_cc_nonormalize():
myframe = sys._getframe()
myname = myframe.f_code.co_name
print("running ", myname)
a = np.concatenate(get_sample_arrays_cplx())
m = 16
restau = multipletau.correlate(a=a,
v=a.imag+1j*a.real,
m=m,
copy=True,
normalize=False,
dtype=np.complex_)
reslin = multipletau.correlate_numpy(a=a,
v=a.imag+1j*a.real,
copy=True,
normalize=False,
dtype=np.complex_)
idx = np.array(restau[:, 0].real, dtype=int)[:m+1]
assert np.allclose(reslin[idx, 1], restau[:m+1, 1])
if __name__ == "__main__":
# Run all tests
loc = locals()
for key in list(loc.keys()):
if key.startswith("test_") and hasattr(loc[key], "__call__"):
loc[key]()
|
[
"multipletau.correlate_numpy",
"numpy.allclose",
"numpy.average",
"numpy.where",
"sys._getframe",
"numpy.array",
"multipletau.correlate",
"numpy.concatenate",
"test_correlate.get_sample_arrays_cplx",
"multipletau.autocorrelate"
] |
[((215, 230), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (228, 230), False, 'import sys\n'), ((377, 465), 'multipletau.autocorrelate', 'multipletau.autocorrelate', ([], {'a': '(1 * a)', 'm': 'm', 'copy': '(True)', 'normalize': '(True)', 'dtype': 'np.float_'}), '(a=1 * a, m=m, copy=True, normalize=True, dtype=np\n .float_)\n', (402, 465), False, 'import multipletau\n'), ((629, 722), 'multipletau.correlate_numpy', 'multipletau.correlate_numpy', ([], {'a': '(1 * a)', 'v': '(1 * a)', 'copy': '(True)', 'normalize': '(True)', 'dtype': 'np.float_'}), '(a=1 * a, v=1 * a, copy=True, normalize=True,\n dtype=np.float_)\n', (656, 722), False, 'import multipletau\n'), ((945, 987), 'numpy.allclose', 'np.allclose', (['reslin[idx, 1]', 'restau[:m, 1]'], {}), '(reslin[idx, 1], restau[:m, 1])\n', (956, 987), True, 'import numpy as np\n'), ((1834, 1849), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (1847, 1849), False, 'import sys\n'), ((3117, 3132), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (3130, 3132), False, 'import sys\n'), ((3279, 3368), 'multipletau.autocorrelate', 'multipletau.autocorrelate', ([], {'a': '(1 * a)', 'm': 'm', 'copy': '(True)', 'normalize': '(False)', 'dtype': 'np.float_'}), '(a=1 * a, m=m, copy=True, normalize=False, dtype=\n np.float_)\n', (3304, 3368), False, 'import multipletau\n'), ((3532, 3626), 'multipletau.correlate_numpy', 'multipletau.correlate_numpy', ([], {'a': '(1 * a)', 'v': '(1 * a)', 'copy': '(True)', 'normalize': '(False)', 'dtype': 'np.float_'}), '(a=1 * a, v=1 * a, copy=True, normalize=False,\n dtype=np.float_)\n', (3559, 3626), False, 'import multipletau\n'), ((3851, 3897), 'numpy.allclose', 'np.allclose', (['reslin[idx, 1]', 'restau[:m + 1, 1]'], {}), '(reslin[idx, 1], restau[:m + 1, 1])\n', (3862, 3897), True, 'import numpy as np\n'), ((3939, 3954), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (3952, 3954), False, 'import sys\n'), ((4096, 4203), 'multipletau.correlate', 'multipletau.correlate', ([], {'a': 'a', 'v': '(a.imag + 1.0j * a.real)', 'm': 'm', 'copy': '(True)', 'normalize': '(True)', 'dtype': 'np.complex_'}), '(a=a, v=a.imag + 1.0j * a.real, m=m, copy=True,\n normalize=True, dtype=np.complex_)\n', (4117, 4203), False, 'import multipletau\n'), ((4383, 4491), 'multipletau.correlate_numpy', 'multipletau.correlate_numpy', ([], {'a': 'a', 'v': '(a.imag + 1.0j * a.real)', 'copy': '(True)', 'normalize': '(True)', 'dtype': 'np.complex_'}), '(a=a, v=a.imag + 1.0j * a.real, copy=True,\n normalize=True, dtype=np.complex_)\n', (4410, 4491), False, 'import multipletau\n'), ((4714, 4760), 'numpy.allclose', 'np.allclose', (['reslin[idx, 1]', 'restau[:m + 1, 1]'], {}), '(reslin[idx, 1], restau[:m + 1, 1])\n', (4725, 4760), True, 'import numpy as np\n'), ((4814, 4829), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (4827, 4829), False, 'import sys\n'), ((4971, 5079), 'multipletau.correlate', 'multipletau.correlate', ([], {'a': 'a', 'v': '(a.imag + 1.0j * a.real)', 'm': 'm', 'copy': '(True)', 'normalize': '(False)', 'dtype': 'np.complex_'}), '(a=a, v=a.imag + 1.0j * a.real, m=m, copy=True,\n normalize=False, dtype=np.complex_)\n', (4992, 5079), False, 'import multipletau\n'), ((5259, 5368), 'multipletau.correlate_numpy', 'multipletau.correlate_numpy', ([], {'a': 'a', 'v': '(a.imag + 1.0j * a.real)', 'copy': '(True)', 'normalize': '(False)', 'dtype': 'np.complex_'}), '(a=a, v=a.imag + 1.0j * a.real, copy=True,\n normalize=False, dtype=np.complex_)\n', (5286, 5368), False, 'import multipletau\n'), ((5591, 5637), 'numpy.allclose', 
'np.allclose', (['reslin[idx, 1]', 'restau[:m + 1, 1]'], {}), '(reslin[idx, 1], restau[:m + 1, 1])\n', (5602, 5637), True, 'import numpy as np\n'), ((890, 928), 'numpy.array', 'np.array', (['restau[:, 0].real'], {'dtype': 'int'}), '(restau[:, 0].real, dtype=int)\n', (898, 928), True, 'import numpy as np\n'), ((1993, 2010), 'numpy.concatenate', 'np.concatenate', (['a'], {}), '(a)\n', (2007, 2010), True, 'import numpy as np\n'), ((2234, 2342), 'multipletau.correlate', 'multipletau.correlate', ([], {'a': 'a', 'v': '(a.imag + 1.0j * a.real)', 'm': 'm', 'copy': '(True)', 'normalize': '(False)', 'dtype': 'np.complex_'}), '(a=a, v=a.imag + 1.0j * a.real, m=m, copy=True,\n normalize=False, dtype=np.complex_)\n', (2255, 2342), False, 'import multipletau\n'), ((2546, 2655), 'multipletau.correlate_numpy', 'multipletau.correlate_numpy', ([], {'a': 'a', 'v': '(a.imag + 1.0j * a.real)', 'copy': '(True)', 'normalize': '(False)', 'dtype': 'np.complex_'}), '(a=a, v=a.imag + 1.0j * a.real, copy=True,\n normalize=False, dtype=np.complex_)\n', (2573, 2655), False, 'import multipletau\n'), ((3035, 3061), 'numpy.allclose', 'np.allclose', (['tau3', 'tau3ref'], {}), '(tau3, tau3ref)\n', (3046, 3061), True, 'import numpy as np\n'), ((3794, 3832), 'numpy.array', 'np.array', (['restau[:, 0].real'], {'dtype': 'int'}), '(restau[:, 0].real, dtype=int)\n', (3802, 3832), True, 'import numpy as np\n'), ((4045, 4069), 'test_correlate.get_sample_arrays_cplx', 'get_sample_arrays_cplx', ([], {}), '()\n', (4067, 4069), False, 'from test_correlate import get_sample_arrays_cplx\n'), ((4657, 4695), 'numpy.array', 'np.array', (['restau[:, 0].real'], {'dtype': 'int'}), '(restau[:, 0].real, dtype=int)\n', (4665, 4695), True, 'import numpy as np\n'), ((4920, 4944), 'test_correlate.get_sample_arrays_cplx', 'get_sample_arrays_cplx', ([], {}), '()\n', (4942, 4944), False, 'from test_correlate import get_sample_arrays_cplx\n'), ((5534, 5572), 'numpy.array', 'np.array', (['restau[:, 0].real'], {'dtype': 'int'}), '(restau[:, 0].real, dtype=int)\n', (5542, 5572), True, 'import numpy as np\n'), ((321, 345), 'test_correlate.get_sample_arrays_cplx', 'get_sample_arrays_cplx', ([], {}), '()\n', (343, 345), False, 'from test_correlate import get_sample_arrays_cplx\n'), ((1932, 1947), 'numpy.average', 'np.average', (['arr'], {}), '(arr)\n', (1942, 1947), True, 'import numpy as np\n'), ((1959, 1983), 'test_correlate.get_sample_arrays_cplx', 'get_sample_arrays_cplx', ([], {}), '()\n', (1981, 1983), False, 'from test_correlate import get_sample_arrays_cplx\n'), ((3223, 3247), 'test_correlate.get_sample_arrays_cplx', 'get_sample_arrays_cplx', ([], {}), '()\n', (3245, 3247), False, 'from test_correlate import get_sample_arrays_cplx\n'), ((2843, 2874), 'numpy.where', 'np.where', (['(restau[:, 0] == m + 2)'], {}), '(restau[:, 0] == m + 2)\n', (2851, 2874), True, 'import numpy as np\n'), ((2948, 2979), 'numpy.where', 'np.where', (['(reslin[:, 0] == m + 2)'], {}), '(reslin[:, 0] == m + 2)\n', (2956, 2979), True, 'import numpy as np\n')]
|
from pathlib import Path
from typing import Optional
import numpy as np
import pandas as pd
from nilearn.datasets.utils import _fetch_files
from scipy import sparse
class StudyID(str):
pass
class TfIDf(float):
pass
NS_DATA_URL = "https://github.com/neurosynth/neurosynth-data/raw/master/"
def fetch_study_metadata(
data_dir: Path, version: int = 7, verbose: int = 1
) -> pd.DataFrame:
"""
Download if needed the `metadata.tsv.gz` file from Neurosynth and load
it into a pandas DataFrame.
The metadata table contains the metadata for each study. Each study (ID)
is stored on its own line. These IDs are in the same order as the id
column of the associated `coordinates.tsv.gz` file, but the rows will
differ because the coordinates file will contain multiple rows per
study. They are also in the same order as the rows in the
`features.npz` files for the same version.
The metadata will therefore have N rows, N being the number of studies
in the Neurosynth dataset. The columns (for version 7) are:
- id
- doi
- space
- title
- authors
- year
- journal
Parameters
----------
data_dir : Path
the path for the directory where downloaded data should be saved.
version : int, optional
the neurosynth data version, by default 7
verbose : int, optional
verbose param for nilearn's `_fetch_files`, by default 1
Returns
-------
pd.DataFrame
the study metadata dataframe
"""
metadata_filename = f"data-neurosynth_version-{version}_metadata.tsv.gz"
metadata_file = _fetch_files(
data_dir,
[
(
metadata_filename,
NS_DATA_URL + metadata_filename,
{},
),
],
verbose=verbose,
)[0]
metadata = pd.read_table(metadata_file)
return metadata
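# Illustrative usage (not part of the original module; "neurosynth_cache" is a
# hypothetical local directory):
#
#     metadata = fetch_study_metadata(Path("neurosynth_cache"))
#     print(metadata.columns.tolist())
#     # -> ['id', 'doi', 'space', 'title', 'authors', 'year', 'journal']
#     print(len(metadata))  # number of studies in the Neurosynth dataset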
def fetch_feature_data(
data_dir: Path,
version: int = 7,
verbose: int = 1,
convert_study_ids: bool = False,
) -> pd.DataFrame:
"""
Download if needed the `tfidf_features.npz` file from Neurosynth and
load it into a pandas Dataframe. The `tfidf_features` contains feature
values for different types of "vocabularies".
The features dataframe is stored as a compressed, sparse matrix.
Once loaded and reconstructed into a dense matrix, it contains one row
per study and one column per label. The associated labels are loaded,
as well as the study ids, to reconstruct a dataframe of size N x P,
where N is the number of studies in the Neurosynth dataset, and P is
the number of words in the vocabulary.
Parameters
----------
data_dir : Path
the path for the directory where downloaded data should be saved.
version : int, optional
the neurosynth data version, by default 7
verbose : int, optional
verbose param for nilearn's `_fetch_files`, by default 1
convert_study_ids : bool, optional
if True, cast study ids as `StudyID`, by default False
Returns
-------
pd.DataFrame
the features dataframe
"""
file_names = [
f"data-neurosynth_version-{version}_vocab-terms_source-abstract_type-tfidf_features.npz",
f"data-neurosynth_version-{version}_vocab-terms_vocabulary.txt",
]
files = _fetch_files(
data_dir,
[
(
fn,
NS_DATA_URL + fn,
{},
)
for fn in file_names
],
verbose=verbose,
)
feature_data_sparse = sparse.load_npz(files[0])
feature_data = feature_data_sparse.todense()
metadata_df = fetch_study_metadata(data_dir, version, verbose)
ids = metadata_df["id"]
if convert_study_ids:
ids = ids.apply(StudyID)
feature_names = np.genfromtxt(
files[1],
dtype=str,
delimiter="\t",
).tolist()
feature_df = pd.DataFrame(
index=ids.tolist(), columns=feature_names, data=feature_data
)
return feature_df
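# Illustrative usage (assumes the same hypothetical cache directory as above):
#
#     features = fetch_feature_data(Path("neurosynth_cache"))
#     # features.loc[study_id, term] is the tf-idf weight of `term` in the
#     # abstract of study `study_id`; it is 0.0 when the term does not occur.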
def fetch_neurosynth_peak_data(
data_dir: Path,
version: int = 7,
verbose: int = 1,
convert_study_ids: bool = False,
) -> pd.DataFrame:
"""
Download if needed the `coordinates.tsv.gz` file from Neurosynth and
load it into a pandas DataFrame.
The `coordinates.tsv.gz` contains the coordinates for the peaks
reported by studies in the Neurosynth dataset. It contains one row per
coordinate reported.
The metadata for each study is also loaded to include the space in
which the coordinates are reported. The peak_data dataframe therefore
has PR rows, PR being the number of reported peaks in the Neurosynth
dataset.
The columns (for version 7) are:
- id
- table_id
- table_num
- peak_id
- space
- x
- y
- z
Parameters
----------
data_dir : Path
the path for the directory where downloaded data should be saved.
version : int, optional
the neurosynth data version, by default 7
verbose : int, optional
verbose param for nilearn's `_fetch_files`, by default 1
convert_study_ids : bool, optional
if True, cast study ids as `StudyID`, by default False
Returns
-------
pd.DataFrame
the peak dataframe
"""
coordinates_filename = (
f"data-neurosynth_version-{version}_coordinates.tsv.gz"
)
coordinates_file = _fetch_files(
data_dir,
[
(
coordinates_filename,
NS_DATA_URL + coordinates_filename,
{},
),
],
verbose=verbose,
)[0]
activations = pd.read_table(coordinates_file)
metadata = fetch_study_metadata(data_dir, version, verbose)
activations = activations.join(
metadata[["id", "space"]].set_index("id"), on="id"
)
if convert_study_ids:
activations["id"] = activations["id"].apply(StudyID)
return activations
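# Illustrative usage (hypothetical cache directory):
#
#     peaks = fetch_neurosynth_peak_data(Path("neurosynth_cache"))
#     peaks.groupby("id").size()   # number of reported peaks per study
#     peaks["space"].unique()      # reported spaces, e.g. 'MNI' and 'TAL'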
def get_ns_term_study_associations(
data_dir: Path,
version: int = 7,
verbose: int = 1,
convert_study_ids: bool = False,
tfidf_threshold: Optional[float] = None,
) -> pd.DataFrame:
"""
    Load a dataframe containing associations between terms and studies.
The dataframe contains one row for each term and study pair from the
features table in the Neurosynth dataset. With each (term, study) pair
comes the tfidf value for the term in the study.
If a tfidf threshold value is passed, only (term, study) associations
with a tfidf value > tfidf_threshold will be kept.
Parameters
----------
data_dir : Path
the path for the directory where downloaded data should be saved.
version : int, optional
the neurosynth data version, by default 7
verbose : int, optional
verbose param for nilearn's `_fetch_files`, by default 1
convert_study_ids : bool, optional
if True, cast study ids as `StudyID`, by default False
tfidf_threshold : Optional[float], optional
the minimum tfidf value for the (term, study) associations,
by default None
Returns
-------
pd.DataFrame
the term association dataframe
"""
features = fetch_feature_data(
data_dir, version, verbose, convert_study_ids
)
features.index.name = "id"
term_data = pd.melt(
features.reset_index(),
var_name="term",
id_vars="id",
value_name="tfidf",
)
if tfidf_threshold is not None:
term_data = term_data.query(f"tfidf > {tfidf_threshold}")
else:
term_data = term_data.query("tfidf > 0")
return term_data
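# Illustrative usage (hypothetical cache directory and threshold value):
#
#     term_data = get_ns_term_study_associations(
#         Path("neurosynth_cache"), tfidf_threshold=0.01
#     )
#     # one row per (term, study) pair whose tfidf value exceeds 0.01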
def get_ns_mni_peaks_reported(
data_dir: Path,
version: int = 7,
verbose: int = 1,
convert_study_ids: bool = False,
) -> pd.DataFrame:
"""
    Load a dataframe containing the coordinates of the peaks reported by
    studies in the Neurosynth dataset. All coordinates are returned in
    MNI space; peaks originally reported in Talairach space are converted
    to MNI.
The resulting dataframe contains one row for each peak reported. Each
row has 4 columns:
- id
- x
- y
- z
Parameters
----------
data_dir : Path
the path for the directory where downloaded data should be saved.
version : int, optional
the neurosynth data version, by default 7
verbose : int, optional
verbose param for nilearn's `_fetch_files`, by default 1
convert_study_ids : bool, optional
if True, cast study ids as `StudyID`, by default False
Returns
-------
pd.DataFrame
the peak dataframe
"""
activations = fetch_neurosynth_peak_data(
data_dir, version, verbose, convert_study_ids
)
mni_peaks = activations.loc[activations.space == "MNI"][
["x", "y", "z", "id"]
]
non_mni_peaks = activations.loc[activations.space == "TAL"][
["x", "y", "z", "id"]
]
proj_mat = np.linalg.pinv(
np.array(
[
[0.9254, 0.0024, -0.0118, -1.0207],
[-0.0048, 0.9316, -0.0871, -1.7667],
[0.0152, 0.0883, 0.8924, 4.0926],
[0.0, 0.0, 0.0, 1.0],
]
).T
)
projected = np.round(
np.dot(
np.hstack(
(
non_mni_peaks[["x", "y", "z"]].values,
np.ones((len(non_mni_peaks), 1)),
)
),
proj_mat,
)[:, 0:3]
)
projected_df = pd.DataFrame(
np.hstack([projected, non_mni_peaks[["id"]].values]),
columns=["x", "y", "z", "id"],
)
peak_data = pd.concat([projected_df, mni_peaks]).astype(
{"x": int, "y": int, "z": int}
)
return peak_data
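# Illustrative usage (hypothetical cache directory):
#
#     peaks = get_ns_mni_peaks_reported(Path("neurosynth_cache"))
#     # each row is one (x, y, z, id) peak; Talairach-space peaks have been
#     # mapped to MNI with the affine above and, like all rows, cast to
#     # integer coordinates.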
|
[
"numpy.hstack",
"scipy.sparse.load_npz",
"numpy.array",
"pandas.concat",
"nilearn.datasets.utils._fetch_files",
"pandas.read_table",
"numpy.genfromtxt"
] |
[((1891, 1919), 'pandas.read_table', 'pd.read_table', (['metadata_file'], {}), '(metadata_file)\n', (1904, 1919), True, 'import pandas as pd\n'), ((3386, 3480), 'nilearn.datasets.utils._fetch_files', '_fetch_files', (['data_dir', '[(fn, NS_DATA_URL + fn, {}) for fn in file_names]'], {'verbose': 'verbose'}), '(data_dir, [(fn, NS_DATA_URL + fn, {}) for fn in file_names],\n verbose=verbose)\n', (3398, 3480), False, 'from nilearn.datasets.utils import _fetch_files\n'), ((3631, 3656), 'scipy.sparse.load_npz', 'sparse.load_npz', (['files[0]'], {}), '(files[0])\n', (3646, 3656), False, 'from scipy import sparse\n'), ((5773, 5804), 'pandas.read_table', 'pd.read_table', (['coordinates_file'], {}), '(coordinates_file)\n', (5786, 5804), True, 'import pandas as pd\n'), ((1656, 1759), 'nilearn.datasets.utils._fetch_files', '_fetch_files', (['data_dir', '[(metadata_filename, NS_DATA_URL + metadata_filename, {})]'], {'verbose': 'verbose'}), '(data_dir, [(metadata_filename, NS_DATA_URL + metadata_filename,\n {})], verbose=verbose)\n', (1668, 1759), False, 'from nilearn.datasets.utils import _fetch_files\n'), ((5529, 5638), 'nilearn.datasets.utils._fetch_files', '_fetch_files', (['data_dir', '[(coordinates_filename, NS_DATA_URL + coordinates_filename, {})]'], {'verbose': 'verbose'}), '(data_dir, [(coordinates_filename, NS_DATA_URL +\n coordinates_filename, {})], verbose=verbose)\n', (5541, 5638), False, 'from nilearn.datasets.utils import _fetch_files\n'), ((9686, 9738), 'numpy.hstack', 'np.hstack', (["[projected, non_mni_peaks[['id']].values]"], {}), "([projected, non_mni_peaks[['id']].values])\n", (9695, 9738), True, 'import numpy as np\n'), ((3880, 3930), 'numpy.genfromtxt', 'np.genfromtxt', (['files[1]'], {'dtype': 'str', 'delimiter': '"""\t"""'}), "(files[1], dtype=str, delimiter='\\t')\n", (3893, 3930), True, 'import numpy as np\n'), ((9121, 9265), 'numpy.array', 'np.array', (['[[0.9254, 0.0024, -0.0118, -1.0207], [-0.0048, 0.9316, -0.0871, -1.7667], [\n 0.0152, 0.0883, 0.8924, 4.0926], [0.0, 0.0, 0.0, 1.0]]'], {}), '([[0.9254, 0.0024, -0.0118, -1.0207], [-0.0048, 0.9316, -0.0871, -\n 1.7667], [0.0152, 0.0883, 0.8924, 4.0926], [0.0, 0.0, 0.0, 1.0]])\n', (9129, 9265), True, 'import numpy as np\n'), ((9801, 9837), 'pandas.concat', 'pd.concat', (['[projected_df, mni_peaks]'], {}), '([projected_df, mni_peaks])\n', (9810, 9837), True, 'import pandas as pd\n')]
|
# Created by <NAME>.
import sys
import numpy as np
sys.path.append('../')
from envs import GridWorld
from itertools import product
from utils import print_episode, eps_greedy_policy, test_policy
'''
n-step Tree Backup used to estimate the optimal policy for
the gridworld environment defined on page 48 of
"Reinforcement Learning: An Introduction."
Algorithm available on page 125.
Book reference:
<NAME>. and <NAME>., 2014. Reinforcement Learning:
An Introduction. 1st ed. London: The MIT Press.
'''
def policy_proba(policy, s, a, epsilon):
'''Return the probability of the given epsilon-greedy policy
taking the specified action in the specified state.'''
if policy[s] == a:
return (epsilon/4) + (1-epsilon)
else:
return epsilon/4
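# Illustrative sanity check (not part of the original script): the four
# epsilon-greedy action probabilities must sum to one. With epsilon = 0.2 the
# greedy action gets 0.2/4 + 0.8 = 0.85 and each other action gets 0.05.
#
#     probs = [policy_proba({0: 2}, 0, a, 0.2) for a in range(4)]
#     assert abs(sum(probs) - 1.0) < 1e-12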
def n_step_tree_backup(env, n, alpha, gamma, epsilon, n_episodes):
# Initialize policy and state-action value function.
sa_pairs = product(range(env.observation_space_size),\
range(env.action_space_size))
Q = dict.fromkeys(sa_pairs, 0.0)
policy = dict.fromkeys(range(env.observation_space_size), 0)
states = np.zeros(n)
actions = np.zeros(n)
Qs = np.zeros(n)
deltas = np.zeros(n)
pis = np.zeros(n)
decay = lambda x: x-2/n_episodes if x-2/n_episodes > 0.1 else 0.1
for episode in range(n_episodes):
done = False
obs = env.reset()
action = eps_greedy_policy(Q, obs, epsilon, env.action_space_size)
states[0] = obs
actions[0] = action
Qs[0] = Q[obs, action]
t = -1
tau = -1
T = np.inf
        # loop until the last n-step update (tau == T-1) has been performed
        while tau < T - 1:
t += 1
if t < T:
obs_prime, reward, done = env.step(action)
states[(t+1)%n] = obs_prime
if done:
T = t+1
deltas[t%n] = reward - Qs[t%n]
else:
deltas[t%n] = reward + gamma * \
np.sum([policy_proba(policy, obs_prime, i, epsilon) * \
Q[obs_prime, i] for i in range(4)]) - Qs[t%n]
action = eps_greedy_policy(Q, obs_prime, epsilon, \
env.action_space_size)
Qs[(t+1)%n] = Q[obs_prime, action]
pis[(t+1)%n] = policy_proba(policy, obs_prime, action, epsilon)
tau = t-n+1
if tau > -1:
Z = 1
G = Qs[tau%n]
                # G = Q(S_tau, A_tau) + sum of delta_k, each weighted by the
                # accumulated product of gamma * pi along the backup path
                # (k runs from tau to min(tau+n-1, T-1) inclusive)
                for k in range(tau, min(tau+n-1, T-1) + 1):
                    G += Z*deltas[k%n]
                    Z *= gamma * pis[(k+1)%n]
s = states[tau%n]
a = actions[tau%n]
# Update state-action value function.
Q[s,a] += alpha * (G - Q[s,a])
# Make policy greedy w.r.t. Q.
action_values = [Q[s,i] for i in range(4)]
policy[s] = np.argmax(action_values)
epsilon = decay(epsilon)
if episode % 100 == 0:
print_episode(episode,n_episodes)
print_episode(n_episodes, n_episodes)
return policy
if __name__ == '__main__':
n = 4
alpha = 0.01
gamma = 1
epsilon = 1
n_episodes = 1000
env = GridWorld()
policy = n_step_tree_backup(env, n , alpha, gamma, epsilon, n_episodes)
test_policy(env, policy, 10)
|
[
"utils.test_policy",
"utils.print_episode",
"numpy.argmax",
"envs.GridWorld",
"numpy.zeros",
"sys.path.append",
"utils.eps_greedy_policy"
] |
[((51, 73), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (66, 73), False, 'import sys\n'), ((1122, 1133), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1130, 1133), True, 'import numpy as np\n'), ((1148, 1159), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1156, 1159), True, 'import numpy as np\n'), ((1169, 1180), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1177, 1180), True, 'import numpy as np\n'), ((1194, 1205), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1202, 1205), True, 'import numpy as np\n'), ((1216, 1227), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1224, 1227), True, 'import numpy as np\n'), ((3065, 3102), 'utils.print_episode', 'print_episode', (['n_episodes', 'n_episodes'], {}), '(n_episodes, n_episodes)\n', (3078, 3102), False, 'from utils import print_episode, eps_greedy_policy, test_policy\n'), ((3239, 3250), 'envs.GridWorld', 'GridWorld', ([], {}), '()\n', (3248, 3250), False, 'from envs import GridWorld\n'), ((3331, 3359), 'utils.test_policy', 'test_policy', (['env', 'policy', '(10)'], {}), '(env, policy, 10)\n', (3342, 3359), False, 'from utils import print_episode, eps_greedy_policy, test_policy\n'), ((1402, 1459), 'utils.eps_greedy_policy', 'eps_greedy_policy', (['Q', 'obs', 'epsilon', 'env.action_space_size'], {}), '(Q, obs, epsilon, env.action_space_size)\n', (1419, 1459), False, 'from utils import print_episode, eps_greedy_policy, test_policy\n'), ((3027, 3061), 'utils.print_episode', 'print_episode', (['episode', 'n_episodes'], {}), '(episode, n_episodes)\n', (3040, 3061), False, 'from utils import print_episode, eps_greedy_policy, test_policy\n'), ((2926, 2950), 'numpy.argmax', 'np.argmax', (['action_values'], {}), '(action_values)\n', (2935, 2950), True, 'import numpy as np\n'), ((2125, 2188), 'utils.eps_greedy_policy', 'eps_greedy_policy', (['Q', 'obs_prime', 'epsilon', 'env.action_space_size'], {}), '(Q, obs_prime, epsilon, env.action_space_size)\n', (2142, 2188), False, 'from utils import print_episode, eps_greedy_policy, test_policy\n')]
|
import sys
import os
import timeit
# use local python package rather than the system install
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../python"))
from bitboost import BitBoostRegressor
import numpy as np
import sklearn.metrics
nfeatures = 5
nexamples = 10000
data = np.random.choice(np.array([0.0, 1.0, 2.0], dtype=BitBoostRegressor.numt),
size=(nexamples * 2, nfeatures))
target = (1.22 * (data[:, 0] > 1.0)
+ 0.65 * (data[:, 1] > 1.0)
+ 0.94 * (data[:, 2] != 2.0)
+ 0.13 * (data[:, 3] == 1.0)).astype(BitBoostRegressor.numt)
dtrain, ytrain = data[0:nexamples, :], target[0:nexamples]
dtest, ytest = data[nexamples:, :], target[nexamples:]
bit = BitBoostRegressor()
bit.objective = "l2"
bit.discr_nbits = 4
bit.max_tree_depth = 5
bit.learning_rate = 0.5
bit.niterations = 50
bit.categorical_features = list(range(nfeatures))
bit.fit(dtrain, ytrain)
train_pred = bit.predict(dtrain)
test_pred = bit.predict(dtest)
train_mae = sklearn.metrics.mean_absolute_error(ytrain, train_pred)
test_mae = sklearn.metrics.mean_absolute_error(ytest, test_pred)
print(f"bit train MAE: {train_mae}")
print(f"bit test MAE: {test_mae}")
|
[
"os.path.dirname",
"numpy.array",
"bitboost.BitBoostRegressor"
] |
[((719, 738), 'bitboost.BitBoostRegressor', 'BitBoostRegressor', ([], {}), '()\n', (736, 738), False, 'from bitboost import BitBoostRegressor\n'), ((306, 361), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0]'], {'dtype': 'BitBoostRegressor.numt'}), '([0.0, 1.0, 2.0], dtype=BitBoostRegressor.numt)\n', (314, 361), True, 'import numpy as np\n'), ((126, 151), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (141, 151), False, 'import os\n')]
|
import os
import numpy as np
from six.moves import cPickle
from tensorflow import keras
import helper
from tfomics import utils, metrics, explain
#------------------------------------------------------------------------
model_names = ['residualbind']
activations = ['exponential', 'relu']
results_path = utils.make_directory('../results', 'task6')
params_path = utils.make_directory(results_path, 'model_params')
#------------------------------------------------------------------------
file_path = '../data/IRF1_400_h3k27ac.h5'
data = helper.load_data(file_path, reverse_compliment=True)
x_train, y_train, x_valid, y_valid, x_test, y_test = data
#------------------------------------------------------------------------
file_path = os.path.join(results_path, 'task6_classification_performance.tsv')
# open the results file here and close it after the training loops below,
# since the loops write per-model metrics into it
f = open(file_path, 'w')
f.write('%s\t%s\t%s\n'%('model', 'ave roc', 'ave pr'))
results = {}
for model_name in model_names:
for activation in activations:
keras.backend.clear_session()
# load model
model = helper.load_model(model_name, activation=activation)
name = model_name+'_'+activation+'_irf1'
print('model: ' + name)
# compile model
helper.compile_model(model)
# setup callbacks
callbacks = helper.get_callbacks(monitor='val_auroc', patience=20,
decay_patience=5, decay_factor=0.2)
# train model
history = model.fit(x_train, y_train,
epochs=100,
batch_size=100,
shuffle=True,
validation_data=(x_valid, y_valid),
callbacks=callbacks)
# save model
weights_path = os.path.join(params_path, name+'.hdf5')
model.save_weights(weights_path)
# predict test sequences and calculate performance metrics
predictions = model.predict(x_test)
mean_vals, std_vals = metrics.calculate_metrics(y_test, predictions, 'binary')
# print results to file
f.write("%s\t%.3f\t%.3f\n"%(name, mean_vals[1], mean_vals[2]))
# calculate saliency on a subset of data
true_index = np.where(y_test[:,0] == 1)[0]
X = x_test[true_index][:500]
results[name] = explain.saliency(model, X, class_index=0, layer=-1)
f.close()
# save results
file_path = os.path.join(results_path, 'task6_saliency_results.pickle')
with open(file_path, 'wb') as f:
cPickle.dump(results, f, protocol=cPickle.HIGHEST_PROTOCOL)
|
[
"helper.get_callbacks",
"helper.load_data",
"numpy.where",
"six.moves.cPickle.dump",
"os.path.join",
"helper.load_model",
"tfomics.explain.saliency",
"helper.compile_model",
"tensorflow.keras.backend.clear_session",
"tfomics.utils.make_directory",
"tfomics.metrics.calculate_metrics"
] |
[((338, 381), 'tfomics.utils.make_directory', 'utils.make_directory', (['"""../results"""', '"""task6"""'], {}), "('../results', 'task6')\n", (358, 381), False, 'from tfomics import utils, metrics, explain\n'), ((396, 446), 'tfomics.utils.make_directory', 'utils.make_directory', (['results_path', '"""model_params"""'], {}), "(results_path, 'model_params')\n", (416, 446), False, 'from tfomics import utils, metrics, explain\n'), ((573, 625), 'helper.load_data', 'helper.load_data', (['file_path'], {'reverse_compliment': '(True)'}), '(file_path, reverse_compliment=True)\n', (589, 625), False, 'import helper\n'), ((772, 838), 'os.path.join', 'os.path.join', (['results_path', '"""task6_classification_performance.tsv"""'], {}), "(results_path, 'task6_classification_performance.tsv')\n", (784, 838), False, 'import os\n'), ((2593, 2652), 'os.path.join', 'os.path.join', (['results_path', '"""task6_saliency_results.pickle"""'], {}), "(results_path, 'task6_saliency_results.pickle')\n", (2605, 2652), False, 'import os\n'), ((2690, 2749), 'six.moves.cPickle.dump', 'cPickle.dump', (['results', 'f'], {'protocol': 'cPickle.HIGHEST_PROTOCOL'}), '(results, f, protocol=cPickle.HIGHEST_PROTOCOL)\n', (2702, 2749), False, 'from six.moves import cPickle\n'), ((1034, 1063), 'tensorflow.keras.backend.clear_session', 'keras.backend.clear_session', ([], {}), '()\n', (1061, 1063), False, 'from tensorflow import keras\n'), ((1122, 1174), 'helper.load_model', 'helper.load_model', (['model_name'], {'activation': 'activation'}), '(model_name, activation=activation)\n', (1139, 1174), False, 'import helper\n'), ((1305, 1332), 'helper.compile_model', 'helper.compile_model', (['model'], {}), '(model)\n', (1325, 1332), False, 'import helper\n'), ((1388, 1482), 'helper.get_callbacks', 'helper.get_callbacks', ([], {'monitor': '"""val_auroc"""', 'patience': '(20)', 'decay_patience': '(5)', 'decay_factor': '(0.2)'}), "(monitor='val_auroc', patience=20, decay_patience=5,\n decay_factor=0.2)\n", (1408, 1482), False, 'import helper\n'), ((1910, 1951), 'os.path.join', 'os.path.join', (['params_path', "(name + '.hdf5')"], {}), "(params_path, name + '.hdf5')\n", (1922, 1951), False, 'import os\n'), ((2165, 2221), 'tfomics.metrics.calculate_metrics', 'metrics.calculate_metrics', (['y_test', 'predictions', '"""binary"""'], {}), "(y_test, predictions, 'binary')\n", (2190, 2221), False, 'from tfomics import utils, metrics, explain\n'), ((2513, 2564), 'tfomics.explain.saliency', 'explain.saliency', (['model', 'X'], {'class_index': '(0)', 'layer': '(-1)'}), '(model, X, class_index=0, layer=-1)\n', (2529, 2564), False, 'from tfomics import utils, metrics, explain\n'), ((2414, 2441), 'numpy.where', 'np.where', (['(y_test[:, 0] == 1)'], {}), '(y_test[:, 0] == 1)\n', (2422, 2441), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 14:15:38 2019
@author: Satish
"""
# doing the ortho-correction on the processed data from matchedFilter
import os
import numpy as np
import spectral as spy
import spectral.io.envi as envi
import spectral.algorithms as algo
from spectral.algorithms.detectors import MatchedFilter, matched_filter
import logging
import coloredlogs
import json
import shutil
import statistics
# set the logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("aviris_data_loader")
coloredlogs.install(level='DEBUG', logger=logger)
#DIRECTORY = "/media/data/satish/avng.jpl.nasa.gov/pub/test_unrect"
DIRECTORY = "../../data/raw_data"
#manual offset file load
try:
#Read the manually computed offset file
f = open('./manual_offset.json')
offset_data = json.load(f)
OFFSET_DICT = offset_data['OFFSET_DICT']
except:
print("No manual offset file found")
pass
FILES = []
for x in os.listdir(DIRECTORY):
if(os.path.isdir(os.path.join(DIRECTORY, x))):
FILES.append(x)
print(FILES)
#%% return image object
def image_obj(hdr, img):
"create a object of the image corresponding to certain header"
head = envi.read_envi_header(hdr)
param = envi.gen_params(head)
param.filename = img # spectral data file corresponding to .hdr file
interleave = head['interleave']
if (interleave == 'bip' or interleave == 'BIP'):
print("it is a bip")
from spectral.io.bipfile import BipFile
img_obj = BipFile(param, head)
if (interleave == 'bil' or interleave == 'BIL'):
print("It is a bil file")
from spectral.io.bilfile import BilFile
img_obj = BilFile(param, head)
return img_obj
# Use this function in case you have data other than the custom dataset
def ideal_ortho_correction(glt: np.ndarray, img: np.ndarray, b_val=0.0, output=None) -> np.ndarray:
"""does the ortho-correction of the file
glt: 2L, world-relative coordinates L1: y (rows), L2: x (columns)
img: 1L, unrectified, output from matched filter
output: 1L, rectified version of img, with shape: glt.shape
"""
if output is None:
output = np.zeros((glt.shape[0], glt.shape[1]))
if not np.array_equal(output.shape, [glt.shape[0], glt.shape[1]]):
print("image dimension of output arrary do not match the GLT file")
# getting the absolute even if GLT has negative values
# magnitude
glt_mag = np.absolute(glt)
# GLT value of zero means no data, extract this because python has zero-indexing.
glt_mask = np.all(glt_mag==0, axis=2)
output[glt_mask] = b_val
glt_mag[glt_mag>(img.shape[0]-1)] = 0
# now check the lookup and fill in the location, -1 to map to zero-indexing
# output[~glt_mask] = img[glt_mag[~glt_mask, 1] - 1, glt_mag[~glt_mask, 0] - 1]
output[~glt_mask] = img[glt_mag[~glt_mask, 1]-1, glt_mag[~glt_mask, 0]-1]
return output
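# Toy illustration (not from the original script) of the GLT lookup performed
# above: output[y, x] = img[glt[y, x, 1] - 1, glt[y, x, 0] - 1], and cells
# whose GLT entry is all zeros carry no data.
#
#     glt_toy = np.array([[[1, 2], [2, 1]],
#                         [[0, 0], [1, 1]]])
#     img_toy = np.array([[10., 11.],
#                         [20., 21.],
#                         [30., 31.]])
#     ideal_ortho_correction(glt_toy, img_toy)
#     # -> [[20., 11.],
#     #     [ 0., 10.]]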
def custom_ortho_correct_for_data(file_name, glt: np.ndarray, img: np.ndarray, OFFSET_DICT, b_val=0.0, output=None) -> np.ndarray:
"""does the ortho-correction of the file
glt: 2L, world-relative coordinates L1: y (rows), L2: x (columns)
img: 1L, unrectified, output from matched filter
output: 1L, rectified version of img, with shape: glt.shape
"""
if output is None:
output = np.zeros((glt.shape[0], glt.shape[1]))
if not np.array_equal(output.shape, [glt.shape[0], glt.shape[1]]):
print("image dimension of output arrary do not match the GLT file")
print(file_name)
if file_name in OFFSET_DICT.keys():
offset_mul = OFFSET_DICT[file_name]
else:
return 0
print(offset_mul)
off_v = int(offset_mul*1005)
img_readB = img[off_v:img.shape[0],:]
img_readA = img[0:off_v,:]
img_read = np.vstack((img_readB,img_readA))
if ((glt.shape[0]-img.shape[0])>0):
print("size mismatch. Fixing it...")
completion_shape = np.zeros((glt.shape[0]-img.shape[0], img.shape[1]))
img_read = np.vstack((img_read,completion_shape))
print(img_read.shape)
# getting the absolute even if GLT has negative values
# magnitude
glt_mag = np.absolute(glt)
# GLT value of zero means no data, extract this because python has zero-indexing.
glt_mask = np.all(glt_mag==0, axis=2)
output[glt_mask] = b_val
glt_mag[glt_mag>(img.shape[0]-1)] = 0
# now check the lookup and fill in the location, -1 to map to zero-indexing
output[~glt_mask] = img_read[glt_mag[~glt_mask,1]-1, glt_mag[~glt_mask,0]-1]
return output
#%% load file and rectify it in each band
for fname in FILES:
fname_glt = fname.split("_")[0]
sname_glt = f'{fname_glt}_rdn_glt' #geo-ref file for ortho-correction
hname_glt = f'{sname_glt}.hdr' #header file
glt_img = f'{DIRECTORY}/{fname}/{sname_glt}'
glt_hdr = f'{DIRECTORY}/{fname}/{hname_glt}'
print(glt_img, glt_hdr)
mf_folder = f'{DIRECTORY}/{fname}/{fname_glt}_rdn_v1f_clip_mfout'
try:
if (fname_glt not in OFFSET_DICT.keys()):
continue
if (os.path.exists(glt_hdr)):
glt_data_obj = image_obj(glt_hdr, glt_img)
glt = glt_data_obj.read_bands([0,1])
else:
continue
except:
pass
#mf_rect_path = f'/media/data/satish/detector_bank_input/corrected_output'
mf_rect_folder = f'{DIRECTORY}/{fname}/{fname_glt}_rect'
if not(os.path.isdir(mf_rect_folder)):
os.mkdir(mf_rect_folder)
print("\nDirectory", mf_rect_folder ," created.")
elif os.path.isdir(mf_rect_folder):
print("\nDirectory", mf_rect_folder ," already exists..deleting it")
shutil.rmtree(mf_rect_folder)
os.mkdir(mf_rect_folder)
print("\nNew Directory", mf_rect_folder ," created.")
for mfname in os.listdir(mf_folder):
print("Ortho-correcting file", mfname)
mf_filename = f'{mf_folder}/{mfname}'
img_unrect = np.load(mf_filename)
print(img_unrect.shape)
'''
        use ideal_ortho_correction (called as below) if you have any other
        dataset; custom_ortho_correct_for_data uses OFFSET_DICT to correct the
        row positions in each band.
        rect_img = ideal_ortho_correction(glt, img_unrect)
'''
rect_img = custom_ortho_correct_for_data(fname_glt, glt, img_unrect, OFFSET_DICT)
rect_filename = f'{mf_rect_folder}/{mfname}'
np.save(rect_filename, rect_img)
|
[
"logging.getLogger",
"spectral.io.envi.read_envi_header",
"spectral.io.envi.gen_params",
"numpy.save",
"os.path.exists",
"os.listdir",
"os.path.isdir",
"numpy.vstack",
"os.mkdir",
"spectral.io.bipfile.BipFile",
"logging.basicConfig",
"coloredlogs.install",
"numpy.absolute",
"os.path.join",
"spectral.io.bilfile.BilFile",
"numpy.zeros",
"numpy.array_equal",
"shutil.rmtree",
"json.load",
"numpy.all",
"numpy.load"
] |
[((467, 506), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (486, 506), False, 'import logging\n'), ((516, 555), 'logging.getLogger', 'logging.getLogger', (['"""aviris_data_loader"""'], {}), "('aviris_data_loader')\n", (533, 555), False, 'import logging\n'), ((556, 605), 'coloredlogs.install', 'coloredlogs.install', ([], {'level': '"""DEBUG"""', 'logger': 'logger'}), "(level='DEBUG', logger=logger)\n", (575, 605), False, 'import coloredlogs\n'), ((976, 997), 'os.listdir', 'os.listdir', (['DIRECTORY'], {}), '(DIRECTORY)\n', (986, 997), False, 'import os\n'), ((839, 851), 'json.load', 'json.load', (['f'], {}), '(f)\n', (848, 851), False, 'import json\n'), ((1228, 1254), 'spectral.io.envi.read_envi_header', 'envi.read_envi_header', (['hdr'], {}), '(hdr)\n', (1249, 1254), True, 'import spectral.io.envi as envi\n'), ((1267, 1288), 'spectral.io.envi.gen_params', 'envi.gen_params', (['head'], {}), '(head)\n', (1282, 1288), True, 'import spectral.io.envi as envi\n'), ((2513, 2529), 'numpy.absolute', 'np.absolute', (['glt'], {}), '(glt)\n', (2524, 2529), True, 'import numpy as np\n'), ((2632, 2660), 'numpy.all', 'np.all', (['(glt_mag == 0)'], {'axis': '(2)'}), '(glt_mag == 0, axis=2)\n', (2638, 2660), True, 'import numpy as np\n'), ((3873, 3906), 'numpy.vstack', 'np.vstack', (['(img_readB, img_readA)'], {}), '((img_readB, img_readA))\n', (3882, 3906), True, 'import numpy as np\n'), ((4243, 4259), 'numpy.absolute', 'np.absolute', (['glt'], {}), '(glt)\n', (4254, 4259), True, 'import numpy as np\n'), ((4361, 4389), 'numpy.all', 'np.all', (['(glt_mag == 0)'], {'axis': '(2)'}), '(glt_mag == 0, axis=2)\n', (4367, 4389), True, 'import numpy as np\n'), ((5906, 5927), 'os.listdir', 'os.listdir', (['mf_folder'], {}), '(mf_folder)\n', (5916, 5927), False, 'import os\n'), ((1028, 1054), 'os.path.join', 'os.path.join', (['DIRECTORY', 'x'], {}), '(DIRECTORY, x)\n', (1040, 1054), False, 'import os\n'), ((1553, 1573), 'spectral.io.bipfile.BipFile', 'BipFile', (['param', 'head'], {}), '(param, head)\n', (1560, 1573), False, 'from spectral.io.bipfile import BipFile\n'), ((1736, 1756), 'spectral.io.bilfile.BilFile', 'BilFile', (['param', 'head'], {}), '(param, head)\n', (1743, 1756), False, 'from spectral.io.bilfile import BilFile\n'), ((2238, 2276), 'numpy.zeros', 'np.zeros', (['(glt.shape[0], glt.shape[1])'], {}), '((glt.shape[0], glt.shape[1]))\n', (2246, 2276), True, 'import numpy as np\n'), ((2288, 2346), 'numpy.array_equal', 'np.array_equal', (['output.shape', '[glt.shape[0], glt.shape[1]]'], {}), '(output.shape, [glt.shape[0], glt.shape[1]])\n', (2302, 2346), True, 'import numpy as np\n'), ((3407, 3445), 'numpy.zeros', 'np.zeros', (['(glt.shape[0], glt.shape[1])'], {}), '((glt.shape[0], glt.shape[1]))\n', (3415, 3445), True, 'import numpy as np\n'), ((3457, 3515), 'numpy.array_equal', 'np.array_equal', (['output.shape', '[glt.shape[0], glt.shape[1]]'], {}), '(output.shape, [glt.shape[0], glt.shape[1]])\n', (3471, 3515), True, 'import numpy as np\n'), ((4018, 4071), 'numpy.zeros', 'np.zeros', (['(glt.shape[0] - img.shape[0], img.shape[1])'], {}), '((glt.shape[0] - img.shape[0], img.shape[1]))\n', (4026, 4071), True, 'import numpy as np\n'), ((4089, 4128), 'numpy.vstack', 'np.vstack', (['(img_read, completion_shape)'], {}), '((img_read, completion_shape))\n', (4098, 4128), True, 'import numpy as np\n'), ((5168, 5191), 'os.path.exists', 'os.path.exists', (['glt_hdr'], {}), '(glt_hdr)\n', (5182, 5191), False, 'import os\n'), ((5510, 5539), 
'os.path.isdir', 'os.path.isdir', (['mf_rect_folder'], {}), '(mf_rect_folder)\n', (5523, 5539), False, 'import os\n'), ((5550, 5574), 'os.mkdir', 'os.mkdir', (['mf_rect_folder'], {}), '(mf_rect_folder)\n', (5558, 5574), False, 'import os\n'), ((5642, 5671), 'os.path.isdir', 'os.path.isdir', (['mf_rect_folder'], {}), '(mf_rect_folder)\n', (5655, 5671), False, 'import os\n'), ((6043, 6063), 'numpy.load', 'np.load', (['mf_filename'], {}), '(mf_filename)\n', (6050, 6063), True, 'import numpy as np\n'), ((6518, 6550), 'numpy.save', 'np.save', (['rect_filename', 'rect_img'], {}), '(rect_filename, rect_img)\n', (6525, 6550), True, 'import numpy as np\n'), ((5758, 5787), 'shutil.rmtree', 'shutil.rmtree', (['mf_rect_folder'], {}), '(mf_rect_folder)\n', (5771, 5787), False, 'import shutil\n'), ((5796, 5820), 'os.mkdir', 'os.mkdir', (['mf_rect_folder'], {}), '(mf_rect_folder)\n', (5804, 5820), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import csv
import tensorflow as tf
from config import Config
from DataFeeder import DataFeeder,TestData
from model import DKT
from sklearn.metrics import f1_score,precision_score,recall_score
indices = [precision_score,recall_score,f1_score]
def make_prediction(folderName,index,max_iters = 200,target_key = 'FirstCorrect'):
tf.reset_default_graph()
cfg = Config(dataFile = '%s/Training.csv'%folderName)
cfg.load()
DF_train = DataFeeder(cfg)
# problem vectors cfg.probVecs
features = [['ProblemID','inp',[cfg.numP,8],False],
['FirstCorrect','inp',[2,8],True],
['EverCorrect','inp',[2,8],True],
['UsedHint','inp',[2,8],True]]
targets = [['FirstCorrect',2 , 1. , [1., 1.2]]]
model4train = DKT(features = features,
targets = targets,
keep_prob = 0.1,
num_items = cfg.numP,
rnn_units = [32,32],
training = True,
lr_decay = [1e-3,0.9,50])
model4test = DKT(features = features,
targets = targets,
keep_prob = 1.,
num_items = cfg.numP,
rnn_units = [32,32],
training = False,
lr_decay = [5*1e-2,0.9,100])
session = tf.Session()
session.run(tf.global_variables_initializer())
print('training on %s'%folderName)
for i in range(1,max_iters+1):
inputs,targets,bu_masks = DF_train.next_batch(batch_size = DF_train.size,
cum = True)
feed_data = model4train.zip_data(inputs,model4train.input_holders)
feed_data_t = model4train.zip_data(targets,model4train.target_holders)
feed_data.update(feed_data_t)
_,predicts,costs = session.run([model4train.trainop,
model4train.predicts,
model4train.costs] ,
feed_dict=feed_data)
if i%max_iters == 0:
for name,values in predicts.items():
# y_pred = values[bu_masks]
# y_true = targets[name][bu_masks]
# indices = [func(y_true,y_pred) for func in evalue_indices]
print('final cost',round(costs[target_key],3))
cfg_test = Config(dataFile = '%s/Test.csv'%folderName)
cfg_test.load()
TD = TestData(cfg_test)
result = []
predictions = []
groundtruth = []
for data,(inputs,targets,seqIndices) in TD.export():
feed_data = model4test.zip_data(inputs,model4test.input_holders)
predicts,probablities = session.run([model4test.predicts,
model4test.probablities],feed_dict = feed_data)
probs_on_correct = probablities[target_key][0,np.arange(inputs['lengths'][0]),seqIndices,1]
y_pred = predicts[target_key][0,np.arange(inputs['lengths'][0]),seqIndices]
y_true = targets[target_key][0,:]
predictions.append(y_pred)
groundtruth.append(y_true)
for i in range(data.shape[0]):
raw_data = list(data.iloc[i,:].values)
raw_data +=[float(probs_on_correct[i]) , int(y_pred[i]) , index]
result.append(raw_data)
y_true = np.concatenate(groundtruth,axis=0)
y_pred = np.concatenate(predictions,axis=0)
index = [round(func(y_true,y_pred),3) for func in indices]
print(' '*4,'testing',index)
return result,list(data.columns)
def main(datafolder):
total_predicts = []
for i in range(10):
predicts,labels = make_prediction(folderName = datafolder+'/fold%d'%i,
index = i,
max_iters = 400)
total_predicts.extend(predicts)
fobj = open('cv_predict.csv','w',newline='')
writer = csv.writer(fobj)
writer.writerow(labels+['pCorrectProblem','prediction','fold'])
for line in total_predicts:
writer.writerow(line)
fobj.close()
return True
if __name__=='__main__':
dataFolder = r'C:\Users\G7\Desktop\itemRL\DataChellenge\CV'
main(dataFolder)
|
[
"tensorflow.reset_default_graph",
"model.DKT",
"tensorflow.Session",
"config.Config",
"csv.writer",
"DataFeeder.TestData",
"DataFeeder.DataFeeder",
"tensorflow.global_variables_initializer",
"numpy.concatenate",
"numpy.arange"
] |
[((388, 412), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (410, 412), True, 'import tensorflow as tf\n'), ((424, 471), 'config.Config', 'Config', ([], {'dataFile': "('%s/Training.csv' % folderName)"}), "(dataFile='%s/Training.csv' % folderName)\n", (430, 471), False, 'from config import Config\n'), ((504, 519), 'DataFeeder.DataFeeder', 'DataFeeder', (['cfg'], {}), '(cfg)\n', (514, 519), False, 'from DataFeeder import DataFeeder, TestData\n'), ((844, 984), 'model.DKT', 'DKT', ([], {'features': 'features', 'targets': 'targets', 'keep_prob': '(0.1)', 'num_items': 'cfg.numP', 'rnn_units': '[32, 32]', 'training': '(True)', 'lr_decay': '[0.001, 0.9, 50]'}), '(features=features, targets=targets, keep_prob=0.1, num_items=cfg.numP,\n rnn_units=[32, 32], training=True, lr_decay=[0.001, 0.9, 50])\n', (847, 984), False, 'from model import DKT\n'), ((1158, 1303), 'model.DKT', 'DKT', ([], {'features': 'features', 'targets': 'targets', 'keep_prob': '(1.0)', 'num_items': 'cfg.numP', 'rnn_units': '[32, 32]', 'training': '(False)', 'lr_decay': '[5 * 0.01, 0.9, 100]'}), '(features=features, targets=targets, keep_prob=1.0, num_items=cfg.numP,\n rnn_units=[32, 32], training=False, lr_decay=[5 * 0.01, 0.9, 100])\n', (1161, 1303), False, 'from model import DKT\n'), ((1467, 1479), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1477, 1479), True, 'import tensorflow as tf\n'), ((2543, 2586), 'config.Config', 'Config', ([], {'dataFile': "('%s/Test.csv' % folderName)"}), "(dataFile='%s/Test.csv' % folderName)\n", (2549, 2586), False, 'from config import Config\n'), ((2624, 2642), 'DataFeeder.TestData', 'TestData', (['cfg_test'], {}), '(cfg_test)\n', (2632, 2642), False, 'from DataFeeder import DataFeeder, TestData\n'), ((3542, 3577), 'numpy.concatenate', 'np.concatenate', (['groundtruth'], {'axis': '(0)'}), '(groundtruth, axis=0)\n', (3556, 3577), True, 'import numpy as np\n'), ((3591, 3626), 'numpy.concatenate', 'np.concatenate', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (3605, 3626), True, 'import numpy as np\n'), ((4140, 4156), 'csv.writer', 'csv.writer', (['fobj'], {}), '(fobj)\n', (4150, 4156), False, 'import csv\n'), ((1497, 1530), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1528, 1530), True, 'import tensorflow as tf\n'), ((3055, 3086), 'numpy.arange', 'np.arange', (["inputs['lengths'][0]"], {}), "(inputs['lengths'][0])\n", (3064, 3086), True, 'import numpy as np\n'), ((3152, 3183), 'numpy.arange', 'np.arange', (["inputs['lengths'][0]"], {}), "(inputs['lengths'][0])\n", (3161, 3183), True, 'import numpy as np\n')]
|
import numpy as np
def ratios(pops1, pops2):
    # Derive per-age-group change ratios, a newborn ratio and an oldest-group
    # ratio from two consecutive population snapshots (each a pair of
    # age-group count lists, one per sex).
    totals1 = np.array(pops1[0]) + np.array(pops1[1])
totals2 = np.array(pops2[0]) + np.array(pops2[1])
change_ratio = np.delete(totals2, 0) / np.delete(totals1, -1)
change_ratio = np.delete(change_ratio, -1)
baby_ratio = totals2[0] / np.sum(np.array(pops1[1])[3:10])
tail_ratio = totals2[-1] / np.sum(totals1[-2:])
return change_ratio.tolist(), baby_ratio, tail_ratio
def simulate(pops, change_ratio, baby_ratio, tail_ratio):
    # Project the next period's population from the current snapshot, splitting
    # newborns with an assumed 105:100 sex ratio at birth.
    estimates = [[], []]
mothers = np.sum(np.array(pops[1])[3:10])
estimates[0].append(mothers * baby_ratio * (105 / (105 + 100)))
estimates[1].append(mothers * baby_ratio * (100 / (105 + 100)))
males = (np.array(pops[0])[:-2] * np.array(change_ratio)).tolist()
females = (np.array(pops[1])[:-2] * np.array(change_ratio)).tolist()
estimates[0] += males
estimates[1] += females
estimates[0].append(np.sum(pops[0][-2:]) * tail_ratio)
estimates[1].append(np.sum(pops[1][-2:]) * tail_ratio)
return estimates
|
[
"numpy.delete",
"numpy.array",
"numpy.sum"
] |
[((241, 268), 'numpy.delete', 'np.delete', (['change_ratio', '(-1)'], {}), '(change_ratio, -1)\n', (250, 268), True, 'import numpy as np\n'), ((61, 79), 'numpy.array', 'np.array', (['pops1[0]'], {}), '(pops1[0])\n', (69, 79), True, 'import numpy as np\n'), ((82, 100), 'numpy.array', 'np.array', (['pops1[1]'], {}), '(pops1[1])\n', (90, 100), True, 'import numpy as np\n'), ((115, 133), 'numpy.array', 'np.array', (['pops2[0]'], {}), '(pops2[0])\n', (123, 133), True, 'import numpy as np\n'), ((136, 154), 'numpy.array', 'np.array', (['pops2[1]'], {}), '(pops2[1])\n', (144, 154), True, 'import numpy as np\n'), ((175, 196), 'numpy.delete', 'np.delete', (['totals2', '(0)'], {}), '(totals2, 0)\n', (184, 196), True, 'import numpy as np\n'), ((199, 221), 'numpy.delete', 'np.delete', (['totals1', '(-1)'], {}), '(totals1, -1)\n', (208, 221), True, 'import numpy as np\n'), ((363, 383), 'numpy.sum', 'np.sum', (['totals1[-2:]'], {}), '(totals1[-2:])\n', (369, 383), True, 'import numpy as np\n'), ((549, 566), 'numpy.array', 'np.array', (['pops[1]'], {}), '(pops[1])\n', (557, 566), True, 'import numpy as np\n'), ((934, 954), 'numpy.sum', 'np.sum', (['pops[0][-2:]'], {}), '(pops[0][-2:])\n', (940, 954), True, 'import numpy as np\n'), ((993, 1013), 'numpy.sum', 'np.sum', (['pops[1][-2:]'], {}), '(pops[1][-2:])\n', (999, 1013), True, 'import numpy as np\n'), ((306, 324), 'numpy.array', 'np.array', (['pops1[1]'], {}), '(pops1[1])\n', (314, 324), True, 'import numpy as np\n'), ((749, 771), 'numpy.array', 'np.array', (['change_ratio'], {}), '(change_ratio)\n', (757, 771), True, 'import numpy as np\n'), ((822, 844), 'numpy.array', 'np.array', (['change_ratio'], {}), '(change_ratio)\n', (830, 844), True, 'import numpy as np\n'), ((724, 741), 'numpy.array', 'np.array', (['pops[0]'], {}), '(pops[0])\n', (732, 741), True, 'import numpy as np\n'), ((797, 814), 'numpy.array', 'np.array', (['pops[1]'], {}), '(pops[1])\n', (805, 814), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# Set fontsize larger for latex plots
matplotlib.rcParams.update({'font.size': 20})
# Generate data from file
x, y = np.genfromtxt("bin/python_Aufgabe2.txt", unpack=True)
m, n = x[-1], y[-1]
# Plotting
plt.figure(figsize=(12,7))
plt.grid()
plt.xlabel("x")
plt.ylabel("y")
x_new = np.linspace(min(x)-x[:-1].std()/2, max(x)+x[:-1].std()/2)
plt.plot(x[:-1], y[:-1], "x", mew=2., alpha=1, label="Data points")
plt.plot(x_new, m*x_new+n, "-", linewidth=3, label="Best-fit line")
plt.legend()
plt.tight_layout()
plt.savefig("bin/figure.pdf", dpi=1200)
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.rcParams.update",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.genfromtxt",
"matplotlib.pyplot.legend"
] |
[((108, 153), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (134, 153), False, 'import matplotlib\n'), ((188, 241), 'numpy.genfromtxt', 'np.genfromtxt', (['"""bin/python_Aufgabe2.txt"""'], {'unpack': '(True)'}), "('bin/python_Aufgabe2.txt', unpack=True)\n", (201, 241), True, 'import numpy as np\n'), ((275, 302), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (285, 302), True, 'import matplotlib.pyplot as plt\n'), ((302, 312), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (310, 312), True, 'import matplotlib.pyplot as plt\n'), ((313, 328), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (323, 328), True, 'import matplotlib.pyplot as plt\n'), ((329, 344), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (339, 344), True, 'import matplotlib.pyplot as plt\n'), ((411, 479), 'matplotlib.pyplot.plot', 'plt.plot', (['x[:-1]', 'y[:-1]', '"""x"""'], {'mew': '(2.0)', 'alpha': '(2)', 'label': '"""Datenpunkte"""'}), "(x[:-1], y[:-1], 'x', mew=2.0, alpha=2, label='Datenpunkte')\n", (419, 479), True, 'import matplotlib.pyplot as plt\n'), ((479, 553), 'matplotlib.pyplot.plot', 'plt.plot', (['x_new', '(m * x_new + n)', '"""-"""'], {'linewidth': '(3)', 'label': '"""Ausgleichsgerade"""'}), "(x_new, m * x_new + n, '-', linewidth=3, label='Ausgleichsgerade')\n", (487, 553), True, 'import matplotlib.pyplot as plt\n'), ((550, 562), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (560, 562), True, 'import matplotlib.pyplot as plt\n'), ((563, 581), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (579, 581), True, 'import matplotlib.pyplot as plt\n'), ((582, 621), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bin/figure.pdf"""'], {'dpi': '(1200)'}), "('bin/figure.pdf', dpi=1200)\n", (593, 621), True, 'import matplotlib.pyplot as plt\n')]
|
'''
Created on April 15, 2018
@author: <NAME>
'''
import numpy as np
import warnings
from scipy.stats import gamma, lognorm
from sklearn.linear_model import ElasticNet
from spn.structure.leaves.conditional.Conditional import Conditional_Gaussian, Conditional_Poisson, \
Conditional_Bernoulli
import statsmodels.api as sm
from os.path import dirname
path = dirname(__file__) + "/"
def update_glm_parameters_mle(node, data, scope): # assume data is tuple (output np array, conditional np array)
assert len(scope) == 1, 'more than one output variable in scope?'
data = data[~np.isnan(data)].reshape(data.shape)
dataOut = data[:, :len(scope)]
dataIn = data[:, len(scope):]
assert dataOut.shape[1] == 1, 'more than one output variable in scope?'
if dataOut.shape[0] == 0:
return
dataIn = np.c_[dataIn, np.ones((dataIn.shape[0]))]
if isinstance(node, Conditional_Gaussian):
reg = ElasticNet(random_state=0, alpha=0.01, max_iter=2000, fit_intercept=False)
reg.fit(dataIn, dataOut)
if reg.n_iter_ < reg.max_iter:
node.weights = reg.coef_.tolist()
return
family = sm.families.Gaussian()
elif isinstance(node, Conditional_Poisson):
family = sm.families.Poisson()
elif isinstance(node, Conditional_Bernoulli):
family = sm.families.Binomial()
else:
raise Exception("Unknown conditional " + str(type(node)))
glmfit = sm.GLM(dataOut, dataIn, family=family).fit_regularized(alpha=0.0001, maxiter=5)
node.weights = glmfit.params.tolist()
return
    # NOTE: the early return above means the TensorFlow Probability branch
    # below is currently unreachable and is kept only for reference.
    try:
import tensorflow as tf
import tensorflow_probability as tfp;
tfd = tfp.distributions
dataOut = dataOut.reshape(-1)
w, linear_response, is_converged, num_iter = tfp.glm.fit(
model_matrix=tf.constant(dataIn),
response=tf.constant(dataOut),
model=tfp.glm.Poisson(),
l2_regularizer=0.0001)
log_likelihood = tfp.glm.Poisson().log_prob(tf.constant(dataOut), linear_response)
with tf.Session() as sess:
[w_, linear_response_, is_converged_, num_iter_, Y_, log_likelihood_] = sess.run(
[w, linear_response, is_converged, num_iter, tf.constant(dataOut), log_likelihood])
node.weights = w_
print("node.weights", node.weights)
# glmfit = sm.GLM(dataOut, dataIn, family=family).fit_regularized(alpha=0.001)
# node.weights = glmfit.params
# # if glmfit.converged is False:
# # warnings.warn("Maximum number of iterations reached")
except Exception:
glmfit = sm.GLM(dataOut, dataIn, family=family).fit_regularized(alpha=0.0001)
node.weights = glmfit.params
print("node.weights with glmfit", node.weights)
np.savez(path + "tmp_glm_mle_data", dataIn=dataIn, dataOut=dataOut)
|
[
"statsmodels.api.families.Poisson",
"numpy.savez",
"sklearn.linear_model.ElasticNet",
"numpy.ones",
"statsmodels.api.families.Gaussian",
"tensorflow_probability.glm.Poisson",
"tensorflow.Session",
"statsmodels.api.families.Binomial",
"os.path.dirname",
"tensorflow.constant",
"numpy.isnan",
"statsmodels.api.GLM"
] |
[((364, 381), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (371, 381), False, 'from os.path import dirname\n'), ((939, 1013), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {'random_state': '(0)', 'alpha': '(0.01)', 'max_iter': '(2000)', 'fit_intercept': '(False)'}), '(random_state=0, alpha=0.01, max_iter=2000, fit_intercept=False)\n', (949, 1013), False, 'from sklearn.linear_model import ElasticNet\n'), ((1171, 1193), 'statsmodels.api.families.Gaussian', 'sm.families.Gaussian', ([], {}), '()\n', (1191, 1193), True, 'import statsmodels.api as sm\n'), ((849, 873), 'numpy.ones', 'np.ones', (['dataIn.shape[0]'], {}), '(dataIn.shape[0])\n', (856, 873), True, 'import numpy as np\n'), ((1259, 1280), 'statsmodels.api.families.Poisson', 'sm.families.Poisson', ([], {}), '()\n', (1278, 1280), True, 'import statsmodels.api as sm\n'), ((1461, 1499), 'statsmodels.api.GLM', 'sm.GLM', (['dataOut', 'dataIn'], {'family': 'family'}), '(dataOut, dataIn, family=family)\n', (1467, 1499), True, 'import statsmodels.api as sm\n'), ((2031, 2051), 'tensorflow.constant', 'tf.constant', (['dataOut'], {}), '(dataOut)\n', (2042, 2051), True, 'import tensorflow as tf\n'), ((2084, 2096), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2094, 2096), True, 'import tensorflow as tf\n'), ((2818, 2885), 'numpy.savez', 'np.savez', (["(path + 'tmp_glm_mle_data')"], {'dataIn': 'dataIn', 'dataOut': 'dataOut'}), "(path + 'tmp_glm_mle_data', dataIn=dataIn, dataOut=dataOut)\n", (2826, 2885), True, 'import numpy as np\n'), ((1348, 1370), 'statsmodels.api.families.Binomial', 'sm.families.Binomial', ([], {}), '()\n', (1368, 1370), True, 'import statsmodels.api as sm\n'), ((1843, 1862), 'tensorflow.constant', 'tf.constant', (['dataIn'], {}), '(dataIn)\n', (1854, 1862), True, 'import tensorflow as tf\n'), ((1885, 1905), 'tensorflow.constant', 'tf.constant', (['dataOut'], {}), '(dataOut)\n', (1896, 1905), True, 'import tensorflow as tf\n'), ((1925, 1942), 'tensorflow_probability.glm.Poisson', 'tfp.glm.Poisson', ([], {}), '()\n', (1940, 1942), True, 'import tensorflow_probability as tfp\n'), ((2004, 2021), 'tensorflow_probability.glm.Poisson', 'tfp.glm.Poisson', ([], {}), '()\n', (2019, 2021), True, 'import tensorflow_probability as tfp\n'), ((592, 606), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (600, 606), True, 'import numpy as np\n'), ((2261, 2281), 'tensorflow.constant', 'tf.constant', (['dataOut'], {}), '(dataOut)\n', (2272, 2281), True, 'import tensorflow as tf\n'), ((2648, 2686), 'statsmodels.api.GLM', 'sm.GLM', (['dataOut', 'dataIn'], {'family': 'family'}), '(dataOut, dataIn, family=family)\n', (2654, 2686), True, 'import statsmodels.api as sm\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from NumPyNet.activations import Activations
from NumPyNet.utils import _check_activation
from NumPyNet.utils import check_is_fitted
import numpy as np
from NumPyNet.layers.base import BaseLayer
__author__ = ['<NAME>', '<NAME>']
__email__ = ['<EMAIL>', '<EMAIL>']
class Activation_layer(BaseLayer):
'''
Activation layer
Parameters
----------
input_shape : tuple (default=None)
Input dimensions as tuple of 4 integers
activation : str or Activation object
Activation function to apply into the layer.
Example
-------
>>> import os
>>> import pylab as plt
>>> from PIL import Image
>>> from NumPyNet import activations
>>>
>>> activation_func = activations.Relu()
>>>
>>> img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
>>> float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
>>>
>>> filename = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'dog.jpg')
>>> inpt = np.asarray(Image.open(filename), dtype=float)
>>> inpt.setflags(write=1)
>>> inpt = img_2_float(inpt)
>>> # Relu activation constrain
>>> inpt = inpt * 2 - 1
>>>
>>> # add batch = 1
>>> inpt = np.expand_dims(inpt, axis=0)
>>>
>>> layer = Activation_layer(input_shape=inpt.shape, activation=activation_func)
>>>
>>> # FORWARD
>>>
>>> layer.forward(inpt)
>>> forward_out = layer.output
>>> print(layer)
>>>
>>> # BACKWARD
>>>
>>> layer.delta = np.ones(shape=inpt.shape, dtype=float)
>>> delta = np.zeros(shape=inpt.shape, dtype=float)
>>> layer.backward(delta, copy=True)
>>>
>>> # Visualizations
>>>
>>> fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(10, 5))
>>> fig.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.15)
>>>
>>> fig.suptitle('Activation Layer : {}'.format(activation_func.name))
>>>
>>> ax1.imshow(float_2_img(inpt[0]))
>>> ax1.set_title('Original image')
>>> ax1.axis('off')
>>>
>>> ax2.imshow(float_2_img(forward_out[0]))
>>> ax2.set_title("Forward")
>>> ax2.axis("off")
>>>
>>> ax3.imshow(float_2_img(delta[0]))
>>> ax3.set_title('Backward')
>>> ax3.axis('off')
>>>
>>> fig.tight_layout()
>>> plt.show()
.. image:: ../../../NumPyNet/images/activation_relu.png
References
----------
- TODO
'''
def __init__(self, input_shape=None, activation=Activations, **kwargs):
activation = _check_activation(self, activation)
self.activation = activation.activate
self.gradient = activation.gradient
super(Activation_layer, self).__init__(input_shape=input_shape)
def __str__(self):
'''
Printer
'''
batch, out_width, out_height, out_channels = self.out_shape
return 'activ {0:>4d} x{1:>4d} x{2:>4d} x{3:>4d} -> {0:>4d} x{1:>4d} x{2:>4d} x{3:>4d}'.format(
batch, out_width, out_height, out_channels)
def forward(self, inpt, copy=True):
'''
Forward of the activation layer, apply the selected activation function to
the input.
Parameters
----------
inpt: array-like
Input array to activate.
copy: bool (default=True)
If True make a copy of the input before applying the activation.
Returns
-------
self
'''
self._check_dims(shape=self.out_shape, arr=inpt, func='Forward')
self.output = self.activation(inpt, copy=copy)
self.delta = np.zeros(shape=self.out_shape, dtype=float)
return self
def backward(self, delta, copy=False):
'''
    Backward pass of the activation layer: scale the incoming delta by the
    gradient of the activation function evaluated on the stored output.
    Parameters
    ----------
    delta : array-like
      Global error to be backpropagated.
    copy : bool (default=False)
      If True, evaluate the gradient on a copy of the stored output,
      otherwise the output buffer may be modified in place.
Returns
-------
self
'''
check_is_fitted(self, 'delta')
self._check_dims(shape=self.out_shape, arr=delta, func='Backward')
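    # chain rule: scale the error arriving from the next layer by the
    # activation gradient, then copy it into the previous layer's delta buffer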
self.delta *= self.gradient(self.output, copy=copy)
delta[:] = self.delta
return self
if __name__ == '__main__':
import os
import pylab as plt
from PIL import Image
from NumPyNet import activations
activation_func = activations.Hardtan()
img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
filename = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'dog.jpg')
inpt = np.asarray(Image.open(filename), dtype=float)
inpt.setflags(write=1)
inpt = img_2_float(inpt)
# Relu activation constrain
inpt = inpt * 2 - 1
# add batch = 1
inpt = np.expand_dims(inpt, axis=0)
layer = Activation_layer(input_shape=inpt.shape, activation=activation_func)
# FORWARD
layer.forward(inpt)
forward_out = layer.output
print(layer)
# BACKWARD
layer.delta = np.ones(shape=inpt.shape, dtype=float)
delta = np.zeros(shape=inpt.shape, dtype=float)
layer.backward(delta, copy=True)
# Visualizations
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(10, 5))
fig.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.15)
fig.suptitle('Activation Layer\nfunction : {}'.format(activation_func.name))
ax1.imshow(float_2_img(inpt[0]))
ax1.set_title('Original image')
ax1.axis('off')
ax2.imshow(float_2_img(forward_out[0]))
ax2.set_title("Forward")
ax2.axis("off")
ax3.imshow(float_2_img(delta[0]))
ax3.set_title('Backward')
ax3.axis('off')
fig.tight_layout()
plt.show()
|
[
"PIL.Image.open",
"numpy.ones",
"NumPyNet.utils._check_activation",
"NumPyNet.utils.check_is_fitted",
"os.path.dirname",
"numpy.zeros",
"NumPyNet.activations.Hardtan",
"numpy.expand_dims",
"pylab.subplots",
"pylab.show"
] |
[((4263, 4284), 'NumPyNet.activations.Hardtan', 'activations.Hardtan', ([], {}), '()\n', (4282, 4284), False, 'from NumPyNet import activations\n'), ((4751, 4779), 'numpy.expand_dims', 'np.expand_dims', (['inpt'], {'axis': '(0)'}), '(inpt, axis=0)\n', (4765, 4779), True, 'import numpy as np\n'), ((4971, 5009), 'numpy.ones', 'np.ones', ([], {'shape': 'inpt.shape', 'dtype': 'float'}), '(shape=inpt.shape, dtype=float)\n', (4978, 5009), True, 'import numpy as np\n'), ((5020, 5059), 'numpy.zeros', 'np.zeros', ([], {'shape': 'inpt.shape', 'dtype': 'float'}), '(shape=inpt.shape, dtype=float)\n', (5028, 5059), True, 'import numpy as np\n'), ((5141, 5188), 'pylab.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': '(10, 5)'}), '(nrows=1, ncols=3, figsize=(10, 5))\n', (5153, 5188), True, 'import pylab as plt\n'), ((5619, 5629), 'pylab.show', 'plt.show', ([], {}), '()\n', (5627, 5629), True, 'import pylab as plt\n'), ((2613, 2648), 'NumPyNet.utils._check_activation', '_check_activation', (['self', 'activation'], {}), '(self, activation)\n', (2630, 2648), False, 'from NumPyNet.utils import _check_activation\n'), ((3607, 3650), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.out_shape', 'dtype': 'float'}), '(shape=self.out_shape, dtype=float)\n', (3615, 3650), True, 'import numpy as np\n'), ((3915, 3945), 'NumPyNet.utils.check_is_fitted', 'check_is_fitted', (['self', '"""delta"""'], {}), "(self, 'delta')\n", (3930, 3945), False, 'from NumPyNet.utils import check_is_fitted\n'), ((4506, 4531), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4521, 4531), False, 'import os\n'), ((4584, 4604), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (4594, 4604), False, 'from PIL import Image\n')]
|
"""
This script shows the usage of scikit-learn's linear regression functionality.
"""
# %% [markdown]
# # Linear Regression using Scikit-Learn #
# %% [markdown]
# ## Ice Cream Dataset ##
# | Temperature °C | Ice Cream Sales |
# |:--------------:|:---------------:|
# | 15 | 34 |
# | 24 | 587 |
# | 34 | 1200 |
# | 31 | 1080 |
# | 29 | 989 |
# | 26 | 734 |
# | 17 | 80 |
# | 11 | 1 |
# | 23 | 523 |
# | 25 | 651 |
# %% [markdown]
# ### Dependencies ###
# Install NumPy for number crunching, Matplotlib for plotting graphs and scikit-learn for the regression model:
# ```bash
# pip install numpy matplotlib scikit-learn
# ```
# %% [markdown]
# ### Imports ###
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# %% [markdown]
# ### Ice Cream Dataset as Numpy Array ###
data = np.array([[15, 34],
[24, 587],
[34, 1200],
[31, 1080],
[29, 989],
[26, 734],
[17, 80],
[11, 1],
[23, 523],
[25, 651],
[0, 0],
[2, 0],
[12, 5]])
# %% [markdown]
# ### Plotting the Dataset ###
x_values, y_values = data.T
plt.style.use('ggplot')
plt.scatter(x_values, y_values)
plt.show()
# %% [markdown]
# ### Prepare Train and Test Data ###
x_train, x_test, y_train, y_test = train_test_split(
x_values, y_values, test_size=1/3)
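# scikit-learn estimators expect 2D feature arrays of shape (n_samples, n_features), hence the reshapes below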
x_train = x_train.reshape(-1, 1)
x_test = x_test.reshape(-1, 1)
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)
# %% [markdown]
# ### Train model ###
regression = linear_model.LinearRegression()
regression.fit(x_train, y_train)
# %% [markdown]
# ### Predict ###
y_prediction = regression.predict(x_test)
# %% [markdown]
# ### Plot Predicted Results ###
plt.scatter(x_test, y_test)
plt.plot(x_test, y_prediction, color='blue')
plt.show()
# %% [markdown]
# ### Print Metrics ###
print('Coefficient: \n', regression.coef_)
print('Intercept: \n', regression.intercept_)
print('Mean Squared Error: %.2f' % mean_squared_error(y_test, y_prediction))
|
[
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"sklearn.metrics.mean_squared_error",
"numpy.array",
"matplotlib.pyplot.scatter",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show"
] |
[((1100, 1247), 'numpy.array', 'np.array', (['[[15, 34], [24, 587], [34, 1200], [31, 1080], [29, 989], [26, 734], [17, 80\n ], [11, 1], [23, 523], [25, 651], [0, 0], [2, 0], [12, 5]]'], {}), '([[15, 34], [24, 587], [34, 1200], [31, 1080], [29, 989], [26, 734],\n [17, 80], [11, 1], [23, 523], [25, 651], [0, 0], [2, 0], [12, 5]])\n', (1108, 1247), True, 'import numpy as np\n'), ((1526, 1549), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1539, 1549), True, 'import matplotlib.pyplot as plt\n'), ((1550, 1581), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_values', 'y_values'], {}), '(x_values, y_values)\n', (1561, 1581), True, 'import matplotlib.pyplot as plt\n'), ((1582, 1592), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1590, 1592), True, 'import matplotlib.pyplot as plt\n'), ((1685, 1738), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_values', 'y_values'], {'test_size': '(1 / 3)'}), '(x_values, y_values, test_size=1 / 3)\n', (1701, 1738), False, 'from sklearn.model_selection import train_test_split\n'), ((1925, 1956), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (1954, 1956), False, 'from sklearn import linear_model\n'), ((2121, 2148), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (2132, 2148), True, 'import matplotlib.pyplot as plt\n'), ((2149, 2193), 'matplotlib.pyplot.plot', 'plt.plot', (['x_test', 'y_prediction'], {'color': '"""blue"""'}), "(x_test, y_prediction, color='blue')\n", (2157, 2193), True, 'import matplotlib.pyplot as plt\n'), ((2194, 2204), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2202, 2204), True, 'import matplotlib.pyplot as plt\n'), ((2372, 2412), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_prediction'], {}), '(y_test, y_prediction)\n', (2390, 2412), False, 'from sklearn.metrics import mean_squared_error\n')]
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
'''test_Rainbow_pen
'''
import sys, os
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
FN_OUT = 'rainbow_pen_320x240.png'
def mk_col(w, h, x, y):
a = 255
i = int(7 * y / h)
if i == 0: c, u, v = (192, 0, 0), (32, 0, 0), (0, 32, 0) # R
elif i == 1: c, u, v = (192, 96, 0), (0, -32, 0), (0, 32, 0) # O-
elif i == 2: c, u, v = (192, 192, 0), (0, -32, 0), (-32, 0, 0) # Y
elif i == 3: c, u, v = (0, 192, 0), (64, 0, 0), (0, 0, 64) # G
elif i == 4: c, u, v = (0, 192, 192), (0, 0, -64), (0, -64, 0) # C
elif i == 5: c, u, v = (0, 0, 192), (0, 64, 0), (32, 0, 0) # B
elif i == 6: c, u, v = (96, 0, 192), (-32, 0, 0), (32, 0, 0) # M-
return (i, a, c, u, v)
def mk_dum(w, h, x, y):
# return (64, 64, 64, 192)
i, a, (r, g, b), u, v = mk_col(w, h, x, y)
return (r, g, b, a)
def mk_rainbow(w, h, x, y):
# return (x % 256, y % 256, 128, 255)
i, a, (r, g, b), u, v = mk_col(w, h, x, y)
d = h / 7.0
z = int(y - i * d)
e = d / 3.0
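    # apply shading offset u to the first third of each colour band and offset v to the last third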
f = 1 if z < e else (-1 if z > 2*e else 0)
rgb = np.array((r, g, b))
if f > 0: rgb += np.array(u)
if f < 0: rgb += np.array(v)
r, g, b = rgb
if x < w / 4:
j, k = 2.0 * d * x / w, d / 2.0
t = z + j < k or z - j > k
if x < w / 36 or t: return (255, 255, 255, 0) # transparent
if x < w / 12: return (r, g, b, a)
else: return (224, 128, 0, 255) # light brown
return (r, g, b, a)
def rainbow_pen(w, h):
fig = plt.figure(figsize=(6, 4), dpi=96)
dm = np.ndarray((h, w, 4), dtype=np.uint8)
for y in range(h):
for x in range(w):
dm[y][x] = mk_dum(w, h, x, y)
dum = Image.fromarray(dm[::-1,:,:], 'RGBA')
im = np.ndarray((h, w, 4), dtype=np.uint8)
for y in range(h):
for x in range(w):
im[y][x] = mk_rainbow(w, h, x, y)
img = Image.fromarray(im[::-1,:,:], 'RGBA')
Image.fromarray(im, 'RGBA').save(FN_OUT, 'PNG')
ax = fig.add_subplot(231)
ax.imshow(img)
ax = fig.add_subplot(232)
ax.imshow(img.convert('L'), cmap='gray', vmin=0, vmax=255)
ax = fig.add_subplot(233)
ax.imshow(img.convert('L')) # auto heatmap
ax = fig.add_subplot(234)
ax.imshow(img.convert('YCbCr')) # ok ?
ax = fig.add_subplot(235)
ax.imshow(dum) # img.convert('LAB')) # not supported on PIL <= py 2.5 ?
ax = fig.add_subplot(236)
ax.imshow(dum) # img.convert('HSV')) # not supported on PIL <= py 2.5 ?
plt.show()
if __name__ == '__main__':
rainbow_pen(320, 240)
|
[
"PIL.Image.fromarray",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.ndarray",
"matplotlib.pyplot.show"
] |
[((1087, 1106), 'numpy.array', 'np.array', (['(r, g, b)'], {}), '((r, g, b))\n', (1095, 1106), True, 'import numpy as np\n'), ((1475, 1509), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)', 'dpi': '(96)'}), '(figsize=(6, 4), dpi=96)\n', (1485, 1509), True, 'from matplotlib import pyplot as plt\n'), ((1518, 1555), 'numpy.ndarray', 'np.ndarray', (['(h, w, 4)'], {'dtype': 'np.uint8'}), '((h, w, 4), dtype=np.uint8)\n', (1528, 1555), True, 'import numpy as np\n'), ((1644, 1683), 'PIL.Image.fromarray', 'Image.fromarray', (['dm[::-1, :, :]', '"""RGBA"""'], {}), "(dm[::-1, :, :], 'RGBA')\n", (1659, 1683), False, 'from PIL import Image\n'), ((1690, 1727), 'numpy.ndarray', 'np.ndarray', (['(h, w, 4)'], {'dtype': 'np.uint8'}), '((h, w, 4), dtype=np.uint8)\n', (1700, 1727), True, 'import numpy as np\n'), ((1820, 1859), 'PIL.Image.fromarray', 'Image.fromarray', (['im[::-1, :, :]', '"""RGBA"""'], {}), "(im[::-1, :, :], 'RGBA')\n", (1835, 1859), False, 'from PIL import Image\n'), ((2393, 2403), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2401, 2403), True, 'from matplotlib import pyplot as plt\n'), ((1126, 1137), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (1134, 1137), True, 'import numpy as np\n'), ((1157, 1168), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (1165, 1168), True, 'import numpy as np\n'), ((1860, 1887), 'PIL.Image.fromarray', 'Image.fromarray', (['im', '"""RGBA"""'], {}), "(im, 'RGBA')\n", (1875, 1887), False, 'from PIL import Image\n')]
|
from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, \
amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan
import yaml
from seir_model import SEIR_matrix
from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks
from sys import exit
def epidemiology_model():
with open(r'common_params.yaml') as file:
common_params = yaml.full_load(file)
with open(r'regions.yaml') as file:
regions = yaml.full_load(file)
with open(r'seir_params.yaml') as file:
seir_params_multivar = yaml.full_load(file)
nvars=len(seir_params_multivar) # (var=1 is baseline model, var=2 is delta variant)
nregions = len(regions)
epi = []
intl_visitors = []
between_region_mobility_rate = []
between_locality_mobility_rate = []
beds_per_1000 = []
baseline_hosp = []
for rgn in regions:
beds_per_1000.append(rgn['initial']['beds per 1000'])
baseline_hosp.append(rgn['initial']['population'] * rgn['initial']['beds per 1000']/1000)
epivar=[]
for var in seir_params_multivar:
epivar.append(SEIR_matrix(rgn, var, common_params))
if 'international travel' in rgn:
intl_visitors.append(rgn['international travel']['daily arrivals'] * rgn['international travel']['duration of stay'])
else:
intl_visitors.append(0.0)
between_locality_mobility_rate.append(rgn['between locality mobility rate'])
between_region_mobility_rate.append(rgn['between region mobility rate'])
        epi.append(epivar)  # epi is indexed as epi[region][variant], i.e. [[rgn1/var1, rgn1/var2], [rgn2/var1, rgn2/var2]]
proportion_total = [e.proportion_global_infected for e in epi[0]]
test1=np_sum(proportion_total,axis=0)
if any(test1<0.999) or any(test1>1.001):
print('Error test1: aborted')
print('proportions of global infections across variants do not sum to 1')
exit()
start_datetime = get_datetime(common_params['time']['COVID start'])
start_time = timesteps_between_dates(common_params['time']['start date'], common_params['time']['COVID start'])
end_time = timesteps_between_dates(common_params['time']['start date'], common_params['time']['end date'])
epi_datetime_array = get_datetime_array(common_params['time']['COVID start'], common_params['time']['end date'])
ntimesteps = end_time - start_time
# All the epidemiological regional models will give the same values for these parameters
epi_invisible_fraction = epi[0][0].invisible_fraction_1stinfection
total_population=0
    for j in range(0, nregions):
        total_population += epi[j][0].N
normal_bed_occupancy_fraction = common_params['bed occupancy']['normal']
max_reduction_in_normal_bed_occupancy = common_params['bed occupancy']['max reduction']
if 'vaccinate at risk first' in common_params['vaccination']:
vaccinate_at_risk = common_params['vaccination']['vaccinate at risk first']
else:
vaccinate_at_risk = False
avoid_elective_operations= common_params['avoid elective operations']
# Global infection rate per person
global_infection_points = common_params['global infection rate']
global_infection_npoints = len(global_infection_points)
global_infection_traj_start = global_infection_points[0][0]
if get_datetime(global_infection_traj_start) > start_datetime:
global_infection_traj_start = common_params['time']['COVID start']
global_infection_traj_timesteps_array = np_array(range(0,timesteps_between_dates(global_infection_traj_start, common_params['time']['end date']) + 1))
global_infection_ts = np_empty(global_infection_npoints)
global_infection_val = np_empty(global_infection_npoints)
for i in range(0,global_infection_npoints):
global_infection_ts[i] = timesteps_between_dates(global_infection_traj_start, global_infection_points[i][0])
global_infection_val[i] = global_infection_points[i][1]/1000 # Values are entered per 1000
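    # piecewise-linear interpolation of the per-person global infection rate over the simulation timesteps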
global_infection_rate = np_interp(global_infection_traj_timesteps_array, global_infection_ts, global_infection_val)
# Trunctate at start as necessary
ntrunc = timesteps_between_dates(global_infection_traj_start, common_params['time']['COVID start'])
global_infection_rate = global_infection_rate[ntrunc:]
# Maximum vaccination rate
vaccination_points = common_params['vaccination']['maximum doses per day']
vaccination_delay = timesteps_over_timedelta_weeks(common_params['vaccination']['time to efficacy'])
vaccination_npoints = len(vaccination_points)
vaccination_timesteps_array = np_array(range(0,timesteps_between_dates(common_params['time']['COVID start'], common_params['time']['end date']) + 1))
vaccination_ts = np_empty(vaccination_npoints)
vaccination_val = np_empty(vaccination_npoints)
for i in range(0,vaccination_npoints):
vaccination_ts[i] = timesteps_between_dates(common_params['time']['COVID start'], vaccination_points[i][0]) + vaccination_delay
vaccination_val[i] = vaccination_points[i][1]
vaccination_max_doses = np_interp(vaccination_timesteps_array, vaccination_ts, vaccination_val)
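    # each intervention below is described by one or more Window objects holding start/end days, ramp-up/ramp-down lengths and an effectiveness level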
isolate_symptomatic_cases_windows = []
if 'isolate symptomatic cases' in common_params:
for window in common_params['isolate symptomatic cases']:
if window['apply']:
isolate_symptomatic_cases_windows.append(Window((get_datetime(window['start date']) - start_datetime).days,
(get_datetime(window['end date']) - start_datetime).days,
window['ramp up for'],
window['ramp down for'],
(1 - epi_invisible_fraction) * window['fraction of cases isolated']))
isolate_at_risk_windows = []
if 'isolate at risk' in common_params:
for window in common_params['isolate at risk']:
if window['apply']:
isolate_at_risk_windows.append(Window((get_datetime(window['start date']) - start_datetime).days,
(get_datetime(window['end date']) - start_datetime).days,
window['ramp up for'],
window['ramp down for'],
window['fraction of population isolated']))
test_and_trace_windows = []
if 'test and trace' in common_params:
for window in common_params['test and trace']:
if window['apply']:
test_and_trace_windows.append(Window((get_datetime(window['start date']) - start_datetime).days,
(get_datetime(window['end date']) - start_datetime).days,
window['ramp up for'],
window['ramp down for'],
window['fraction of infectious cases isolated']))
soc_dist_windows = []
if 'social distance' in common_params:
for window in common_params['social distance']:
if window['apply']:
soc_dist_windows.append(Window((get_datetime(window['start date']) - start_datetime).days,
(get_datetime(window['end date']) - start_datetime).days,
window['ramp up for'],
window['ramp down for'],
window['effectiveness']))
travel_restrictions_windows = []
if 'international travel restrictions' in common_params:
for window in common_params['international travel restrictions']:
if window['apply']:
travel_restrictions_windows.append(Window((get_datetime(window['start date']) - start_datetime).days,
(get_datetime(window['end date']) - start_datetime).days,
window['ramp up for'],
window['ramp down for'],
window['effectiveness']))
# Initialize values for indicator graphs
Itot_allvars=np_zeros(nregions)
comm_spread_frac_allvars = np_zeros((nregions, nvars))
deaths = np_zeros((nregions, nvars))
deaths_reinf = np_zeros((nregions, nvars))
cumulative_cases = np_zeros((nregions, nvars))
deaths_over_time = np_zeros((nregions, ntimesteps, nvars))
new_deaths_over_time = np_zeros((nregions, ntimesteps, nvars))
deaths_reinf_over_time = np_zeros((nregions, ntimesteps, nvars))
recovered_over_time = np_zeros((nregions, ntimesteps, nvars))
vaccinated_over_time = np_zeros((nregions, ntimesteps, nvars))
rerecovered_over_time = np_zeros((nregions, ntimesteps, nvars))
mortality_rate_over_time = np_zeros((nregions, ntimesteps, nvars))
hospitalization_index_region = np_ones(nregions)
hospitalization_index = np_ones(ntimesteps)
mortality_rate = np_ones(ntimesteps)
infective_over_time = np_zeros((nregions, ntimesteps, nvars))
reinfective_over_time = np_zeros((nregions, ntimesteps, nvars))
susceptible_over_time = np_zeros((nregions, ntimesteps, nvars))
for j in range(0,nregions):
susceptible_over_time[j,0,:] = [e.S for e in epi[j]]
# susceptible_over_time = np_zeros((nregions, ntimesteps, nvars))
# for j in range(0,nregions):
# e=epi[j]
# for v in range(0, len(e)):
# susceptible_over_time[j,0,v] = e[v].S
exposed_over_time = np_zeros((nregions, ntimesteps, nvars))
for j in range(0,nregions):
exposed_over_time[j,0,:] = [np_sum(e.E_nr) + np_sum(e.E_r) for e in epi[j]]
reexposed_over_time = np_zeros((nregions, ntimesteps, nvars))
for j in range(0,nregions):
reexposed_over_time[j,0,:] = [np_sum(e.RE_nr) + np_sum(e.RE_r) for e in epi[j]]
comm_spread_frac_over_time = np_zeros((nregions, ntimesteps, nvars))
for j in range(0,nregions):
comm_spread_frac_over_time[j,0,:] = [e.comm_spread_frac for e in epi[j]]
for i in range(0, ntimesteps):
# Public health measures
PHA_social_distancing = 0
for w in soc_dist_windows:
PHA_social_distancing += w.window(i)
PHA_travel_restrictions = 0
for w in travel_restrictions_windows:
PHA_travel_restrictions += w.window(i)
PHA_isolate_visible_cases = 0
for w in isolate_symptomatic_cases_windows:
PHA_isolate_visible_cases += w.window(i)
PHA_isolate_at_risk = 0
for w in isolate_at_risk_windows:
PHA_isolate_at_risk += w.window(i)
PHA_isolate_infectious_cases = 0
for w in test_and_trace_windows:
PHA_isolate_infectious_cases += w.window(i)
PHA_isolate_cases = max(PHA_isolate_visible_cases, PHA_isolate_infectious_cases)
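        # distancing and case isolation reduce transmission multiplicatively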
public_health_adjustment = (1 - PHA_social_distancing) * (1 - PHA_isolate_cases)
# Beds and Mortality
if avoid_elective_operations:
bed_occupancy_factor = (1 - PHA_social_distancing * max_reduction_in_normal_bed_occupancy)
else:
bed_occupancy_factor = 1
bed_occupancy_fraction = bed_occupancy_factor * normal_bed_occupancy_fraction
#Community spread
for j in range(0, nregions):
comm_spread_frac_allvars[j,:] = [e.comm_spread_frac for e in epi[j]]
# Loop of variants
for v in range(0,nvars):
# Loop over regions
for j in range(0, nregions):
                intl_infected_visitors = intl_visitors[j] * (epi[j][v].proportion_global_infected[i] * global_infection_rate[i]) * max(0, 1 - PHA_travel_restrictions)
dom_infected_visitors = 0
# Confirm current variant has been introduced already
if epi_datetime_array[i] >= epi[j][v].start_time:
if nregions > 1:
for k in range(0, nregions):
if k != j:
dom_infected_visitors += epi[k][v].Itot_prev * between_region_mobility_rate[k]/(nregions - 1)
# Run the model for one time step
epi[j][v].update(total_population,
dom_infected_visitors + intl_infected_visitors,
between_locality_mobility_rate[j],
public_health_adjustment,
PHA_isolate_at_risk,
bed_occupancy_fraction,
beds_per_1000[j],
vaccination_max_doses[i],
vaccinate_at_risk,
Itot_allvars[j],
comm_spread_frac_allvars[j],
nvars)
# Update values for indicator graphs
new_deaths_over_time[j,i,v] = epi[j][v].new_deaths + epi[j][v].new_deaths_reinf
deaths[j,v] += epi[j][v].new_deaths
deaths_reinf[j,v] += epi[j][v].new_deaths_reinf
#susceptible_over_time[j,i,v] = epi[j][v].S
exposed_over_time[j,i,v] = np_sum(epi[j][v].E_nr) + np_sum(epi[j][v].E_r)
reexposed_over_time[j,i,v] = np_sum(epi[j][v].RE_nr) + np_sum(epi[j][v].RE_r)
infective_over_time[j,i,v] = epi[j][v].Itot
reinfective_over_time[j,i,v] = epi[j][v].RItot
deaths_over_time[j,i,v] = deaths[j,v]
deaths_reinf_over_time[j,i,v] = deaths_reinf[j,v]
vaccinated_over_time[j,i,v] = epi[j][v].vaccinated
rerecovered_over_time[j,i,v] = epi[j][v].RR
cumulative_cases[j,v] += (1 - epi[j][v].invisible_fraction_1stinfection) * (epi[j][v].I_nr[1] + epi[j][v].I_r[1]) + \
(1 - epi[j][v].invisible_fraction_reinfection) * (epi[j][v].RI_nr[1] + epi[j][v].RI_r[1])
comm_spread_frac_over_time[j,i,v] = epi[j][v].comm_spread_frac
mortality_rate_over_time[j,i,v] = epi[j][v].curr_mortality_rate
# Calculate hospitalisation index across variants and track infected fraction across variants
Itot_allvars=np_zeros(nregions) ## Breaks if one variant infects everyone
hospitalized=np_zeros(nregions)
for j in range(0, nregions):
# Infected by regions
for e in epi[j]:
Itot_allvars[j]+= e.Itot_incl_reinf # add total infected for each variant in that region
hosp_per_infective_1stinfections = (1 - e.invisible_fraction_1stinfection) * e.ave_fraction_of_visible_1stinfections_requiring_hospitalization
hosp_per_infective_reinfections = (1 - e.invisible_fraction_reinfection) * e.ave_fraction_of_visible_reinfections_requiring_hospitalization
hospitalized[j] += ( hosp_per_infective_1stinfections * np_sum(e.I_r + e.I_nr) + hosp_per_infective_reinfections * np_sum(e.RI_r + e.RI_nr) )
hospitalization_index_region[j] = bed_occupancy_fraction + hospitalized[j] /baseline_hosp[j]
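        # the reported index follows the most stressed region; values above 1 indicate demand beyond normal bed capacity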
hospitalization_index[i] = np_amax(hospitalization_index_region)
        mortality_rate[i] = np_sum(new_deaths_over_time[:, i, :]) / total_population * 100000 # per 100,000
#True up susceptible pools, total population and recovered pools between variants
for j in range(0, nregions):
for v in range(0,nvars):
if nvars>1:
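                    # epi[j][~v] addresses the other variant; this bitwise-not trick assumes exactly two variants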
if i==0:
epi[j][v].S-= (np_sum(epi[j][~v].E_nr[1]) + np_sum(epi[j][~v].E_r[1]) + np_sum(epi[j][~v].Itot))
if i > 0:
epi[j][v].S= max(0, epi[j][v].S - (np_sum(epi[j][~v].E_nr[1]) + np_sum(epi[j][~v].E_r[1])))
epi[j][v].N -= ( epi[j][~v].new_deaths +epi[j][~v].new_deaths_reinf)
if epi_datetime_array[i] < epi[j][v].start_time:
epi[j][v].S= max(0, epi[j][v].S - (epi[j][~v].vaccinated_nr + epi[j][~v].vaccinated_r))
epi[j][v].R_nr = epi[j][~v].R_nr
epi[j][v].R_r = epi[j][~v].R_r
else:
epi[j][v].R_nr -= epi[j][~v].new_reexposed_nr
epi[j][v].R_r -= epi[j][~v].new_reexposed_r
susceptible_over_time[j,i,v] = epi[j][v].S
recovered_over_time[j,i,v] = np_sum(epi[j][v].R_nr) + np_sum(epi[j][v].R_r)
return nvars, seir_params_multivar, nregions, regions, start_time, end_time, epi_datetime_array, susceptible_over_time, \
exposed_over_time, infective_over_time, recovered_over_time, vaccinated_over_time, deaths_over_time, deaths_reinf_over_time, reexposed_over_time, reinfective_over_time, \
rerecovered_over_time, hospitalization_index
|
[
"yaml.full_load",
"common.timesteps_over_timedelta_weeks",
"common.get_datetime",
"numpy.ones",
"common.get_datetime_array",
"common.timesteps_between_dates",
"numpy.sum",
"numpy.zeros",
"numpy.empty",
"numpy.interp",
"sys.exit",
"seir_model.SEIR_matrix",
"numpy.amax"
] |
[((1888, 1920), 'numpy.sum', 'np_sum', (['proportion_total'], {'axis': '(0)'}), '(proportion_total, axis=0)\n', (1894, 1920), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((2128, 2178), 'common.get_datetime', 'get_datetime', (["common_params['time']['COVID start']"], {}), "(common_params['time']['COVID start'])\n", (2140, 2178), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((2197, 2300), 'common.timesteps_between_dates', 'timesteps_between_dates', (["common_params['time']['start date']", "common_params['time']['COVID start']"], {}), "(common_params['time']['start date'], common_params[\n 'time']['COVID start'])\n", (2220, 2300), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((2312, 2412), 'common.timesteps_between_dates', 'timesteps_between_dates', (["common_params['time']['start date']", "common_params['time']['end date']"], {}), "(common_params['time']['start date'], common_params[\n 'time']['end date'])\n", (2335, 2412), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((2434, 2530), 'common.get_datetime_array', 'get_datetime_array', (["common_params['time']['COVID start']", "common_params['time']['end date']"], {}), "(common_params['time']['COVID start'], common_params[\n 'time']['end date'])\n", (2452, 2530), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((3852, 3886), 'numpy.empty', 'np_empty', (['global_infection_npoints'], {}), '(global_infection_npoints)\n', (3860, 3886), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((3915, 3949), 'numpy.empty', 'np_empty', (['global_infection_npoints'], {}), '(global_infection_npoints)\n', (3923, 3949), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((4246, 4341), 'numpy.interp', 'np_interp', (['global_infection_traj_timesteps_array', 'global_infection_ts', 'global_infection_val'], {}), '(global_infection_traj_timesteps_array, global_infection_ts,\n global_infection_val)\n', (4255, 4341), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((4391, 4486), 'common.timesteps_between_dates', 'timesteps_between_dates', (['global_infection_traj_start', "common_params['time']['COVID start']"], {}), "(global_infection_traj_start, common_params['time'][\n 'COVID start'])\n", (4414, 4486), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((4681, 4766), 'common.timesteps_over_timedelta_weeks', 'timesteps_over_timedelta_weeks', (["common_params['vaccination']['time to efficacy']"], {}), "(common_params['vaccination']['time to efficacy']\n )\n", (4711, 4766), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, 
timesteps_over_timedelta_weeks\n'), ((4990, 5019), 'numpy.empty', 'np_empty', (['vaccination_npoints'], {}), '(vaccination_npoints)\n', (4998, 5019), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((5043, 5072), 'numpy.empty', 'np_empty', (['vaccination_npoints'], {}), '(vaccination_npoints)\n', (5051, 5072), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((5338, 5409), 'numpy.interp', 'np_interp', (['vaccination_timesteps_array', 'vaccination_ts', 'vaccination_val'], {}), '(vaccination_timesteps_array, vaccination_ts, vaccination_val)\n', (5347, 5409), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((8793, 8811), 'numpy.zeros', 'np_zeros', (['nregions'], {}), '(nregions)\n', (8801, 8811), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((8844, 8871), 'numpy.zeros', 'np_zeros', (['(nregions, nvars)'], {}), '((nregions, nvars))\n', (8852, 8871), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((8886, 8913), 'numpy.zeros', 'np_zeros', (['(nregions, nvars)'], {}), '((nregions, nvars))\n', (8894, 8913), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((8934, 8961), 'numpy.zeros', 'np_zeros', (['(nregions, nvars)'], {}), '((nregions, nvars))\n', (8942, 8961), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((8986, 9013), 'numpy.zeros', 'np_zeros', (['(nregions, nvars)'], {}), '((nregions, nvars))\n', (8994, 9013), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9040, 9079), 'numpy.zeros', 'np_zeros', (['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (9048, 9079), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9108, 9147), 'numpy.zeros', 'np_zeros', (['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (9116, 9147), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9178, 9217), 'numpy.zeros', 'np_zeros', (['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (9186, 9217), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9245, 9284), 'numpy.zeros', 'np_zeros', 
(['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (9253, 9284), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9313, 9352), 'numpy.zeros', 'np_zeros', (['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (9321, 9352), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9382, 9421), 'numpy.zeros', 'np_zeros', (['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (9390, 9421), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9454, 9493), 'numpy.zeros', 'np_zeros', (['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (9462, 9493), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9532, 9549), 'numpy.ones', 'np_ones', (['nregions'], {}), '(nregions)\n', (9539, 9549), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9579, 9598), 'numpy.ones', 'np_ones', (['ntimesteps'], {}), '(ntimesteps)\n', (9586, 9598), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9621, 9640), 'numpy.ones', 'np_ones', (['ntimesteps'], {}), '(ntimesteps)\n', (9628, 9640), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9670, 9709), 'numpy.zeros', 'np_zeros', (['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (9678, 9709), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9739, 9778), 'numpy.zeros', 'np_zeros', (['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (9747, 9778), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((9810, 9849), 'numpy.zeros', 'np_zeros', (['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (9818, 9849), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((10189, 10228), 'numpy.zeros', 'np_zeros', (['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (10197, 10228), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((10376, 10415), 'numpy.zeros', 'np_zeros', (['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (10384, 
10415), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((10578, 10617), 'numpy.zeros', 'np_zeros', (['(nregions, ntimesteps, nvars)'], {}), '((nregions, ntimesteps, nvars))\n', (10586, 10617), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((479, 499), 'yaml.full_load', 'yaml.full_load', (['file'], {}), '(file)\n', (493, 499), False, 'import yaml\n'), ((562, 582), 'yaml.full_load', 'yaml.full_load', (['file'], {}), '(file)\n', (576, 582), False, 'import yaml\n'), ((662, 682), 'yaml.full_load', 'yaml.full_load', (['file'], {}), '(file)\n', (676, 682), False, 'import yaml\n'), ((2097, 2103), 'sys.exit', 'exit', ([], {}), '()\n', (2101, 2103), False, 'from sys import exit\n'), ((3533, 3574), 'common.get_datetime', 'get_datetime', (['global_infection_traj_start'], {}), '(global_infection_traj_start)\n', (3545, 3574), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((4033, 4120), 'common.timesteps_between_dates', 'timesteps_between_dates', (['global_infection_traj_start', 'global_infection_points[i][0]'], {}), '(global_infection_traj_start,\n global_infection_points[i][0])\n', (4056, 4120), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((15144, 15162), 'numpy.zeros', 'np_zeros', (['nregions'], {}), '(nregions)\n', (15152, 15162), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((15227, 15245), 'numpy.zeros', 'np_zeros', (['nregions'], {}), '(nregions)\n', (15235, 15245), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((16077, 16114), 'numpy.amax', 'np_amax', (['hospitalization_index_region'], {}), '(hospitalization_index_region)\n', (16084, 16114), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((5146, 5237), 'common.timesteps_between_dates', 'timesteps_between_dates', (["common_params['time']['COVID start']", 'vaccination_points[i][0]'], {}), "(common_params['time']['COVID start'],\n vaccination_points[i][0])\n", (5169, 5237), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((1250, 1286), 'seir_model.SEIR_matrix', 'SEIR_matrix', (['rgn', 'var', 'common_params'], {}), '(rgn, var, common_params)\n', (1261, 1286), False, 'from seir_model import SEIR_matrix\n'), ((3731, 3823), 'common.timesteps_between_dates', 'timesteps_between_dates', (['global_infection_traj_start', "common_params['time']['end date']"], {}), "(global_infection_traj_start, common_params['time'][\n 'end date'])\n", (3754, 3823), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((4865, 4966), 'common.timesteps_between_dates', 'timesteps_between_dates', (["common_params['time']['COVID start']", 
"common_params['time']['end date']"], {}), "(common_params['time']['COVID start'], common_params\n ['time']['end date'])\n", (4888, 4966), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((10299, 10313), 'numpy.sum', 'np_sum', (['e.E_nr'], {}), '(e.E_nr)\n', (10305, 10313), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((10316, 10329), 'numpy.sum', 'np_sum', (['e.E_r'], {}), '(e.E_r)\n', (10322, 10329), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((10488, 10503), 'numpy.sum', 'np_sum', (['e.RE_nr'], {}), '(e.RE_nr)\n', (10494, 10503), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((10506, 10520), 'numpy.sum', 'np_sum', (['e.RE_r'], {}), '(e.RE_r)\n', (10512, 10520), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((16144, 16181), 'numpy.sum', 'np_sum', (['new_deaths_over_time[:, i, :]'], {}), '(new_deaths_over_time[:, i, :])\n', (16150, 16181), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((17400, 17422), 'numpy.sum', 'np_sum', (['epi[j][v].R_nr'], {}), '(epi[j][v].R_nr)\n', (17406, 17422), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((17425, 17446), 'numpy.sum', 'np_sum', (['epi[j][v].R_r'], {}), '(epi[j][v].R_r)\n', (17431, 17446), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((14048, 14070), 'numpy.sum', 'np_sum', (['epi[j][v].E_nr'], {}), '(epi[j][v].E_nr)\n', (14054, 14070), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((14073, 14094), 'numpy.sum', 'np_sum', (['epi[j][v].E_r'], {}), '(epi[j][v].E_r)\n', (14079, 14094), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((14145, 14168), 'numpy.sum', 'np_sum', (['epi[j][v].RE_nr'], {}), '(epi[j][v].RE_nr)\n', (14151, 14168), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((14171, 14193), 'numpy.sum', 'np_sum', (['epi[j][v].RE_r'], {}), '(epi[j][v].RE_r)\n', (14177, 14193), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((15846, 15868), 'numpy.sum', 'np_sum', (['(e.I_r + e.I_nr)'], 
{}), '(e.I_r + e.I_nr)\n', (15852, 15868), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((15905, 15929), 'numpy.sum', 'np_sum', (['(e.RI_r + e.RI_nr)'], {}), '(e.RI_r + e.RI_nr)\n', (15911, 15929), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((16541, 16564), 'numpy.sum', 'np_sum', (['epi[j][~v].Itot'], {}), '(epi[j][~v].Itot)\n', (16547, 16564), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((5676, 5710), 'common.get_datetime', 'get_datetime', (["window['start date']"], {}), "(window['start date'])\n", (5688, 5710), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((5801, 5833), 'common.get_datetime', 'get_datetime', (["window['end date']"], {}), "(window['end date'])\n", (5813, 5833), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((6397, 6431), 'common.get_datetime', 'get_datetime', (["window['start date']"], {}), "(window['start date'])\n", (6409, 6431), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((6512, 6544), 'common.get_datetime', 'get_datetime', (["window['end date']"], {}), "(window['end date'])\n", (6524, 6544), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((7048, 7082), 'common.get_datetime', 'get_datetime', (["window['start date']"], {}), "(window['start date'])\n", (7060, 7082), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((7162, 7194), 'common.get_datetime', 'get_datetime', (["window['end date']"], {}), "(window['end date'])\n", (7174, 7194), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((7691, 7725), 'common.get_datetime', 'get_datetime', (["window['start date']"], {}), "(window['start date'])\n", (7703, 7725), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((7799, 7831), 'common.get_datetime', 'get_datetime', (["window['end date']"], {}), "(window['end date'])\n", (7811, 7831), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((8344, 8378), 'common.get_datetime', 'get_datetime', (["window['start date']"], {}), "(window['start date'])\n", (8356, 8378), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((8452, 8484), 'common.get_datetime', 'get_datetime', (["window['end date']"], {}), "(window['end date'])\n", (8464, 8484), False, 'from common import Window, get_datetime, timesteps_between_dates, get_datetime_array, timesteps_over_timedelta_weeks\n'), ((16484, 16510), 'numpy.sum', 'np_sum', (['epi[j][~v].E_nr[1]'], {}), '(epi[j][~v].E_nr[1])\n', (16490, 16510), True, 'from numpy import array as np_array, 
zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((16513, 16538), 'numpy.sum', 'np_sum', (['epi[j][~v].E_r[1]'], {}), '(epi[j][~v].E_r[1])\n', (16519, 16538), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((16655, 16681), 'numpy.sum', 'np_sum', (['epi[j][~v].E_nr[1]'], {}), '(epi[j][~v].E_nr[1])\n', (16661, 16681), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n'), ((16684, 16709), 'numpy.sum', 'np_sum', (['epi[j][~v].E_r[1]'], {}), '(epi[j][~v].E_r[1])\n', (16690, 16709), True, 'from numpy import array as np_array, zeros as np_zeros, sum as np_sum, empty as np_empty, amax as np_amax, interp as np_interp, ones as np_ones, tile as np_tile, isnan as np_isnan\n')]
|
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
import sys
data = np.loadtxt('NbSe2.freq.gp')
symmetryfile = 'plotband.out'
lbd = np.loadtxt("lambda.dat")
lbd_val = np.where(lbd<1 , lbd, 1)
def Symmetries(fstring):
f = open(fstring, 'r')
x = np.zeros(0)
for i in f:
if "high-symmetry" in i:
x = np.append(x, float(i.split()[-1]))
f.close()
return x
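# repeat the k-point path once per phonon branch (the .freq.gp file holds 9 branches)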
x=np.tile(data.T[0],9)
val = lbd_val.T[1:].reshape(-1)
y=data.T[1:].reshape(-1,)
fig=plt.figure(figsize=(8,6))
labels=["G","M","K","G"]
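# the factor 0.12398 converts phonon frequencies from cm^-1 to meV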
plt.scatter(x,y*0.12398,c=val,cmap="copper",s=10)
sym_tick = Symmetries(symmetryfile)
for i in range(len(sym_tick)-1):
plt.axvline(sym_tick[i],linestyle='dashed', color='black', alpha=0.75)
plt.xticks(sym_tick,labels)
plt.xlim(min(sym_tick),max(sym_tick))
plt.ylim(0)
plt.ylabel("Energy (meV)")
plt.colorbar()
plt.savefig("epc.pdf")
plt.show()
|
[
"numpy.tile",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"numpy.loadtxt",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.show"
] |
[((133, 160), 'numpy.loadtxt', 'np.loadtxt', (['"""NbSe2.freq.gp"""'], {}), "('NbSe2.freq.gp')\n", (143, 160), True, 'import numpy as np\n'), ((197, 221), 'numpy.loadtxt', 'np.loadtxt', (['"""lambda.dat"""'], {}), "('lambda.dat')\n", (207, 221), True, 'import numpy as np\n'), ((232, 257), 'numpy.where', 'np.where', (['(lbd < 1)', 'lbd', '(1)'], {}), '(lbd < 1, lbd, 1)\n', (240, 257), True, 'import numpy as np\n'), ((458, 479), 'numpy.tile', 'np.tile', (['data.T[0]', '(9)'], {}), '(data.T[0], 9)\n', (465, 479), True, 'import numpy as np\n'), ((541, 567), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (551, 567), True, 'import matplotlib.pyplot as plt\n'), ((592, 647), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', '(y * 0.12398)'], {'c': 'val', 'cmap': '"""copper"""', 's': '(10)'}), "(x, y * 0.12398, c=val, cmap='copper', s=10)\n", (603, 647), True, 'import matplotlib.pyplot as plt\n'), ((786, 814), 'matplotlib.pyplot.xticks', 'plt.xticks', (['sym_tick', 'labels'], {}), '(sym_tick, labels)\n', (796, 814), True, 'import matplotlib.pyplot as plt\n'), ((852, 863), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)'], {}), '(0)\n', (860, 863), True, 'import matplotlib.pyplot as plt\n'), ((864, 890), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Energy (meV)"""'], {}), "('Energy (meV)')\n", (874, 890), True, 'import matplotlib.pyplot as plt\n'), ((891, 905), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (903, 905), True, 'import matplotlib.pyplot as plt\n'), ((906, 928), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""epc.pdf"""'], {}), "('epc.pdf')\n", (917, 928), True, 'import matplotlib.pyplot as plt\n'), ((929, 939), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (937, 939), True, 'import matplotlib.pyplot as plt\n'), ((317, 328), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (325, 328), True, 'import numpy as np\n'), ((715, 786), 'matplotlib.pyplot.axvline', 'plt.axvline', (['sym_tick[i]'], {'linestyle': '"""dashed"""', 'color': '"""black"""', 'alpha': '(0.75)'}), "(sym_tick[i], linestyle='dashed', color='black', alpha=0.75)\n", (726, 786), True, 'import matplotlib.pyplot as plt\n')]
|
"""
Regression tests for the REINFORCE agent on OpenAI gym environments
"""
import pytest
import numpy as np
import shutil
from yarlp.utils.env_utils import NormalizedGymEnv
from yarlp.agent.ddqn_agent import DDQNAgent
env = NormalizedGymEnv(
'PongNoFrameskip-v4',
is_atari=True
)
def test_ddqn():
agent = DDQNAgent(env, max_timesteps=10,
learning_start_timestep=1,
train_freq=5,
batch_size=1)
agent.train()
def test_seed():
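    # training twice with the same seed must produce identical replay-buffer contents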
agent = DDQNAgent(env, seed=143, max_timesteps=2)
agent.train()
ob, *_ = agent.replay_buffer.sample(1)
agent = DDQNAgent(env, seed=143, max_timesteps=2)
agent.train()
ob2, *_ = agent.replay_buffer.sample(1)
assert np.all(
np.array(ob) == np.array(ob2))
def test_save_models():
agent = DDQNAgent(env, max_timesteps=2)
agent.train()
agent.save('testy_ddqn')
agent = DDQNAgent.load('testy_ddqn')
agent.t = 0
agent.train()
shutil.rmtree('testy_ddqn')
|
[
"yarlp.agent.ddqn_agent.DDQNAgent.load",
"yarlp.utils.env_utils.NormalizedGymEnv",
"numpy.array",
"shutil.rmtree",
"yarlp.agent.ddqn_agent.DDQNAgent"
] |
[((232, 285), 'yarlp.utils.env_utils.NormalizedGymEnv', 'NormalizedGymEnv', (['"""PongNoFrameskip-v4"""'], {'is_atari': '(True)'}), "('PongNoFrameskip-v4', is_atari=True)\n", (248, 285), False, 'from yarlp.utils.env_utils import NormalizedGymEnv\n'), ((327, 418), 'yarlp.agent.ddqn_agent.DDQNAgent', 'DDQNAgent', (['env'], {'max_timesteps': '(10)', 'learning_start_timestep': '(1)', 'train_freq': '(5)', 'batch_size': '(1)'}), '(env, max_timesteps=10, learning_start_timestep=1, train_freq=5,\n batch_size=1)\n', (336, 418), False, 'from yarlp.agent.ddqn_agent import DDQNAgent\n'), ((530, 571), 'yarlp.agent.ddqn_agent.DDQNAgent', 'DDQNAgent', (['env'], {'seed': '(143)', 'max_timesteps': '(2)'}), '(env, seed=143, max_timesteps=2)\n', (539, 571), False, 'from yarlp.agent.ddqn_agent import DDQNAgent\n'), ((646, 687), 'yarlp.agent.ddqn_agent.DDQNAgent', 'DDQNAgent', (['env'], {'seed': '(143)', 'max_timesteps': '(2)'}), '(env, seed=143, max_timesteps=2)\n', (655, 687), False, 'from yarlp.agent.ddqn_agent import DDQNAgent\n'), ((847, 878), 'yarlp.agent.ddqn_agent.DDQNAgent', 'DDQNAgent', (['env'], {'max_timesteps': '(2)'}), '(env, max_timesteps=2)\n', (856, 878), False, 'from yarlp.agent.ddqn_agent import DDQNAgent\n'), ((938, 966), 'yarlp.agent.ddqn_agent.DDQNAgent.load', 'DDQNAgent.load', (['"""testy_ddqn"""'], {}), "('testy_ddqn')\n", (952, 966), False, 'from yarlp.agent.ddqn_agent import DDQNAgent\n'), ((1005, 1032), 'shutil.rmtree', 'shutil.rmtree', (['"""testy_ddqn"""'], {}), "('testy_ddqn')\n", (1018, 1032), False, 'import shutil\n'), ((778, 790), 'numpy.array', 'np.array', (['ob'], {}), '(ob)\n', (786, 790), True, 'import numpy as np\n'), ((794, 807), 'numpy.array', 'np.array', (['ob2'], {}), '(ob2)\n', (802, 807), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
import tensorflow as tf
tf.enable_eager_execution()
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed
tfd = tfp.distributions
# ===========================================================================
# Constant
# ===========================================================================
a = 8
b = 0.5
mu = 0
n_samples = 100000
# ===========================================================================
# Following the generative procedure
# ===========================================================================
# Step 1: generate the precision Beta
beta_dist = tfd.Gamma(concentration=a, rate=b)
beta = beta_dist.sample(n_samples)
# the prior probability
p_beta_given_a_and_b = beta_dist.prob(beta)
# Step 2: generate the data point
# scale is standard deviation
x_dist = tfd.Normal(loc=mu, scale=tf.sqrt(1 / beta))
x = x_dist.sample()
# the likelihood
p_x_given_mu_and_beta = x_dist.prob(x)
# ====== plotting the prior ====== #
plt.figure()
sns.distplot(beta.numpy(), bins=120, kde=True)
plt.title(r"Prior distribution: $p(\beta|a=%g, b=%g)$" % (a, b))
# ====== plotting the likelihood ====== #
plt.figure()
sns.distplot(x.numpy(), bins=120, kde=True)
plt.title(r"Likelihood distribution: $p(X|\mu=%g, \sigma=\sqrt{\beta^{-1}})$" % mu)
# ====== plotting the posterior ====== #
# the posterior probability, this is only
# proportionally, not exactly because we omit
# the evidence p(X)
# If we want to calculate p(X), we need to marginalize out
# beta using sum rule:
# p(X) = p(X, beta_1) + p(X, beta_2) + ... + p(X, beta_∞)
# This is not easy
p_beta_given_x = p_x_given_mu_and_beta * p_beta_given_a_and_b
p_beta_given_x = p_beta_given_x / tf.reduce_sum(p_beta_given_x)
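# resample the prior draws with probability proportional to their posterior weight (likelihood x prior density)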
posterior_dist = tfd.Categorical(probs=p_beta_given_x)
beta = beta.numpy()
posterior = []
for i in range(n_samples // 2000):
idx = posterior_dist.sample(2000).numpy()
posterior.append(beta[idx])
posterior = np.concatenate(posterior)
plt.figure()
sns.distplot(posterior, bins=120, kde=True)
plt.title(r"Sampled posterior distribution: $p(\beta|X)$")
# ====== plotting the closed-form solution ====== #
a0 = a + n_samples / 2
b0 = b + n_samples / 2 * np.var(x.numpy())
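# For a Normal likelihood with known mean mu and a Gamma(a, b) prior on the
# precision, the conjugate posterior is Gamma(a + n/2, b + sum((x_i - mu)^2)/2);
# since mu = 0 here, sum(x_i^2)/2 is approximated above by n/2 * var(x).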
posterior_dist = tfd.Gamma(concentration=a0, rate=b0)
posterior = posterior_dist.sample(n_samples)
plt.figure()
sns.distplot(posterior, bins=120, kde=True)
plt.title(
r"Closed form solution: $p(\beta|X) \sim Gamma(a=%g, b=%g)$"
% (a0, b0))
from odin import visual as V
V.plot_save('/tmp/tmp.pdf', dpi=200)
|
[
"seaborn.distplot",
"matplotlib.use",
"odin.visual.plot_save",
"tensorflow.reduce_sum",
"tensorflow.enable_eager_execution",
"matplotlib.pyplot.figure",
"tensorflow.sqrt",
"numpy.concatenate",
"matplotlib.pyplot.title"
] |
[((107, 128), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (121, 128), False, 'import matplotlib\n'), ((233, 260), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (258, 260), True, 'import tensorflow as tf\n'), ((1194, 1206), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1204, 1206), True, 'from matplotlib import pyplot as plt\n'), ((1254, 1318), 'matplotlib.pyplot.title', 'plt.title', (["('Prior distribution: $p(\\\\beta|a=%g, b=%g)$' % (a, b))"], {}), "('Prior distribution: $p(\\\\beta|a=%g, b=%g)$' % (a, b))\n", (1263, 1318), True, 'from matplotlib import pyplot as plt\n'), ((1362, 1374), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1372, 1374), True, 'from matplotlib import pyplot as plt\n'), ((1419, 1515), 'matplotlib.pyplot.title', 'plt.title', (["('Likelihood distribution: $p(X|\\\\mu=%g, \\\\sigma=\\\\sqrt{\\\\beta^{-1}})$' % mu)"], {}), "(\n 'Likelihood distribution: $p(X|\\\\mu=%g, \\\\sigma=\\\\sqrt{\\\\beta^{-1}})$' % mu\n )\n", (1428, 1515), True, 'from matplotlib import pyplot as plt\n'), ((2152, 2177), 'numpy.concatenate', 'np.concatenate', (['posterior'], {}), '(posterior)\n', (2166, 2177), True, 'import numpy as np\n'), ((2179, 2191), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2189, 2191), True, 'from matplotlib import pyplot as plt\n'), ((2192, 2235), 'seaborn.distplot', 'sns.distplot', (['posterior'], {'bins': '(120)', 'kde': '(True)'}), '(posterior, bins=120, kde=True)\n', (2204, 2235), True, 'import seaborn as sns\n'), ((2236, 2294), 'matplotlib.pyplot.title', 'plt.title', (['"""Sampled posterior distribution: $p(\\\\beta|X)$"""'], {}), "('Sampled posterior distribution: $p(\\\\beta|X)$')\n", (2245, 2294), True, 'from matplotlib import pyplot as plt\n'), ((2513, 2525), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2523, 2525), True, 'from matplotlib import pyplot as plt\n'), ((2526, 2569), 'seaborn.distplot', 'sns.distplot', (['posterior'], {'bins': '(120)', 'kde': '(True)'}), '(posterior, bins=120, kde=True)\n', (2538, 2569), True, 'import seaborn as sns\n'), ((2570, 2658), 'matplotlib.pyplot.title', 'plt.title', (["('Closed form solution: $p(\\\\beta|X) \\\\sim Gamma(a=%g, b=%g)$' % (a0, b0))"], {}), "('Closed form solution: $p(\\\\beta|X) \\\\sim Gamma(a=%g, b=%g)$' % (\n a0, b0))\n", (2579, 2658), True, 'from matplotlib import pyplot as plt\n'), ((2693, 2729), 'odin.visual.plot_save', 'V.plot_save', (['"""/tmp/tmp.pdf"""'], {'dpi': '(200)'}), "('/tmp/tmp.pdf', dpi=200)\n", (2704, 2729), True, 'from odin import visual as V\n'), ((1910, 1939), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['p_beta_given_x'], {}), '(p_beta_given_x)\n', (1923, 1939), True, 'import tensorflow as tf\n'), ((1061, 1078), 'tensorflow.sqrt', 'tf.sqrt', (['(1 / beta)'], {}), '(1 / beta)\n', (1068, 1078), True, 'import tensorflow as tf\n')]
|
try:
import tensorflow
except ModuleNotFoundError:
pkg_name = 'tensorflow'
import os
import sys
import subprocess
from cellacdc import myutils
cancel = myutils.install_package_msg(pkg_name)
if cancel:
raise ModuleNotFoundError(
f'User aborted {pkg_name} installation'
)
subprocess.check_call(
[sys.executable, '-m', 'pip', 'install', 'tensorflow']
)
# numba requires numpy<1.22 but tensorflow might install higher
# so install numpy less than 1.22 if needed
import numpy
np_version = numpy.__version__.split('.')
np_major, np_minor = [int(v) for v in np_version][:2]
    if (np_major, np_minor) >= (1, 22):  # i.e. the installed numpy is 1.22 or newer
subprocess.check_call(
[sys.executable, '-m', 'pip', 'install', '--upgrade', 'numpy<1.22']
)
|
[
"numpy.__version__.split",
"subprocess.check_call",
"cellacdc.myutils.install_package_msg"
] |
[((180, 217), 'cellacdc.myutils.install_package_msg', 'myutils.install_package_msg', (['pkg_name'], {}), '(pkg_name)\n', (207, 217), False, 'from cellacdc import myutils\n'), ((334, 411), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', 'tensorflow']"], {}), "([sys.executable, '-m', 'pip', 'install', 'tensorflow'])\n", (355, 411), False, 'import subprocess\n'), ((577, 605), 'numpy.__version__.split', 'numpy.__version__.split', (['"""."""'], {}), "('.')\n", (600, 605), False, 'import numpy\n'), ((713, 807), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', '--upgrade', 'numpy<1.22']"], {}), "([sys.executable, '-m', 'pip', 'install', '--upgrade',\n 'numpy<1.22'])\n", (734, 807), False, 'import subprocess\n')]
|
import os, sys, re, json, random, importlib
import numpy as np
import pandas as pd
from collections import OrderedDict
from tqdm import tqdm
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import logomaker as lm
from venn import venn
from venn import generate_petal_labels, draw_venn
from scipy.stats import pearsonr
from scipy.cluster import hierarchy
from util import *
import warnings
warnings.filterwarnings('ignore')
class CAMInterp():
def __init__(self, mhc_seq_filename, allele_mask_dirname, epitope_mask_dirname, df_filename, output_dir,
pred_basename='score', pred_threshold=0.9, mhc_len=182, min_sample_num=100, submotif_len=4):
self.aa_str = 'ACDEFGHIKLMNPQRSTVWY'
self.mhc_len = mhc_len
self.epitope_len = 10
self.res34_pos = [6, 8, 23, 44, 58, 61, 62, 65, 66, 68, 69, 72, 73, 75, 76, 79, 80, 83, 94,
96, 98, 113, 115, 117, 142, 146, 149, 151, 155, 157, 158, 162, 166, 170]
self.color_dict = {'A': '#DACC47', 'B': '#B1DEC9', 'C': '#FFBB99', 'polymorphism': '#875A85'}
self.dpi = 600
self.fontsize = 10
self.pred_basename = pred_basename
self.pred_threshold = pred_threshold
self.min_sample_num = min_sample_num
self.submotif_len = submotif_len
self.output_dir = output_dir
# mhc_seq_dict
self.mhc_seq_dict = json.load(open(mhc_seq_filename, 'r'))
# allele_mask_df
if type(allele_mask_dirname) == list:
alleles = [self._convert_allele(i) for i in os.listdir(allele_mask_dirname[0])]
self.allele_mask_df = pd.DataFrame(columns=alleles, index=range(self.mhc_len), data=0)
self.allele_mask_df.loc['count'] = 0
for i in range(len(allele_mask_dirname)):
temp_df = pd.DataFrame(self._parse_mask(allele_mask_dirname[i], mask_type='mhc'))
self.allele_mask_df.loc[temp_df.index, temp_df.columns] += temp_df
self.allele_mask_df.loc['count', temp_df.columns] += 1
self.allele_mask_df = self.allele_mask_df.loc[:, self.allele_mask_df.loc['count'] != 0]
self.allele_mask_df.loc[range(self.mhc_len)] /= self.allele_mask_df.loc['count']
self.allele_mask_df = self.allele_mask_df.drop('count')
else:
self.allele_mask_df = pd.DataFrame(self._parse_mask(allele_mask_dirname, mask_type='mhc'))
self.allele_mask_df.to_csv('%s/AlleleMask.csv'%self.output_dir)
# epitope_mask_df
if type(epitope_mask_dirname) == list:
alleles = [self._convert_allele(i) for i in os.listdir(epitope_mask_dirname[0])]
self.epitope_mask_df = pd.DataFrame(columns=alleles, index=range(self.epitope_len), data=0)
self.epitope_mask_df.loc['count'] = 0
for i in range(len(epitope_mask_dirname)):
temp_df = pd.DataFrame(self._parse_mask(epitope_mask_dirname[i], mask_type='epitope'))
self.epitope_mask_df.loc[temp_df.index, temp_df.columns] += temp_df
self.epitope_mask_df.loc['count', temp_df.columns] += 1
self.epitope_mask_df = self.epitope_mask_df.loc[:, self.epitope_mask_df.loc['count'] != 0]
self.epitope_mask_df.loc[range(self.epitope_len)] /= self.epitope_mask_df.loc['count']
self.epitope_mask_df = self.epitope_mask_df.drop('count')
else:
self.epitope_mask_df = pd.DataFrame(self._parse_mask(epitope_mask_dirname, mask_type='epitope'))
self.epitope_mask_df['position'] = [1,2,3,4,5,-5,-4,-3,-2,-1]
self.epitope_mask_df = self.epitope_mask_df.set_index('position', drop=True)
self.epitope_mask_df.to_csv('%s/EpitopeMask.csv'%self.output_dir)
# df
self.df = pd.read_csv(df_filename, index_col=0)
self.alleles = list(self.df['mhc'].unique())
self.allele_num = len(self.alleles)
# motif_dict
self.motif_dict = self._parse_motif(pred_basename, pred_threshold, self.min_sample_num)
self.alleles = list(self.df['mhc'].unique())
self.allele_num = len(self.alleles)
# mhc_seqlogo_df
self.mhc_seqlogo_df = self._mhc_seqlogo_df(self.alleles, list(range(self.mhc_len)))
def ResidueAnalysis(self, cam_threshold, importance_threshold, barplot_figsize=(10,2), square_figsize=(3.5,3.5)):
# mean plot
self._residue_barplot(self.allele_mask_df.mean(axis=1), self.res34_pos, figsize=barplot_figsize,
figfile='%s/CAMmean.png'%self.output_dir)
# importance plot
importance_count = self._residue_importance_count(self.alleles, cam_threshold)
self._residue_barplot(importance_count, self.res34_pos, figsize=barplot_figsize,
figfile='%s/CAMimportance.png'%self.output_dir)
# important residues - stacked plot
df = self._importance_stacked_barplot(cam_threshold, self.res34_pos,
xticklabels=False, yticklabels=True, figsize=barplot_figsize,
figfile='%s/CAMimportanceStacked.png'%self.output_dir)
df.to_csv('%s/ImportanceStack.csv'%self.output_dir)
# important residues
residue_dict = self._select_residue(cam_threshold, importance_threshold)
json.dump(residue_dict, open('%s/ResidueSelection.json'%self.output_dir, 'w'))
# venn diagram of residue selection
self._importance_venn_plot(residue_dict, figsize=square_figsize,
figfile='%s/ResidueSelectionVenn.png'%self.output_dir)
# correlation between residue importance and sequence entropy
        # entropy = -sigma(probability * log(probability))
# allele part
df = self._mhc_importance_polymorphism_plot(cam_threshold, residue_dict, figsize=square_figsize,
figfile='%s/AlleleImportanceEntropyCorrelation.png'%self.output_dir)
df.to_csv('%s/AlleleImportancePolymorphism.csv'%self.output_dir)
# epitope part
df = self._epitope_importance_polymorphism_plot(figsize=square_figsize,
figfile='%s/EpitopeImportanceEntropyCorrelation.png'%self.output_dir)
df.to_csv('%s/EpitopeImportancePolymorphism.csv'%self.output_dir)
def ClusterAnalysis(self, method, metric, allele_figsize=(10,2), epitope_figsize=(3.5,3.5)):
alleles = self.alleles
# allele masks
allele_order, position_order = self._mask_clustering_plot(alleles, mask_type='mhc',
method=method, metric=metric,
xticklabels=False, yticklabels=False,
row_colors=True, figsize=allele_figsize,
title=None, xlabel='MHC-I position', ylabel='MHC-I allele',
figfile='%s/AlleleCAMcluster_all.png'%self.output_dir)
# epitope masks
allele_order, position_order = self._mask_clustering_plot(alleles, mask_type='epitope',
method=method, metric=metric,
xticklabels=True, yticklabels=False,
row_colors=True, figsize=epitope_figsize,
title=None, xlabel='peptide position', ylabel='MHC-I allele',
figfile='%s/EpitopeCAMcluster_all.png'%self.output_dir)
""""""""""""""""""""""""""""""""""""""
# Plots
""""""""""""""""""""""""""""""""""""""
# mask_type: mhc or epitope
def _mask_clustering_plot(self, alleles, mask_type='mhc',
method='average', metric='euclidean',
allele_linkage=True, position_linkage=False,
row_colors=False, xticklabels=True, yticklabels=True,
title=None, xlabel=None, ylabel=None,
figsize=(8, 4), figfile=None):
# residue positions
if mask_type == 'mhc':
positions = list(range(self.mhc_len))
df = self.allele_mask_df.iloc[positions][alleles].T
else:
positions = [1,2,3,4,-4,-3,-2,-1]
df = self.epitope_mask_df.loc[positions][alleles].T
# linkage
zx, zy = None, None
if allele_linkage:
zy = hierarchy.linkage(df, method=method, metric=metric, optimal_ordering=True)
if position_linkage:
zx = hierarchy.linkage(df.T, method=method, metric=metric, optimal_ordering=True)
# row colors
if row_colors:
color_list = list()
for allele in alleles:
hla = allele.split('*')[0]
color_list.append(self.color_dict[hla])
else:
color_list = None
# clustermap
g = sns.clustermap(df,
col_cluster=position_linkage,
row_cluster=allele_linkage,
row_linkage=zy,
col_linkage=zx,
row_colors = color_list,
cmap='Blues',
cbar_kws={'orientation': 'horizontal', 'label': 'mask score'},
cbar_pos=(.3, -.05, .4, .02),
dendrogram_ratio=0.1,
colors_ratio=0.02,
xticklabels=xticklabels,
yticklabels=yticklabels,
figsize=figsize)
g.ax_heatmap.set_title(title)
g.ax_heatmap.set_xlabel(xlabel)
g.ax_heatmap.set_ylabel(ylabel)
# cluster order
if allele_linkage:
allele_order = g.dendrogram_row.reordered_ind
allele_order = [alleles[i] for i in allele_order]
else:
allele_order = None
if position_linkage:
position_order = g.dendrogram_col.reordered_ind
position_order = [positions[i] for i in position_order]
else:
position_order = None
# save figure
if figfile:
plt.savefig(figfile, bbox_inches='tight', dpi=self.dpi)
return allele_order, position_order
def _motif_plot(self, alleles, motif_dict, figfile=None):
allele_num = len(alleles)
fig, ax = plt.subplots(allele_num, figsize=(0.8, allele_num*0.2), dpi=self.dpi)
for i in range(allele_num):
allele = alleles[i]
seqlogo_df = pd.DataFrame(motif_dict[allele], columns=list(self.aa_str))
logo = lm.Logo(seqlogo_df, ax=ax[i], color_scheme="skylign_protein")
_ = ax[i].set_xticks([])
_ = ax[i].set_yticks([])
for side in ['top','bottom','left','right']:
ax[i].spines[side].set_linewidth(0.1)
fig.tight_layout()
if figfile:
fig.savefig(figfile)
def _residue_barplot(self, arr, tag_pos, figsize=(8,3), figfile=None):
# main figure
fig, ax = plt.subplots(1, figsize=figsize, dpi=self.dpi)
sns.barplot(x=list(range(self.mhc_len)), y=arr, ax=ax)
ax.tick_params(axis='x', rotation=90)
# fontsize
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_yticklabels()):
item.set_fontsize(self.fontsize)
for item in ax.get_xticklabels():
item.set_fontsize(self.fontsize/4)
# set xtick colors
colors = list()
for i in range(self.mhc_len):
if i in tag_pos:
colors.append('red')
else:
colors.append('black')
for tick, color in zip(ax.get_xticklabels(), colors):
tick.set_color(color)
fig.tight_layout()
# save figure
if figfile:
fig.savefig(figfile, bbox_inches='tight')
def _importance_stacked_barplot(self, cam_threshold, tag_pos, figsize=(8,3),
xticklabels=True, yticklabels=True, figfile=None):
# build importance dataframe, columns=['A','B','C']
d = dict()
for hla in ['A', 'B', 'C']:
alleles = [i for i in self.alleles if hla in i]
d[hla] = self._residue_importance_count(alleles, cam_threshold)
df = pd.DataFrame(d)
# figure
fig = plt.figure(figsize=figsize, dpi=self.dpi)
ax = fig.add_subplot(111)
ax.margins(x=0)
# stacked bar plot
ax.bar(df.index, df['A'], color=self.color_dict['A'])
ax.bar(df.index, df['B'], bottom=df['A'], color=self.color_dict['B'])
ax.bar(df.index, df['C'], bottom=df['A'] + df['B'], color=self.color_dict['C'])
# ticks & ticklabels
if xticklabels:
_ = ax.set_xticks(df.index)
_ = ax.set_xticklabels(df.index+1, rotation=90)
# xtick colors
colors = list()
for i in df.index:
if i in tag_pos:
colors.append('red')
else:
colors.append('black')
for tick, color in zip(ax.get_xticklabels(), colors):
tick.set_color(color)
else:
_ = ax.set_xticks([])
_ = ax.set_xticklabels([])
if yticklabels:
_ = ax.set_ylabel('importance')
else:
_ = ax.set_yticks([])
_ = ax.set_yticklabels([])
# fontsize
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(self.fontsize)
# legend
Abar = matplotlib.patches.Rectangle((0,0),1,1,fc=self.color_dict['A'], edgecolor='none')
Bbar = matplotlib.patches.Rectangle((0,0),1,1,fc=self.color_dict['B'], edgecolor='none')
Cbar = matplotlib.patches.Rectangle((0,0),1,1,fc=self.color_dict['C'], edgecolor='none')
l = ax.legend([Abar, Bbar, Cbar], ['HLA-A', 'HLA-B', 'HLA-C'], loc=0, ncol=3, fontsize=self.fontsize)
l.draw_frame(False)
fig.tight_layout()
# save figure
if figfile:
fig.savefig(figfile, bbox_inches='tight')
return df
def _mhc_importance_polymorphism_plot(self, cam_threshold, position_dict, figsize=(3.5,3.5), s=2, figfile=None):
# figure
df = pd.DataFrame()
fig, ax = plt.subplots(1, figsize=figsize, dpi=self.dpi)
# calculate entropy
df['polymorphism'] = -(self.mhc_seqlogo_df*np.log(self.mhc_seqlogo_df)).sum(axis=1)
# calculate importance by HLA
importance_counts = list()
for hla in ['A', 'B', 'C']:
alleles = [i for i in self.alleles if hla in i]
importance_counts.append(self._residue_importance_count(alleles, cam_threshold))
importance_counts = np.array(importance_counts)
importance_count = importance_counts.max(axis=0)
df['importance'] = importance_count
# label
df['label'] = 'others'
df.loc[position_dict['res34'], 'label'] = '34-residue'
df.loc[position_dict['selected'], 'label'] = 'selected'
intersect = list(set(position_dict['res34']) & set(position_dict['selected']))
df.loc[intersect, 'label'] = 'intersection'
# plot_param
param_dict = OrderedDict({'selected':{'color': '#ff4949', 'marker': 'o', 's': 12},
'intersection': {'color': '#ff4949', 'marker': 'x', 's': 12},
'34-residue': {'color': '#adb5bd', 'marker': 'x', 's': 12},
'others': {'color': '#adb5bd', 'marker': 'o', 's': 12}})
# regplot
df = df[df['polymorphism']!=0]
p = sns.regplot(x='importance', y='polymorphism', data=df, ax=ax, fit_reg=True, scatter_kws={'s':0})
for label, params in param_dict.items():
p = sns.regplot(x='importance', y='polymorphism', data=df[df['label']==label],
ax=ax, fit_reg=False, marker=params['marker'],
scatter_kws={'color':params['color'], 's':params['s'], 'linewidths': 0.1})
'''
# annotation
for idx, row in df.iterrows():
if idx in [64, 70]:
p.text(df.loc[idx, 'importance']-0.025, df.loc[idx, 'polymorphism']-0.09, idx+1, fontsize=self.fontsize-2)
'''
# fontsize
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(self.fontsize)
# legend
legend_list = [matplotlib.patches.Rectangle((0,0),1,1,fc='#ff4949', edgecolor='none'),
matplotlib.patches.Rectangle((0,0),1,1,fc='#adb5bd', edgecolor='none'),
plt.scatter([], [], color='black', marker='x', s=12),
plt.scatter([], [], color='black', marker='o', s=12)]
label_list = ['selected', 'non-selected', '34-residue', 'non-34-residue']
l = ax.legend(handles=legend_list, labels=label_list,
loc='lower left', bbox_to_anchor=(-0.2,1), ncol=2, fontsize=self.fontsize)
l.draw_frame(True)
# layout
ax.set_xticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.02])
ax.set_xticklabels([0.0, 0.2, 0.4, 0.6, 0.8, 1.0, ''])
fig.tight_layout()
# pearson correlation
pearson, pvalue = pearsonr(df['importance'], df['polymorphism'])
ax.text(0.05, 1.6, 'r=%.2f, p=%.2e'%(pearson, pvalue))
# save figure
if figfile:
fig.savefig(figfile, bbox_inches='tight')
return df
def _epitope_importance_polymorphism_plot(self, figsize=(3.5,3.5), figfile=None):
# get epitope polymorphism
peptides = self.df[self.df[self.pred_basename] > self.pred_threshold]['sequence'].to_list()
peptides = [i[:self.submotif_len] + i[-self.submotif_len:] for i in peptides]
seqlogo_df = lm.alignment_to_matrix(sequences=peptides, to_type="probability",
characters_to_ignore=".", pseudocount=0)
polymorphism = -(seqlogo_df*np.log(seqlogo_df)).sum(axis=1).to_numpy()
# df for plot
df = pd.DataFrame(index=list(range(1, 1+self.submotif_len)) + list(range(-self.submotif_len, 0)))
df['polymorphism'] = polymorphism
df['mask_score'] = self.epitope_mask_df.mean(axis=1)[df.index]
df['residue_tag'] = 'other'
df.loc[[2,-1], 'residue_tag'] = 'anchor'
# plot
fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=self.dpi)
sns.scatterplot(data=df, x='mask_score', y='polymorphism', hue='residue_tag', ax=ax)
for pos in [2, -1]:
ax.text(x=df.loc[pos, 'mask_score']-0.25, y=df.loc[pos, 'polymorphism'], s='Position: {}'.format(pos))
fig.tight_layout()
if figfile:
fig.savefig(figfile, bbox_inches='tight')
return df
def _importance_venn_plot(self, position_dict, figsize=(3.5,3.5), figfile=None):
keys = ['A','B','C','polymorphism']
position_dict = {k: set(v) for k, v in position_dict.items() if k in keys}
petal_labels = generate_petal_labels(position_dict.values())
colors = [list(np.array(self._convert_color_code(self.color_dict[k]))/256) + [0.4] for k in keys]
fig, ax = plt.subplots(1, figsize=figsize, dpi=self.dpi)
draw_venn(petal_labels=petal_labels, dataset_labels=position_dict.keys(),hint_hidden=False,
colors=colors, figsize=figsize, fontsize=self.fontsize, legend_loc="best", ax=ax)
ax.get_legend().remove()
legends = [matplotlib.patches.Rectangle((0,0),1,1,fc=color, edgecolor='none') for color in colors]
l = fig.legend(legends, keys, fontsize=self.fontsize,
ncol=4, loc="lower center", bbox_to_anchor=(0, 0.75, 1, 0.2),
columnspacing=1, handlelength=0.5, handletextpad=0.2, borderpad=0.2)
fig.tight_layout()
if figfile:
fig.savefig(figfile, bbox_inches='tight')
""""""""""""""""""""""""""""""""""""""
# Minor Functions
""""""""""""""""""""""""""""""""""""""
def _parse_mask(self, dirname, mask_type):
masks = OrderedDict()
for allele in os.listdir(dirname):
if re.match(r'[ABC][0-9]+', allele):
if not os.path.isfile('%s/%s/record.npy'%(dirname, allele)):
continue
if mask_type == 'mhc':
masks[self._convert_allele(allele)] \
= np.load('%s/%s/record.npy'%(dirname, allele), allow_pickle=True)[()]['mhc_masks'].mean(axis=0)
else:
masks[self._convert_allele(allele)] \
= np.load('%s/%s/record.npy'%(dirname, allele), allow_pickle=True)[()]['epitope_masks'].mean(axis=0)
return masks
def _parse_motif(self, basename, threshold, sample_num):
motifs = OrderedDict()
for i in range(self.allele_num):
allele = self.alleles[i]
seqs = self.df.loc[(self.df['mhc']==allele) & (self.df[basename] >= threshold), 'sequence']
if len(seqs) >= sample_num:
seqs = seqs.apply(lambda x: x[:self.submotif_len] + x[-self.submotif_len:])
temp_df = pd.DataFrame(columns=list(self.aa_str))
seqlogo_df = lm.alignment_to_matrix(sequences=seqs, to_type="information", characters_to_ignore="XU")
temp_df = pd.concat([temp_df, seqlogo_df], axis=0)
temp_df = temp_df.fillna(0.0)
motifs[allele] = temp_df.to_numpy()
return motifs
def _residue_importance_count(self, alleles, cam_threshold):
importance_count = np.array([0]*self.mhc_len)
for allele in alleles:
importance_count[self.allele_mask_df[allele] > cam_threshold] += 1
return importance_count / len(alleles)
def _mhc_seqlogo_df(self, alleles, positions):
seqs = list()
for allele in alleles:
seqs.append(''.join(self.mhc_seq_dict[allele][j] for j in positions))
temp_df = pd.DataFrame(columns=list(self.aa_str))
seqlogo_df = lm.alignment_to_matrix(sequences=seqs, to_type="probability",
characters_to_ignore=".", pseudocount=0)
temp_df = pd.concat([temp_df, seqlogo_df], axis=0)
temp_df = temp_df.fillna(0.0)
return temp_df
def _select_residue(self, cam_threshold, importance_threshold):
importance_positions = dict()
importance_position_set = set()
importance_positions['res34'] = self.res34_pos
# by HLA
for hla in ['A', 'B', 'C']:
alleles = [i for i in self.alleles if hla in i]
importance_count = self._residue_importance_count(alleles, cam_threshold)
pos = list(map(int, np.where(importance_count > importance_threshold)[0]))
importance_positions[hla] = pos
importance_position_set = importance_position_set | set(pos)
# polymorphism
polymorphism_position = list(map(int,self.mhc_seqlogo_df[~(self.mhc_seqlogo_df.max(axis=1)==1)].index))
importance_positions['polymorphism'] = sorted(polymorphism_position)
importance_position_set = importance_position_set & set(polymorphism_position)
# final
importance_position = sorted(list(importance_position_set))
importance_positions['selected'] = importance_position
return importance_positions
def _convert_allele(self, allele):
if re.match(r'[ABC][0-9]+', allele):
return allele[0] + '*' + allele[1:-2] + ':' + allele[-2:]
elif re.match(r'[ABC]\*[0-9]+\:[0-9]+', allele):
return allele
def _convert_color_code(self, code):
return tuple(int(code[i:i+2], 16) for i in (1, 3, 5))
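# A minimal usage sketch (the file and directory names below are hypothetical
# and the thresholds are illustrative, not values prescribed by this module):
# interp = CAMInterp('mhc_seq.json', 'allele_masks/', 'epitope_masks/',
#                    'predictions.csv', output_dir='results')
# interp.ResidueAnalysis(cam_threshold=0.5, importance_threshold=0.4)
# interp.ClusterAnalysis(method='average', metric='euclidean')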
|
[
"pandas.read_csv",
"numpy.log",
"numpy.array",
"seaborn.scatterplot",
"scipy.stats.pearsonr",
"seaborn.regplot",
"os.listdir",
"numpy.where",
"scipy.cluster.hierarchy.linkage",
"matplotlib.pyplot.scatter",
"logomaker.alignment_to_matrix",
"pandas.DataFrame",
"collections.OrderedDict",
"matplotlib.pyplot.savefig",
"seaborn.clustermap",
"re.match",
"os.path.isfile",
"warnings.filterwarnings",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.figure",
"numpy.load",
"pandas.concat",
"matplotlib.pyplot.subplots",
"logomaker.Logo"
] |
[((412, 445), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (435, 445), False, 'import warnings\n'), ((3833, 3870), 'pandas.read_csv', 'pd.read_csv', (['df_filename'], {'index_col': '(0)'}), '(df_filename, index_col=0)\n', (3844, 3870), True, 'import pandas as pd\n'), ((9393, 9760), 'seaborn.clustermap', 'sns.clustermap', (['df'], {'col_cluster': 'position_linkage', 'row_cluster': 'allele_linkage', 'row_linkage': 'zy', 'col_linkage': 'zx', 'row_colors': 'color_list', 'cmap': '"""Blues"""', 'cbar_kws': "{'orientation': 'horizontal', 'label': 'mask score'}", 'cbar_pos': '(0.3, -0.05, 0.4, 0.02)', 'dendrogram_ratio': '(0.1)', 'colors_ratio': '(0.02)', 'xticklabels': 'xticklabels', 'yticklabels': 'yticklabels', 'figsize': 'figsize'}), "(df, col_cluster=position_linkage, row_cluster=allele_linkage,\n row_linkage=zy, col_linkage=zx, row_colors=color_list, cmap='Blues',\n cbar_kws={'orientation': 'horizontal', 'label': 'mask score'}, cbar_pos\n =(0.3, -0.05, 0.4, 0.02), dendrogram_ratio=0.1, colors_ratio=0.02,\n xticklabels=xticklabels, yticklabels=yticklabels, figsize=figsize)\n", (9407, 9760), True, 'import seaborn as sns\n'), ((10935, 11006), 'matplotlib.pyplot.subplots', 'plt.subplots', (['allele_num'], {'figsize': '(0.8, allele_num * 0.2)', 'dpi': 'self.dpi'}), '(allele_num, figsize=(0.8, allele_num * 0.2), dpi=self.dpi)\n', (10947, 11006), True, 'import matplotlib.pyplot as plt\n'), ((11622, 11668), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': 'figsize', 'dpi': 'self.dpi'}), '(1, figsize=figsize, dpi=self.dpi)\n', (11634, 11668), True, 'import matplotlib.pyplot as plt\n'), ((12946, 12961), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (12958, 12961), True, 'import pandas as pd\n'), ((13002, 13043), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': 'self.dpi'}), '(figsize=figsize, dpi=self.dpi)\n', (13012, 13043), True, 'import matplotlib.pyplot as plt\n'), ((14342, 14431), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(0, 0)', '(1)', '(1)'], {'fc': "self.color_dict['A']", 'edgecolor': '"""none"""'}), "((0, 0), 1, 1, fc=self.color_dict['A'],\n edgecolor='none')\n", (14370, 14431), False, 'import matplotlib\n'), ((14439, 14528), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(0, 0)', '(1)', '(1)'], {'fc': "self.color_dict['B']", 'edgecolor': '"""none"""'}), "((0, 0), 1, 1, fc=self.color_dict['B'],\n edgecolor='none')\n", (14467, 14528), False, 'import matplotlib\n'), ((14536, 14625), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(0, 0)', '(1)', '(1)'], {'fc': "self.color_dict['C']", 'edgecolor': '"""none"""'}), "((0, 0), 1, 1, fc=self.color_dict['C'],\n edgecolor='none')\n", (14564, 14625), False, 'import matplotlib\n'), ((15089, 15103), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15101, 15103), True, 'import pandas as pd\n'), ((15122, 15168), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': 'figsize', 'dpi': 'self.dpi'}), '(1, figsize=figsize, dpi=self.dpi)\n', (15134, 15168), True, 'import matplotlib.pyplot as plt\n'), ((15581, 15608), 'numpy.array', 'np.array', (['importance_counts'], {}), '(importance_counts)\n', (15589, 15608), True, 'import numpy as np\n'), ((16067, 16329), 'collections.OrderedDict', 'OrderedDict', (["{'selected': {'color': '#ff4949', 'marker': 'o', 's': 12}, 'intersection':\n {'color': '#ff4949', 'marker': 'x', 's': 12}, '34-residue': {'color':\n '#adb5bd', 'marker': 
'x', 's': 12}, 'others': {'color': '#adb5bd',\n 'marker': 'o', 's': 12}}"], {}), "({'selected': {'color': '#ff4949', 'marker': 'o', 's': 12},\n 'intersection': {'color': '#ff4949', 'marker': 'x', 's': 12},\n '34-residue': {'color': '#adb5bd', 'marker': 'x', 's': 12}, 'others': {\n 'color': '#adb5bd', 'marker': 'o', 's': 12}})\n", (16078, 16329), False, 'from collections import OrderedDict\n'), ((16488, 16589), 'seaborn.regplot', 'sns.regplot', ([], {'x': '"""importance"""', 'y': '"""polymorphism"""', 'data': 'df', 'ax': 'ax', 'fit_reg': '(True)', 'scatter_kws': "{'s': 0}"}), "(x='importance', y='polymorphism', data=df, ax=ax, fit_reg=True,\n scatter_kws={'s': 0})\n", (16499, 16589), True, 'import seaborn as sns\n'), ((18173, 18219), 'scipy.stats.pearsonr', 'pearsonr', (["df['importance']", "df['polymorphism']"], {}), "(df['importance'], df['polymorphism'])\n", (18181, 18219), False, 'from scipy.stats import pearsonr\n'), ((18733, 18843), 'logomaker.alignment_to_matrix', 'lm.alignment_to_matrix', ([], {'sequences': 'peptides', 'to_type': '"""probability"""', 'characters_to_ignore': '"""."""', 'pseudocount': '(0)'}), "(sequences=peptides, to_type='probability',\n characters_to_ignore='.', pseudocount=0)\n", (18755, 18843), True, 'import logomaker as lm\n'), ((19332, 19381), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'figsize', 'dpi': 'self.dpi'}), '(1, 1, figsize=figsize, dpi=self.dpi)\n', (19344, 19381), True, 'import matplotlib.pyplot as plt\n'), ((19390, 19479), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'df', 'x': '"""mask_score"""', 'y': '"""polymorphism"""', 'hue': '"""residue_tag"""', 'ax': 'ax'}), "(data=df, x='mask_score', y='polymorphism', hue=\n 'residue_tag', ax=ax)\n", (19405, 19479), True, 'import seaborn as sns\n'), ((20184, 20230), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': 'figsize', 'dpi': 'self.dpi'}), '(1, figsize=figsize, dpi=self.dpi)\n', (20196, 20230), True, 'import matplotlib.pyplot as plt\n'), ((21120, 21133), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21131, 21133), False, 'from collections import OrderedDict\n'), ((21156, 21175), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (21166, 21175), False, 'import os, sys, re, json, random, importlib\n'), ((21848, 21861), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21859, 21861), False, 'from collections import OrderedDict\n'), ((22641, 22669), 'numpy.array', 'np.array', (['([0] * self.mhc_len)'], {}), '([0] * self.mhc_len)\n', (22649, 22669), True, 'import numpy as np\n'), ((23092, 23198), 'logomaker.alignment_to_matrix', 'lm.alignment_to_matrix', ([], {'sequences': 'seqs', 'to_type': '"""probability"""', 'characters_to_ignore': '"""."""', 'pseudocount': '(0)'}), "(sequences=seqs, to_type='probability',\n characters_to_ignore='.', pseudocount=0)\n", (23114, 23198), True, 'import logomaker as lm\n'), ((23257, 23297), 'pandas.concat', 'pd.concat', (['[temp_df, seqlogo_df]'], {'axis': '(0)'}), '([temp_df, seqlogo_df], axis=0)\n', (23266, 23297), True, 'import pandas as pd\n'), ((24527, 24558), 're.match', 're.match', (['"""[ABC][0-9]+"""', 'allele'], {}), "('[ABC][0-9]+', allele)\n", (24535, 24558), False, 'import os, sys, re, json, random, importlib\n'), ((8886, 8960), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['df'], {'method': 'method', 'metric': 'metric', 'optimal_ordering': '(True)'}), '(df, method=method, metric=metric, optimal_ordering=True)\n', (8903, 8960), False, 'from 
scipy.cluster import hierarchy\n'), ((9007, 9083), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['df.T'], {'method': 'method', 'metric': 'metric', 'optimal_ordering': '(True)'}), '(df.T, method=method, metric=metric, optimal_ordering=True)\n', (9024, 9083), False, 'from scipy.cluster import hierarchy\n'), ((10706, 10761), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figfile'], {'bbox_inches': '"""tight"""', 'dpi': 'self.dpi'}), "(figfile, bbox_inches='tight', dpi=self.dpi)\n", (10717, 10761), True, 'import matplotlib.pyplot as plt\n'), ((11177, 11238), 'logomaker.Logo', 'lm.Logo', (['seqlogo_df'], {'ax': 'ax[i]', 'color_scheme': '"""skylign_protein"""'}), "(seqlogo_df, ax=ax[i], color_scheme='skylign_protein')\n", (11184, 11238), True, 'import logomaker as lm\n'), ((16650, 16858), 'seaborn.regplot', 'sns.regplot', ([], {'x': '"""importance"""', 'y': '"""polymorphism"""', 'data': "df[df['label'] == label]", 'ax': 'ax', 'fit_reg': '(False)', 'marker': "params['marker']", 'scatter_kws': "{'color': params['color'], 's': params['s'], 'linewidths': 0.1}"}), "(x='importance', y='polymorphism', data=df[df['label'] == label],\n ax=ax, fit_reg=False, marker=params['marker'], scatter_kws={'color':\n params['color'], 's': params['s'], 'linewidths': 0.1})\n", (16661, 16858), True, 'import seaborn as sns\n'), ((17359, 17433), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(0, 0)', '(1)', '(1)'], {'fc': '"""#ff4949"""', 'edgecolor': '"""none"""'}), "((0, 0), 1, 1, fc='#ff4949', edgecolor='none')\n", (17387, 17433), False, 'import matplotlib\n'), ((17454, 17528), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(0, 0)', '(1)', '(1)'], {'fc': '"""#adb5bd"""', 'edgecolor': '"""none"""'}), "((0, 0), 1, 1, fc='#adb5bd', edgecolor='none')\n", (17482, 17528), False, 'import matplotlib\n'), ((17549, 17601), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'color': '"""black"""', 'marker': '"""x"""', 's': '(12)'}), "([], [], color='black', marker='x', s=12)\n", (17560, 17601), True, 'import matplotlib.pyplot as plt\n'), ((17626, 17678), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'color': '"""black"""', 'marker': '"""o"""', 's': '(12)'}), "([], [], color='black', marker='o', s=12)\n", (17637, 17678), True, 'import matplotlib.pyplot as plt\n'), ((20501, 20571), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(0, 0)', '(1)', '(1)'], {'fc': 'color', 'edgecolor': '"""none"""'}), "((0, 0), 1, 1, fc=color, edgecolor='none')\n", (20529, 20571), False, 'import matplotlib\n'), ((21192, 21223), 're.match', 're.match', (['"""[ABC][0-9]+"""', 'allele'], {}), "('[ABC][0-9]+', allele)\n", (21200, 21223), False, 'import os, sys, re, json, random, importlib\n'), ((24644, 24687), 're.match', 're.match', (['"""[ABC]\\\\*[0-9]+\\\\:[0-9]+"""', 'allele'], {}), "('[ABC]\\\\*[0-9]+\\\\:[0-9]+', allele)\n", (24652, 24687), False, 'import os, sys, re, json, random, importlib\n'), ((22271, 22363), 'logomaker.alignment_to_matrix', 'lm.alignment_to_matrix', ([], {'sequences': 'seqs', 'to_type': '"""information"""', 'characters_to_ignore': '"""XU"""'}), "(sequences=seqs, to_type='information',\n characters_to_ignore='XU')\n", (22293, 22363), True, 'import logomaker as lm\n'), ((22386, 22426), 'pandas.concat', 'pd.concat', (['[temp_df, seqlogo_df]'], {'axis': '(0)'}), '([temp_df, seqlogo_df], axis=0)\n', (22395, 22426), True, 'import pandas as pd\n'), ((1602, 1636), 'os.listdir', 'os.listdir', (['allele_mask_dirname[0]'], {}), 
'(allele_mask_dirname[0])\n', (1612, 1636), False, 'import os, sys, re, json, random, importlib\n'), ((2672, 2707), 'os.listdir', 'os.listdir', (['epitope_mask_dirname[0]'], {}), '(epitope_mask_dirname[0])\n', (2682, 2707), False, 'import os, sys, re, json, random, importlib\n'), ((21249, 21303), 'os.path.isfile', 'os.path.isfile', (["('%s/%s/record.npy' % (dirname, allele))"], {}), "('%s/%s/record.npy' % (dirname, allele))\n", (21263, 21303), False, 'import os, sys, re, json, random, importlib\n'), ((15249, 15276), 'numpy.log', 'np.log', (['self.mhc_seqlogo_df'], {}), '(self.mhc_seqlogo_df)\n', (15255, 15276), True, 'import numpy as np\n'), ((23794, 23843), 'numpy.where', 'np.where', (['(importance_count > importance_threshold)'], {}), '(importance_count > importance_threshold)\n', (23802, 23843), True, 'import numpy as np\n'), ((18920, 18938), 'numpy.log', 'np.log', (['seqlogo_df'], {}), '(seqlogo_df)\n', (18926, 18938), True, 'import numpy as np\n'), ((21451, 21517), 'numpy.load', 'np.load', (["('%s/%s/record.npy' % (dirname, allele))"], {'allow_pickle': '(True)'}), "('%s/%s/record.npy' % (dirname, allele), allow_pickle=True)\n", (21458, 21517), True, 'import numpy as np\n'), ((21648, 21714), 'numpy.load', 'np.load', (["('%s/%s/record.npy' % (dirname, allele))"], {'allow_pickle': '(True)'}), "('%s/%s/record.npy' % (dirname, allele), allow_pickle=True)\n", (21655, 21714), True, 'import numpy as np\n')]
|
from utils import *
import torch
import sys
import os  # used by validate_ImagetNet_C for os.path.join
import numpy as np
import time
import torchvision
from torch.autograd import Variable
import torchvision.transforms as transforms
import torchvision.datasets as datasets
def validate_pgd(val_loader, model, criterion, K, step, configs, logger, save_image=False, HE=False):
# Mean/Std for normalization
mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])
mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()
std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])
std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()
# Initiate the meters
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
eps = configs.ADV.clip_eps
model.eval()
end = time.time()
logger.info(pad_str(' PGD eps: {}, K: {}, step: {} '.format(eps, K, step)))
if HE == True:
is_HE = '_HE'
else:
is_HE = ''
if configs.pretrained:
is_HE = '_pretrained'
for i, (input, target) in enumerate(val_loader):
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
#save original images
if save_image == True and i < 2:
original_images_save = input.clone()
for o in range(input.size(0)):
torchvision.utils.save_image(original_images_save[o, :, :, :], 'saved_images/original_images'+is_HE+'/{}.png'.format(o + configs.DATA.batch_size*i))
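        # PGD: start from a uniformly random point inside the eps-ball around
        # the clean input, then take K gradient-ascent steps on the loss below.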
randn = torch.FloatTensor(input.size()).uniform_(-eps, eps).cuda()
input += randn
input.clamp_(0, 1.0)
orig_input = input.clone()
for _ in range(K):
invar = Variable(input, requires_grad=True)
in1 = invar - mean
in1.div_(std)
output = model(in1)
ascend_loss = criterion(output, target)
ascend_grad = torch.autograd.grad(ascend_loss, invar)[0]
pert = fgsm(ascend_grad, step)
            # Apply the perturbation, then project back into the eps-ball around the original input
input += pert.data
input = torch.max(orig_input-eps, input)
input = torch.min(orig_input+eps, input)
input.clamp_(0, 1.0)
#save adv images
if save_image == True and i < 2:
adv_images_save = input.clone()
for o in range(input.size(0)):
torchvision.utils.save_image(adv_images_save[o, :, :, :], 'saved_images/adv_images'+is_HE+'/{}.png'.format(o + configs.DATA.batch_size*i))
#save scaled perturbation
perturbation = input - orig_input
perturbation.clamp_(-eps,eps)
scaled_perturbation = (perturbation.clone() + eps) / (2 * eps)
scaled_perturbation.clamp_(0, 1.0)
if save_image == True and i < 2:
for o in range(input.size(0)):
torchvision.utils.save_image(scaled_perturbation[o, :, :, :], 'saved_images/scaled_perturbation'+is_HE+'/{}.png'.format(o + configs.DATA.batch_size*i))
input.sub_(mean).div_(std)
with torch.no_grad():
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % configs.TRAIN.print_freq == 0:
print('PGD Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' PGD Final Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def validate(val_loader, model, criterion, configs, logger):
# Mean/Std for normalization
mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])
mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()
std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])
std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()
# Initiate the meters
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
with torch.no_grad():
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
input = input - mean
input.div_(std)
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % configs.TRAIN.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
sys.stdout.flush()
print(' Final Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def validate_ImagetNet_C(val_loader_name, model, criterion, configs, logger):
# Mean/Std for normalization
mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])
mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()
std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])
std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()
# switch to evaluate mode
model.eval()
fil_index = ['/1','/2','/3','/4','/5']
avg_return = 0
for f in fil_index:
valdir = os.path.join(configs.data, val_loader_name+f)
print(' File: ', valdir)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(configs.DATA.img_size),
transforms.CenterCrop(configs.DATA.crop_size),
transforms.ToTensor(),
])),
batch_size=configs.DATA.batch_size, shuffle=False,
num_workers=configs.DATA.workers, pin_memory=True)
# Initiate the meters
top1 = AverageMeter()
end = time.time()
for i, (input, target) in enumerate(val_loader):
with torch.no_grad():
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
input = input - mean
input.div_(std)
output = model(input)
# measure accuracy and record loss
prec1,_ = accuracy(output, target, topk=(1,2))
top1.update(prec1[0], input.size(0))
# if i % configs.TRAIN.print_freq == 0:
# print('PGD Test: [{0}/{1}]\t'
# 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
# i, len(val_loader),top1=top1))
# print('Time: ', time.time() - end)
# sys.stdout.flush()
print('Prec: ',top1.avg.cpu().item())
avg_return += top1.avg.cpu().item()
    print('Average Classification Accuracy is: ', avg_return / 5.)
return
|
[
"torchvision.transforms.CenterCrop",
"torchvision.transforms.ToTensor",
"torch.max",
"torch.min",
"numpy.array",
"torch.autograd.grad",
"torchvision.transforms.Resize",
"torch.no_grad",
"sys.stdout.flush",
"torch.autograd.Variable",
"time.time"
] |
[((873, 884), 'time.time', 'time.time', ([], {}), '()\n', (882, 884), False, 'import time\n'), ((4940, 4951), 'time.time', 'time.time', ([], {}), '()\n', (4949, 4951), False, 'import time\n'), ((7509, 7520), 'time.time', 'time.time', ([], {}), '()\n', (7518, 7520), False, 'import time\n'), ((377, 405), 'numpy.array', 'np.array', (['configs.TRAIN.mean'], {}), '(configs.TRAIN.mean)\n', (385, 405), True, 'import numpy as np\n'), ((537, 564), 'numpy.array', 'np.array', (['configs.TRAIN.std'], {}), '(configs.TRAIN.std)\n', (545, 564), True, 'import numpy as np\n'), ((1808, 1843), 'torch.autograd.Variable', 'Variable', (['input'], {'requires_grad': '(True)'}), '(input, requires_grad=True)\n', (1816, 1843), False, 'from torch.autograd import Variable\n'), ((2181, 2215), 'torch.max', 'torch.max', (['(orig_input - eps)', 'input'], {}), '(orig_input - eps, input)\n', (2190, 2215), False, 'import torch\n'), ((2234, 2268), 'torch.min', 'torch.min', (['(orig_input + eps)', 'input'], {}), '(orig_input + eps, input)\n', (2243, 2268), False, 'import torch\n'), ((3159, 3174), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3172, 3174), False, 'import torch\n'), ((3652, 3663), 'time.time', 'time.time', ([], {}), '()\n', (3661, 3663), False, 'import time\n'), ((4445, 4473), 'numpy.array', 'np.array', (['configs.TRAIN.mean'], {}), '(configs.TRAIN.mean)\n', (4453, 4473), True, 'import numpy as np\n'), ((4605, 4632), 'numpy.array', 'np.array', (['configs.TRAIN.std'], {}), '(configs.TRAIN.std)\n', (4613, 4632), True, 'import numpy as np\n'), ((5018, 5033), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5031, 5033), False, 'import torch\n'), ((5675, 5686), 'time.time', 'time.time', ([], {}), '()\n', (5684, 5686), False, 'import time\n'), ((6481, 6509), 'numpy.array', 'np.array', (['configs.TRAIN.mean'], {}), '(configs.TRAIN.mean)\n', (6489, 6509), True, 'import numpy as np\n'), ((6641, 6668), 'numpy.array', 'np.array', (['configs.TRAIN.std'], {}), '(configs.TRAIN.std)\n', (6649, 6668), True, 'import numpy as np\n'), ((2011, 2050), 'torch.autograd.grad', 'torch.autograd.grad', (['ascend_loss', 'invar'], {}), '(ascend_loss, invar)\n', (2030, 2050), False, 'import torch\n'), ((4176, 4194), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4192, 4194), False, 'import sys\n'), ((6195, 6213), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6211, 6213), False, 'import sys\n'), ((7595, 7610), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7608, 7610), False, 'import torch\n'), ((3615, 3626), 'time.time', 'time.time', ([], {}), '()\n', (3624, 3626), False, 'import time\n'), ((5638, 5649), 'time.time', 'time.time', ([], {}), '()\n', (5647, 5649), False, 'import time\n'), ((7146, 7186), 'torchvision.transforms.Resize', 'transforms.Resize', (['configs.DATA.img_size'], {}), '(configs.DATA.img_size)\n', (7163, 7186), True, 'import torchvision.transforms as transforms\n'), ((7204, 7249), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['configs.DATA.crop_size'], {}), '(configs.DATA.crop_size)\n', (7225, 7249), True, 'import torchvision.transforms as transforms\n'), ((7267, 7288), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7286, 7288), True, 'import torchvision.transforms as transforms\n')]
|
import numpy as np
from numexpr_kernel import numexpr_kernel
from numba_kernel import numba_kernel
N = 10000
x = np.random.rand(N)
y = np.random.rand(N)
z = np.random.rand(N)
tau = np.random.rand(N)
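# Each kernel is called twice below; the second call runs with any one-time
# setup cost (e.g. numba's JIT compilation on the first call) already paid.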
r1 = numexpr_kernel(x, y, z, tau)
r1 = numexpr_kernel(x, y, z, tau)
r2 = np.zeros(N, dtype=float)
numba_kernel(x, y, z, tau, r2, N)
numba_kernel(x, y, z, tau, r2, N)
|
[
"numexpr_kernel.numexpr_kernel",
"numpy.zeros",
"numba_kernel.numba_kernel",
"numpy.random.rand"
] |
[((114, 131), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (128, 131), True, 'import numpy as np\n'), ((136, 153), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (150, 153), True, 'import numpy as np\n'), ((158, 175), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (172, 175), True, 'import numpy as np\n'), ((182, 199), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (196, 199), True, 'import numpy as np\n'), ((206, 234), 'numexpr_kernel.numexpr_kernel', 'numexpr_kernel', (['x', 'y', 'z', 'tau'], {}), '(x, y, z, tau)\n', (220, 234), False, 'from numexpr_kernel import numexpr_kernel\n'), ((240, 268), 'numexpr_kernel.numexpr_kernel', 'numexpr_kernel', (['x', 'y', 'z', 'tau'], {}), '(x, y, z, tau)\n', (254, 268), False, 'from numexpr_kernel import numexpr_kernel\n'), ((274, 298), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'float'}), '(N, dtype=float)\n', (282, 298), True, 'import numpy as np\n'), ((299, 332), 'numba_kernel.numba_kernel', 'numba_kernel', (['x', 'y', 'z', 'tau', 'r2', 'N'], {}), '(x, y, z, tau, r2, N)\n', (311, 332), False, 'from numba_kernel import numba_kernel\n'), ((333, 366), 'numba_kernel.numba_kernel', 'numba_kernel', (['x', 'y', 'z', 'tau', 'r2', 'N'], {}), '(x, y, z, tau, r2, N)\n', (345, 366), False, 'from numba_kernel import numba_kernel\n')]
|
"""
This module gives the classification results for test data using SVM with RBF
kernel.
Email: <EMAIL>
Dtd: 2 - August - 2020
Parameters
----------
classification_type : string
DESCRIPTION - classification_type == "binary_class" loads binary classification artificial data.
classification_type == "multi_class" loads multiclass artificial data
folder_name : string
DESCRIPTION - the name of the folder to store results. For eg., if
folder_name = "hnb", then this function will create two folder "hnb-svm"
and "hnb-svm_rbf" to save the classification report.
target_names : array, 1D, string
    DESCRIPTION - if there are two classes, then target_names = ['class-0', 'class-1']
Note - In the present version of the code, the results for binary classification
and five-class classification are saved.
Returns : None
-------
    Computes acc_svm_rbf and f1score_svm_rbf.
"""
import os
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
from sklearn import svm
from sklearn.metrics import confusion_matrix as cm
from sklearn.metrics import classification_report
from load_data_synthetic import get_data
#from Codes import classification_report_csv_
classification_type = "concentric_circle_noise"
folder_name = "full-testdata"
target_names = ['class-0', 'class-1']
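# For a multi-class run, these settings would change along the lines described
# in the docstring above (the values here are illustrative, not tested defaults):
# classification_type = "multi_class"
# folder_name = "multiclass-testdata"
# target_names = ['class-0', 'class-1', 'class-2', 'class-3', 'class-4']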
path = os.getcwd()
result_path_svm_rbf = path + '/NEUROCHAOS-RESULTS/' + classification_type + '/' + folder_name +'-svm_rbf/'
# Creating Folder to save the results
try:
os.makedirs(result_path_svm_rbf)
except OSError:
print("Creation of the result directory %s failed" % result_path_svm_rbf)
else:
print("Successfully created the result directory %s" % result_path_svm_rbf)
full_artificial_data, full_artificial_label, full_artificial_test_data, full_artificial_test_label = get_data(classification_type)
num_classes = len(np.unique(full_artificial_label)) # Number of classes
print("**** Genome data details ******")
for class_label in range(np.max(full_artificial_label)+1):
print("Total Data instance in Class -", class_label, " = ", full_artificial_label.tolist().count([class_label]))
print(" train data = ", (full_artificial_data.shape[0]))
print("val data = ", (full_artificial_test_data.shape[0]))
# Start of svm_rbf classifier
svm_rbf_classifier = svm.SVC(kernel='rbf', gamma='scale')
svm_rbf_classifier.fit(full_artificial_data, full_artificial_label[:, 0])
predicted_svm_rbf_val_label = svm_rbf_classifier.predict(full_artificial_test_data)
acc_svm_rbf = accuracy_score(full_artificial_test_label, predicted_svm_rbf_val_label)*100
f1score_svm_rbf = f1_score(full_artificial_test_label, predicted_svm_rbf_val_label, average="macro")
report_svm_rbf = classification_report(full_artificial_test_label, predicted_svm_rbf_val_label, target_names=target_names)
# Saving the classification report to csv file for svm_rbf classifier.
print(report_svm_rbf)
#classification_report_csv_(report_svm_rbf, num_classes).to_csv(result_path_svm_rbf+'svm_rbf_report_'+ str(iterations) +'.csv', index=False)
confusion_matrix_svm_rbf = cm(full_artificial_test_label, predicted_svm_rbf_val_label)
print("Confusion matrixfor svm_rbf\n", confusion_matrix_svm_rbf)
# End of svm_rbf classifier.
# saving the f1-score
np.save(result_path_svm_rbf + 'f1score.npy', f1score_svm_rbf)
|
[
"sklearn.metrics.f1_score",
"sklearn.metrics.confusion_matrix",
"os.makedirs",
"numpy.unique",
"sklearn.metrics.classification_report",
"os.getcwd",
"load_data_synthetic.get_data",
"numpy.max",
"sklearn.metrics.accuracy_score",
"numpy.save",
"sklearn.svm.SVC"
] |
[((1369, 1380), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1378, 1380), False, 'import os\n'), ((1867, 1896), 'load_data_synthetic.get_data', 'get_data', (['classification_type'], {}), '(classification_type)\n', (1875, 1896), False, 'from load_data_synthetic import get_data\n'), ((2376, 2412), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""rbf"""', 'gamma': '"""scale"""'}), "(kernel='rbf', gamma='scale')\n", (2383, 2412), False, 'from sklearn import svm\n'), ((2685, 2772), 'sklearn.metrics.f1_score', 'f1_score', (['full_artificial_test_label', 'predicted_svm_rbf_val_label'], {'average': '"""macro"""'}), "(full_artificial_test_label, predicted_svm_rbf_val_label, average=\n 'macro')\n", (2693, 2772), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((2786, 2895), 'sklearn.metrics.classification_report', 'classification_report', (['full_artificial_test_label', 'predicted_svm_rbf_val_label'], {'target_names': 'target_names'}), '(full_artificial_test_label,\n predicted_svm_rbf_val_label, target_names=target_names)\n', (2807, 2895), False, 'from sklearn.metrics import classification_report\n'), ((3161, 3220), 'sklearn.metrics.confusion_matrix', 'cm', (['full_artificial_test_label', 'predicted_svm_rbf_val_label'], {}), '(full_artificial_test_label, predicted_svm_rbf_val_label)\n', (3163, 3220), True, 'from sklearn.metrics import confusion_matrix as cm\n'), ((3343, 3404), 'numpy.save', 'np.save', (["(result_path_svm_rbf + 'f1score.npy')", 'f1score_svm_rbf'], {}), "(result_path_svm_rbf + 'f1score.npy', f1score_svm_rbf)\n", (3350, 3404), True, 'import numpy as np\n'), ((1544, 1576), 'os.makedirs', 'os.makedirs', (['result_path_svm_rbf'], {}), '(result_path_svm_rbf)\n', (1555, 1576), False, 'import os\n'), ((1918, 1950), 'numpy.unique', 'np.unique', (['full_artificial_label'], {}), '(full_artificial_label)\n', (1927, 1950), True, 'import numpy as np\n'), ((2590, 2661), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['full_artificial_test_label', 'predicted_svm_rbf_val_label'], {}), '(full_artificial_test_label, predicted_svm_rbf_val_label)\n', (2604, 2661), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((2042, 2071), 'numpy.max', 'np.max', (['full_artificial_label'], {}), '(full_artificial_label)\n', (2048, 2071), True, 'import numpy as np\n')]
|
import torch
import numpy as np
import time
import datetime
import random
from Kfold import KFold
from split_data import DataManager
from transformers import BertTokenizer
from torch.utils.data import TensorDataset, random_split
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import BertForSequenceClassification, AdamW, BertConfig
from transformers import get_linear_schedule_with_warmup
class KfoldBERTData(DataManager):
def __init__(self, data, labels, num_folds):
super().__init__(data, labels, num_folds)
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
def pre_process(self, sentences, labels):
max_len = 0
for sent in sentences:
input_ids = self.tokenizer.encode(sent, add_special_tokens=True)
max_len = max(max_len, len(input_ids))
input_ids = []
attention_masks = []
for sent in sentences:
encoded_dict = self.tokenizer.encode_plus(
sent,
add_special_tokens = True,
max_length = 350,
pad_to_max_length = True,
return_attention_mask = True,
return_tensors = 'pt',
truncation=True
)
input_ids.append(encoded_dict['input_ids'])
attention_masks.append(encoded_dict['attention_mask'])
# Convert the lists into tensors.
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels = torch.tensor(labels)
dataset = TensorDataset(input_ids, attention_masks, labels)
d, _ = random_split(dataset, [len(dataset), 0])
return d
class KfoldBERT(KFold):
def __init__(self, data, labels, num_folds):
super().__init__(data, labels, num_folds)
self.batch_size = 8
self.epochs = 10
self.data = KfoldBERTData(data, labels, num_folds)
if torch.cuda.is_available():
self.device = torch.device("cuda")
def flat_accuracy(self, preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
def format_time(self, time):
'''
Takes a time in seconds and returns a string hh:mm:ss
'''
        elapsed_rounded = int(round(time))
return str(datetime.timedelta(seconds=elapsed_rounded))
def train(self, train_dataset, val_dataset):
train_dataloader = DataLoader(
train_dataset, # The training samples.
sampler = RandomSampler(train_dataset), # Select batches randomly
batch_size = self.batch_size # Trains with this batch size.
)
validation_dataloader = DataLoader(
val_dataset, # The validation samples.
sampler = SequentialSampler(val_dataset), # Pull out batches sequentially.
batch_size = self.batch_size # Evaluate with this batch size.
)
model = BertForSequenceClassification.from_pretrained(
"bert-base-uncased", # Use the 12-layer BERT model, with an uncased vocab.
num_labels = 4, # The number of output labels -- 4 classes for this task
# (2 would be used for binary classification).
output_attentions = False, # Whether the model returns attentions weights.
output_hidden_states = False, # Whether the model returns all hidden-states.
)
model.cuda()
optimizer = AdamW(model.parameters(),
lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5
eps = 1e-8 # args.adam_epsilon - default is 1e-8.
)
total_steps = len(train_dataloader) * self.epochs
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps = 0, # Default value in run_glue.py
num_training_steps = total_steps)
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
training_stats = []
# Measure the total training time for the whole run.
total_t0 = time.time()
# For each epoch...
for epoch_i in range(0, self.epochs):
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, self.epochs))
print('Training...')
t0 = time.time()
total_train_loss = 0
model.train()
# For each batch of training data...
for step, batch in enumerate(train_dataloader):
# Progress update every 40 batches.
if step % 40 == 0 and not step == 0:
# Calculate elapsed time in minutes.
elapsed = self.format_time(time.time() - t0)
# Report progress.
print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
b_input_ids = batch[0].to(self.device)
b_input_mask = batch[1].to(self.device)
b_labels = batch[2].to(self.device)
model.zero_grad()
loss, logits = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
total_train_loss += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
# Calculate the average loss over all of the batches.
avg_train_loss = total_train_loss / len(train_dataloader)
# Measure how long this epoch took.
training_time = self.format_time(time.time() - t0)
print("")
print(" Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epoch took: {:}".format(training_time))
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
print("")
print("Running Validation...")
t0 = time.time()
model.eval()
# Tracking variables
total_eval_accuracy = 0
total_eval_loss = 0
nb_eval_steps = 0
for batch in validation_dataloader:
b_input_ids = batch[0].to(self.device)
b_input_mask = batch[1].to(self.device)
b_labels = batch[2].to(self.device)
with torch.no_grad():
loss, logits = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
# Accumulate the validation loss.
total_eval_loss += loss.item()
# Move logits and labels to CPU
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
# Calculate the accuracy for this batch of test sentences, and
# accumulate it over all batches.
total_eval_accuracy += self.flat_accuracy(logits, label_ids)
# Report the final accuracy for this validation run.
avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
# Calculate the average loss over all of the batches.
avg_val_loss = total_eval_loss / len(validation_dataloader)
# Measure how long the validation run took.
validation_time = self.format_time(time.time() - t0)
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
print(" Validation took: {:}".format(validation_time))
# Record all statistics from this epoch.
training_stats.append(
{
'epoch': epoch_i + 1,
'Training Loss': avg_train_loss,
'Valid. Loss': avg_val_loss,
'Valid. Accur.': avg_val_accuracy,
'Training Time': training_time,
'Validation Time': validation_time
}
)
torch.save(model.state_dict(), "removed_model_epoch_" + str(epoch_i + 1) +".pth")
print("")
print("Training complete!")
print("Total training took {:} (h:mm:ss)".format(self.format_time(time.time()-total_t0)))
return avg_val_accuracy, avg_val_loss
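# --- Hedged usage sketch (not part of the original code) ---
# Shows how the two classes above could be driven for a single train/validation
# split. The 90/10 split and num_folds=5 are assumptions for illustration only;
# KfoldBERT.train() calls model.cuda(), so a CUDA-capable GPU is required.
def _example_single_run(sentences, labels):
    runner = KfoldBERT(sentences, labels, num_folds=5)
    dataset = runner.data.pre_process(sentences, labels)
    n_train = int(0.9 * len(dataset))
    train_ds, val_ds = random_split(dataset, [n_train, len(dataset) - n_train])
    return runner.train(train_ds, val_ds)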
|
[
"torch.cuda.is_available",
"datetime.timedelta",
"numpy.random.seed",
"torch.utils.data.SequentialSampler",
"numpy.argmax",
"torch.utils.data.TensorDataset",
"transformers.BertForSequenceClassification.from_pretrained",
"time.time",
"torch.cat",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"transformers.get_linear_schedule_with_warmup",
"transformers.BertTokenizer.from_pretrained",
"random.seed",
"torch.utils.data.RandomSampler",
"torch.tensor",
"numpy.sum",
"torch.no_grad"
] |
[((638, 708), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {'do_lower_case': '(True)'}), "('bert-base-uncased', do_lower_case=True)\n", (667, 708), False, 'from transformers import BertTokenizer\n'), ((1706, 1733), 'torch.cat', 'torch.cat', (['input_ids'], {'dim': '(0)'}), '(input_ids, dim=0)\n', (1715, 1733), False, 'import torch\n'), ((1760, 1793), 'torch.cat', 'torch.cat', (['attention_masks'], {'dim': '(0)'}), '(attention_masks, dim=0)\n', (1769, 1793), False, 'import torch\n'), ((1811, 1831), 'torch.tensor', 'torch.tensor', (['labels'], {}), '(labels)\n', (1823, 1831), False, 'import torch\n'), ((1851, 1900), 'torch.utils.data.TensorDataset', 'TensorDataset', (['input_ids', 'attention_masks', 'labels'], {}), '(input_ids, attention_masks, labels)\n', (1864, 1900), False, 'from torch.utils.data import TensorDataset, random_split\n'), ((2222, 2247), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2245, 2247), False, 'import torch\n'), ((3360, 3497), 'transformers.BertForSequenceClassification.from_pretrained', 'BertForSequenceClassification.from_pretrained', (['"""bert-base-uncased"""'], {'num_labels': '(4)', 'output_attentions': '(False)', 'output_hidden_states': '(False)'}), "('bert-base-uncased',\n num_labels=4, output_attentions=False, output_hidden_states=False)\n", (3405, 3497), False, 'from transformers import BertForSequenceClassification, AdamW, BertConfig\n'), ((4199, 4297), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': '(0)', 'num_training_steps': 'total_steps'}), '(optimizer, num_warmup_steps=0,\n num_training_steps=total_steps)\n', (4230, 4297), False, 'from transformers import get_linear_schedule_with_warmup\n'), ((4450, 4471), 'random.seed', 'random.seed', (['seed_val'], {}), '(seed_val)\n', (4461, 4471), False, 'import random\n'), ((4480, 4504), 'numpy.random.seed', 'np.random.seed', (['seed_val'], {}), '(seed_val)\n', (4494, 4504), True, 'import numpy as np\n'), ((4513, 4540), 'torch.manual_seed', 'torch.manual_seed', (['seed_val'], {}), '(seed_val)\n', (4530, 4540), False, 'import torch\n'), ((4549, 4585), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed_val'], {}), '(seed_val)\n', (4575, 4585), False, 'import torch\n'), ((4696, 4707), 'time.time', 'time.time', ([], {}), '()\n', (4705, 4707), False, 'import time\n'), ((2279, 2299), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2291, 2299), False, 'import torch\n'), ((2455, 2487), 'numpy.sum', 'np.sum', (['(pred_flat == labels_flat)'], {}), '(pred_flat == labels_flat)\n', (2461, 2487), True, 'import numpy as np\n'), ((2691, 2734), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'elapsed_rounded'}), '(seconds=elapsed_rounded)\n', (2709, 2734), False, 'import datetime\n'), ((4961, 4972), 'time.time', 'time.time', ([], {}), '()\n', (4970, 4972), False, 'import time\n'), ((7023, 7034), 'time.time', 'time.time', ([], {}), '()\n', (7032, 7034), False, 'import time\n'), ((2366, 2390), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (2375, 2390), True, 'import numpy as np\n'), ((2923, 2951), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (2936, 2951), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((3194, 3224), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['val_dataset'], {}), 
'(val_dataset)\n', (3211, 3224), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((6465, 6476), 'time.time', 'time.time', ([], {}), '()\n', (6474, 6476), False, 'import time\n'), ((7442, 7457), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7455, 7457), False, 'import torch\n'), ((8657, 8668), 'time.time', 'time.time', ([], {}), '()\n', (8666, 8668), False, 'import time\n'), ((9493, 9504), 'time.time', 'time.time', ([], {}), '()\n', (9502, 9504), False, 'import time\n'), ((5352, 5363), 'time.time', 'time.time', ([], {}), '()\n', (5361, 5363), False, 'import time\n')]
|
import numpy as np
import operator
# TODO: Make Mutation Operator.
class TerminationCriteria:
@staticmethod
def _convergence_check(convergence_ratio, population_fitness):
if abs((np.max(population_fitness) - np.mean(population_fitness)) / np.mean(
population_fitness)) <= convergence_ratio / 2:
return True
else:
return False
@staticmethod
def _fitness_level_check(fitness_level, population_fitness, _operator):
ops = {'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
'=': operator.eq}
inp = abs(np.max(population_fitness))
relate = _operator
cut = fitness_level
return ops[relate](inp, cut)
@staticmethod
def _generations_check(generations, generation_limit):
if generations >= generation_limit:
return True
else:
return False
def __init__(self):
self._checks = []
self._convergence_limit = None
self._fitness_limit = None
self._generation_limit = None
self._operator = None
def _checker_of_convergence(self):
def _checker(population_fitness, generation_number):
return self._convergence_check(self._convergence_limit, population_fitness)
return _checker
def _checker_of_fitness(self):
def _checker(population_fitness, generation_number):
return self._fitness_level_check(self._fitness_limit, population_fitness, self._operator)
return _checker
def _checker_of_generations(self):
def _checker(population_fitness, generation_number):
return self._generations_check(generation_number, self._generation_limit)
return _checker
def add_convergence_limit(self, convergence_ratio):
self._checks.append(self._checker_of_convergence())
self._convergence_limit = convergence_ratio
def add_fitness_limit(self, operator, fitness_level):
self._checks.append(self._checker_of_fitness())
self._fitness_limit = fitness_level
self._operator = operator
def add_generation_limit(self, generation_limit):
self._checks.append(self._checker_of_generations())
self._generation_limit = generation_limit
def check(self, population_fitness, generation_number):
if np.any([check(population_fitness, generation_number) for check in self._checks]) == True:
return True
else:
return False
# def convergence_or_100(population_fitness, convergence_ratio):
# if abs((np.max(population_fitness) - np.mean(population_fitness)) / np.mean(
# population_fitness)) <= convergence_ratio / 2:
# return True
# elif abs(np.max(population_fitness)) == 100:
# return True
# else:
# return False
class SelectionOperator:
@staticmethod
def supremacy(m, contestants, fitness):
return np.argpartition(np.array(fitness), -m)[-m:], np.array(contestants)[
np.argpartition(np.array(fitness), -m)[-m:]]
@staticmethod
def random(m, contestants, fitness):
# TODO: Update for idx return. (BROKEN)
# used = None
# assert fitness is not used
return list(np.random.choice(contestants, m))
class CrossoverOperator:
@staticmethod
def random_polygamous(parents, n_children):
gene_lst = []
child_ls = []
for gene_idx in range(len(parents[0].split(' '))):
gene_col = np.random.choice(np.array([parent.split(' ') for parent in parents])[:, gene_idx], n_children)
gene_lst.append(gene_col)
gene_arr = np.array(gene_lst).T
for child_idx in range(len(gene_arr[:, 0])):
child_new = ' '.join(list(gene_arr[child_idx, :]))
child_ls.append(child_new)
return child_ls
@staticmethod
def supremecy_polygamous(parents, n_children, fitness):
raise NotImplementedError("Supremacy not implemented yet")
def fitness_function_himmelblau(x, y): # execute himmelblau function
f = (x ** 2. + y - 11.) ** 2. + (x + y ** 2. - 7.) ** 2.
return 100 - f
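# --- Hedged usage sketch (not part of the original code) ---
# Illustrates how the operators above could be combined for one generation of a
# simple GA on the himmelblau fitness. Encoding individuals as "x y" strings and
# the limits used here are assumptions made only for this example.
def _example_generation_step(population):
    fitness = [fitness_function_himmelblau(*map(float, ind.split(' '))) for ind in population]
    criteria = TerminationCriteria()
    criteria.add_generation_limit(100)
    criteria.add_convergence_limit(0.01)
    done = criteria.check(fitness, generation_number=0)
    # keep the two fittest individuals as parents and breed a new population
    _, parents = SelectionOperator.supremacy(2, population, fitness)
    children = CrossoverOperator.random_polygamous(list(parents), n_children=len(population))
    return children, done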
|
[
"numpy.random.choice",
"numpy.array",
"numpy.mean",
"numpy.max"
] |
[((674, 700), 'numpy.max', 'np.max', (['population_fitness'], {}), '(population_fitness)\n', (680, 700), True, 'import numpy as np\n'), ((3339, 3371), 'numpy.random.choice', 'np.random.choice', (['contestants', 'm'], {}), '(contestants, m)\n', (3355, 3371), True, 'import numpy as np\n'), ((3744, 3762), 'numpy.array', 'np.array', (['gene_lst'], {}), '(gene_lst)\n', (3752, 3762), True, 'import numpy as np\n'), ((3072, 3093), 'numpy.array', 'np.array', (['contestants'], {}), '(contestants)\n', (3080, 3093), True, 'import numpy as np\n'), ((259, 286), 'numpy.mean', 'np.mean', (['population_fitness'], {}), '(population_fitness)\n', (266, 286), True, 'import numpy as np\n'), ((3043, 3060), 'numpy.array', 'np.array', (['fitness'], {}), '(fitness)\n', (3051, 3060), True, 'import numpy as np\n'), ((199, 225), 'numpy.max', 'np.max', (['population_fitness'], {}), '(population_fitness)\n', (205, 225), True, 'import numpy as np\n'), ((228, 255), 'numpy.mean', 'np.mean', (['population_fitness'], {}), '(population_fitness)\n', (235, 255), True, 'import numpy as np\n'), ((3123, 3140), 'numpy.array', 'np.array', (['fitness'], {}), '(fitness)\n', (3131, 3140), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# <NAME> (<EMAIL>)
import py_trees as pt
import py_trees_ros as ptr
import time
import numpy as np
import rospy
import tf
import actionlib
# from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from smarc_msgs.msg import GotoWaypointAction, GotoWaypointGoal
import actionlib_msgs.msg as actionlib_msgs
from geometry_msgs.msg import PointStamped, PoseArray, PoseStamped
from nav_msgs.msg import Path
from std_msgs.msg import Float64, Header, Bool, Empty
from visualization_msgs.msg import MarkerArray
from sensor_msgs.msg import NavSatFix
from std_srvs.srv import SetBool
from imc_ros_bridge.msg import EstimatedState, VehicleState, PlanDB, PlanDBInformation, PlanDBState, PlanControlState, PlanControl, PlanSpecification, Maneuver
import bb_enums
import imc_enums
import common_globals
from mission_plan import MissionPlan
from mission_log import MissionLog
class A_PublishFinalize(pt.behaviour.Behaviour):
def __init__(self, topic):
super(A_PublishFinalize, self).__init__(name="A_PublishFinalize")
self.bb = pt.blackboard.Blackboard()
self.topic = topic
self.last_published_time = None
self.message_object = Empty()
def setup(self, timeout):
self.pub = rospy.Publisher(self.topic, Empty, queue_size=1)
return True
def update(self):
if self.last_published_time is not None:
time_since = time.time() - self.last_published_time
self.feedback_message = "Last pub'd:{:.2f}s ago".format(time_since)
else:
self.feedback_message = "Never published!"
finalized = self.bb.get(bb_enums.MISSION_FINALIZED)
if not finalized:
try:
self.pub.publish(self.message_object)
self.last_published_time = time.time()
self.feedback_message = "Just published"
self.bb.set(bb_enums.MISSION_FINALIZED, True)
return pt.Status.SUCCESS
except:
msg = "Couldn't publish"
rospy.logwarn_throttle(1, msg)
self.feedback_message = msg
return pt.Status.FAILURE
return pt.Status.SUCCESS
class A_ManualMissionLog(pt.behaviour.Behaviour):
def __init__(self):
super(A_ManualMissionLog, self).__init__(name="A_ManualMissionLog")
self.bb = pt.blackboard.Blackboard()
self.started_logs = 0
self.num_saved_logs = 0
def start_new_log(self):
save_location = self.bb.get(bb_enums.MISSION_LOG_FOLDER)
log = MissionLog(mission_plan = None,
save_location = save_location)
self.bb.set(bb_enums.MANUAL_MISSION_LOG_OBJ, log)
rospy.loginfo("Started new manual mission log")
self.started_logs += 1
return log
def update(self):
enabled = self.bb.get(bb_enums.ENABLE_MANUAL_MISSION_LOG)
log = self.bb.get(bb_enums.MANUAL_MISSION_LOG_OBJ)
if not enabled:
# if we have a log, we save it now
# and set it to None, so next time we are
# disabled we dont do anything
if log is not None:
log.save()
self.bb.set(bb_enums.MANUAL_MISSION_LOG_OBJ, None)
self.num_saved_logs += 1
self.feedback_message = "Disabled, {} logs saved".format(self.num_saved_logs)
return pt.Status.SUCCESS
if log is None:
log = self.start_new_log()
# first add the auv pose
world_trans = self.bb.get(bb_enums.WORLD_TRANS)
x,y = world_trans[0], world_trans[1]
z = -self.bb.get(bb_enums.DEPTH)
log.navigation_trace.append((x,y,z))
# then add the raw gps
gps = self.bb.get(bb_enums.RAW_GPS)
mplan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
if gps is None or mplan is None or gps.status.status == -1: # no fix, or no plan to do the conversion with
gps_utm_point = None
else:
# translate the latlon to utm point using the same service as the mission plan
gps_utm_x, gps_utm_y = mplan.latlon_to_utm(gps.latitude, gps.longitude)
if gps_utm_x is None:
gps_utm_point = None
else:
gps_utm_point = (gps_utm_x, gps_utm_y)
log.raw_gps_trace.append(gps_utm_point)
# then add the tree tip and its status
tree_tip = self.bb.get(bb_enums.TREE_TIP_NAME)
tip_status = self.bb.get(bb_enums.TREE_TIP_STATUS)
log.tree_tip_trace.append((tree_tip, tip_status))
self.feedback_message = "Log len:{} of log#{}".format(len(log.navigation_trace), self.started_logs)
return pt.Status.SUCCESS
class A_SaveMissionLog(pt.behaviour.Behaviour):
def __init__(self):
super(A_SaveMissionLog, self).__init__(name="A_SaveMissionLog")
self.bb = pt.blackboard.Blackboard()
self.num_saved_logs = 0
def update(self):
log = self.bb.get(bb_enums.MISSION_LOG_OBJ)
if log is not None:
log.save()
self.num_saved_logs += 1
self.bb.set(bb_enums.MISSION_LOG_OBJ, None)
self.feedback_message = "Saved log #{}!".format(self.num_saved_logs)
else:
self.feedback_message = "#saved logs:{}".format(self.num_saved_logs)
return pt.Status.SUCCESS
class A_UpdateMissionLog(pt.behaviour.Behaviour):
def __init__(self):
super(A_UpdateMissionLog, self).__init__(name="A_UpdateMissionLog")
self.bb = pt.blackboard.Blackboard()
self.started_logs = 0
def start_new_log(self, mplan):
save_location = self.bb.get(bb_enums.MISSION_LOG_FOLDER)
log = MissionLog(mission_plan = mplan,
save_location = save_location)
self.bb.set(bb_enums.MISSION_LOG_OBJ, log)
rospy.loginfo("Started new mission log")
self.started_logs += 1
return log
def update(self):
# only update if there is an unfinalized mission that has been started
mplan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
if mplan is None:
rospy.loginfo("Mission plan is None, can't make a log of this?")
self.feedback_message = "No mission plan!"
return pt.Status.FAILURE
log = self.bb.get(bb_enums.MISSION_LOG_OBJ)
if log is None:
log = self.start_new_log(mplan)
# check if the mission has changed in the meantime
# this can happen when the user starts a mission, stops it,
# and then starts a different one
# we dont wanna log the incomplete one
# did it change since we last got called?
if log.creation_time != mplan.creation_time:
# it changed!
# re-start a log
log = self.start_new_log(mplan)
# now we got a valid mission plan
# first add the auv pose
world_trans = self.bb.get(bb_enums.WORLD_TRANS)
x,y = world_trans[0], world_trans[1]
z = -self.bb.get(bb_enums.DEPTH)
log.navigation_trace.append((x,y,z))
# then add the raw gps
gps = self.bb.get(bb_enums.RAW_GPS)
if gps is None or gps.status.status == -1: # no fix
gps_utm_point = None
else:
# translate the latlon to utm point using the same service as the mission plan
gps_utm_x, gps_utm_y = mplan.latlon_to_utm(gps.latitude, gps.longitude)
if gps_utm_x is None:
gps_utm_point = None
else:
gps_utm_point = (gps_utm_x, gps_utm_y)
log.raw_gps_trace.append(gps_utm_point)
# then add the tree tip and its status
tree_tip = self.bb.get(bb_enums.TREE_TIP_NAME)
tip_status = self.bb.get(bb_enums.TREE_TIP_STATUS)
log.tree_tip_trace.append((tree_tip, tip_status))
self.feedback_message = "Log len:{} of log#{}".format(len(log.navigation_trace), self.started_logs)
return pt.Status.SUCCESS
class A_SetDVLRunning(pt.behaviour.Behaviour):
def __init__(self, dvl_on_off_service_name, running, cooldown):
super(A_SetDVLRunning, self).__init__(name="A_SetDVLRunning")
self.switcher_service = rospy.ServiceProxy(dvl_on_off_service_name,
SetBool)
self.bb = pt.blackboard.Blackboard()
self.sb = SetBool()
self.sb.data = running
self.running = running
self.last_toggle = 0
self.cooldown = cooldown
self.service_name = dvl_on_off_service_name
def update(self):
# try not to call the service every tick...
dvl_is_running = self.bb.get(bb_enums.DVL_IS_RUNNING)
if dvl_is_running is not None:
if dvl_is_running == self.sb.data:
rospy.loginfo_throttle_identical(20, "DVL is already running:"+str(self.sb.data))
return pt.Status.SUCCESS
# check if enough time has passed since last call
t = time.time()
if t - self.last_toggle < self.cooldown:
# nope, return running while we wait
rospy.loginfo_throttle_identical(5, "Waiting on DVL toggle cooldown")
return pt.Status.RUNNING
try:
ret = self.switcher_service(self.running)
except rospy.service.ServiceException:
rospy.logwarn_throttle_identical(60, "DVL Start/stop service not found! Succeeding by default namespace:{}".format(self.service_name))
return pt.Status.SUCCESS
if ret.success:
rospy.loginfo_throttle_identical(5, "DVL TOGGLED:"+str(self.sb.data))
self.last_toggle = time.time()
self.bb.set(bb_enums.DVL_IS_RUNNING, self.sb.data)
return pt.Status.SUCCESS
rospy.logwarn_throttle_identical(5, "DVL COULD NOT BE TOGGLED:{}, ret:{}".format(self.sb.data, ret))
return pt.Status.FAILURE
class A_EmergencySurface(ptr.actions.ActionClient):
def __init__(self, emergency_action_namespace):
"""
What to do when an emergency happens. This should be a very simple
action that is super unlikely to fail, ever. It should also 'just work'
without a goal.
Like surfacing.
"""
self.bb = pt.blackboard.Blackboard()
self.action_goal_handle = None
ptr.actions.ActionClient.__init__(
self,
name="A_EmergencySurface",
action_spec=GotoWaypointAction,
action_goal=None,
action_namespace= emergency_action_namespace,
override_feedback_message_on_running="EMERGENCY SURFACING"
)
self.action_server_ok = False
def setup(self, timeout):
"""
Overwriting the normal ptr action setup to stop it from failing the setup step
and instead handling this failure in the tree.
"""
self.logger.debug("%s.setup()" % self.__class__.__name__)
self.action_client = actionlib.SimpleActionClient(
self.action_namespace,
self.action_spec
)
if not self.action_client.wait_for_server(rospy.Duration(timeout)):
self.logger.error("{0}.setup() could not connect to the action server at '{1}'".format(self.__class__.__name__, self.action_namespace))
self.action_client = None
self.action_server_ok = False
else:
self.action_server_ok = True
return True
def initialise(self):
if not self.action_server_ok:
rospy.logwarn_throttle_identical(5, "No Action Server found for emergency action, will just block the tree!")
return
self.feedback_message = "EMERGENCY SURFACING"
# construct the message
self.action_goal = GotoWaypointGoal()
self.sent_goal = False
def update(self):
if not self.action_server_ok:
self.feedback_message = "Action Server for emergency action can not be used!"
rospy.logerr_throttle_identical(5,self.feedback_message)
return pt.Status.FAILURE
# if your action client is not valid
if not self.action_client:
self.feedback_message = "ActionClient for emergency action is invalid!"
rospy.logwarn_throttle_identical(5,self.feedback_message)
return pt.Status.FAILURE
# if the action_goal is invalid
if not self.action_goal:
self.feedback_message = "No action_goal!"
rospy.logwarn(self.feedback_message)
return pt.Status.FAILURE
# if goal hasn't been sent yet
if not self.sent_goal:
self.action_goal_handle = self.action_client.send_goal(self.action_goal, feedback_cb=self.feedback_cb)
self.sent_goal = True
rospy.loginfo("Sent goal to action server:"+str(self.action_goal))
self.feedback_message = "Emergency goal sent"
return pt.Status.RUNNING
# if the goal was aborted or preempted
if self.action_client.get_state() in [actionlib_msgs.GoalStatus.ABORTED,
actionlib_msgs.GoalStatus.PREEMPTED]:
self.feedback_message = "Aborted emergency"
rospy.loginfo(self.feedback_message)
return pt.Status.FAILURE
result = self.action_client.get_result()
# if the goal was accomplished
if result:
self.feedback_message = "Completed emergency"
rospy.loginfo(self.feedback_message)
return pt.Status.SUCCESS
# if we're still trying to accomplish the goal
return pt.Status.RUNNING
def feedback_cb(self, msg):
pass
class A_SetNextPlanAction(pt.behaviour.Behaviour):
def __init__(self, do_not_visit=False):
"""
Sets the current plan action to the next one
SUCCESS if it can set it to something that is not None
FAILURE otherwise
if do_not_visit=True, then this action will only get the current wp
and set it and wont actually advance the plan forward.
This is useful for when you want to set the current wp right after
you created a plan.
"""
self.bb = pt.blackboard.Blackboard()
super(A_SetNextPlanAction, self).__init__('A_SetNextPlanAction')
self.do_not_visit = do_not_visit
def update(self):
mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
if mission_plan is None:
rospy.logwarn_throttle(5, "Mission plan was None!")
return pt.Status.FAILURE
if not self.do_not_visit:
mission_plan.visit_wp()
next_action = mission_plan.get_current_wp()
if next_action is None:
self.feedback_message = "Next action was None"
rospy.logwarn_throttle(5, "Mission is complete:{}".format(mission_plan.is_complete()))
return pt.Status.FAILURE
rospy.loginfo_throttle_identical(5, "Set CURRENT_PLAN_ACTION {} to: {}".format(self.do_not_visit, str(next_action)))
self.bb.set(bb_enums.CURRENT_PLAN_ACTION, next_action)
return pt.Status.SUCCESS
class A_GotoWaypoint(ptr.actions.ActionClient):
def __init__(self,
action_namespace,
goal_tf_frame = 'utm',
node_name = "A_GotoWaypoint"):
"""
Runs an action server that will move the robot to the given waypoint
"""
self.bb = pt.blackboard.Blackboard()
self.node_name = node_name
list_of_maneuvers = self.bb.get(bb_enums.MANEUVER_ACTIONS)
if list_of_maneuvers is None:
list_of_maneuvers = [self.node_name]
else:
list_of_maneuvers.append(self.node_name)
self.bb.set(bb_enums.MANEUVER_ACTIONS, list_of_maneuvers)
self.action_goal_handle = None
# become action client
ptr.actions.ActionClient.__init__(
self,
name = self.node_name,
action_spec = GotoWaypointAction,
action_goal = None,
action_namespace = action_namespace,
override_feedback_message_on_running = "Moving to waypoint"
)
self.action_server_ok = False
self.goal_tf_frame = goal_tf_frame
def setup(self, timeout):
"""
Overwriting the normal ptr action setup to stop it from failing the setup step
and instead handling this failure in the tree.
"""
self.logger.debug("%s.setup()" % self.__class__.__name__)
self.action_client = actionlib.SimpleActionClient(
self.action_namespace,
self.action_spec
)
if not self.action_client.wait_for_server(rospy.Duration(timeout)):
self.logger.error("{0}.setup() could not connect to the action server at '{1}'".format(self.__class__.__name__, self.action_namespace))
self.action_client = None
else:
self.action_server_ok = True
return True
def initialise(self):
if not self.action_server_ok:
rospy.logwarn_throttle(5, "No action server found for A_GotoWaypoint!")
return
mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
if mission_plan is None:
rospy.logwarn("No mission plan found!")
return
wp = mission_plan.get_current_wp()
if wp is None:
rospy.loginfo("No wp found to execute! Does the plan have any waypoints that we understand?")
return
if wp.tf_frame != self.goal_tf_frame:
rospy.logerr_throttle(5, 'The frame of the waypoint({0}) does not match the expected frame({1}) of the action client!'.format(wp.tf_frame, self.goal_tf_frame))
return
if wp.maneuver_id != imc_enums.MANEUVER_GOTO:
rospy.loginfo("THIS IS A GOTO MANEUVER, WE ARE USING IT FOR SOMETHING ELSE")
# get the goal tolerance as a dynamic variable from the bb
goal_tolerance = self.bb.get(bb_enums.WAYPOINT_TOLERANCE)
# construct the message
goal = GotoWaypointGoal()
goal.waypoint_pose.pose.position.x = wp.x
goal.waypoint_pose.pose.position.y = wp.y
goal.goal_tolerance = goal_tolerance
# 0=None, 1=Depth, 2=Altitude in the action
# thankfully these are the same in IMC and in the Action
# but Action doesnt have 'height'
if wp.z_unit == imc_enums.Z_HEIGHT:
wp.z_unit = imc_enums.Z_NONE
goal.z_control_mode = wp.z_unit
goal.travel_depth = wp.z
# 0=None, 1=RPM, 2=speed in the action
# 0=speed, 1=rpm, 2=percentage in IMC
if wp.speed_unit == imc_enums.SPEED_UNIT_RPM:
goal.speed_control_mode = GotoWaypointGoal.SPEED_CONTROL_RPM
goal.travel_rpm = wp.speed
elif wp.speed_unit == imc_enums.SPEED_UNIT_MPS:
goal.speed_control_mode = GotoWaypointGoal.SPEED_CONTROL_SPEED
goal.travel_speed = wp.speed
else:
goal.speed_control_mode = GotoWaypointGoal.SPEED_CONTROL_NONE
rospy.logwarn_throttle(1, "Speed control of the waypoint action is NONE!")
self.action_goal = goal
rospy.loginfo(">>> Goto waypoint action goal initialized:"+str(goal))
# ensure that we still need to send the goal
self.sent_goal = False
def update(self):
"""
Check only to see whether the underlying action server has
succeeded, is running, or has cancelled/aborted for some reason and
map these to the usual behaviour return states.
"""
if not self.action_server_ok:
self.feedback_message = "Action Server for gotowp action can not be used!"
rospy.logerr_throttle_identical(5,self.feedback_message)
return pt.Status.FAILURE
# if your action client is not valid
if not self.action_client:
self.feedback_message = "ActionClient is invalid! Client:"+str(self.action_client)
rospy.logerr(self.feedback_message)
return pt.Status.FAILURE
# if the action_goal is invalid
if not self.action_goal:
self.feedback_message = "No action_goal!"
rospy.logwarn(self.feedback_message)
return pt.Status.FAILURE
# if goal hasn't been sent yet
if not self.sent_goal:
self.action_goal_handle = self.action_client.send_goal(self.action_goal, feedback_cb=self.feedback_cb)
self.sent_goal = True
rospy.loginfo("Sent goal to action server:"+str(self.action_goal))
self.feedback_message = "Goal sent"
return pt.Status.RUNNING
# if the goal was aborted or preempted
if self.action_client.get_state() in [actionlib_msgs.GoalStatus.ABORTED,
actionlib_msgs.GoalStatus.PREEMPTED]:
self.feedback_message = "Aborted goal"
rospy.loginfo(self.feedback_message)
return pt.Status.FAILURE
result = self.action_client.get_result()
# if the goal was accomplished
if result is not None and result.reached_waypoint:
self.feedback_message = "Completed goal"
rospy.loginfo(self.feedback_message)
return pt.Status.SUCCESS
return pt.Status.RUNNING
def feedback_cb(self, msg):
fb = str(msg.ETA)
self.feedback_message = "ETA:"+fb
rospy.loginfo_throttle(5, fb)
class A_UpdateTF(pt.behaviour.Behaviour):
def __init__(self, utm_link, base_link):
"""
reads the current translation and orientation from the TF tree
and puts that into the BB
utm_link and base_link are tf link names where utm_link is essentially the world coordinates.
check the neptus-related actions too for more info on utm_link
"""
super(A_UpdateTF, self).__init__("A_UpdateTF")
self.bb = pt.blackboard.Blackboard()
self.utm_link = utm_link
self.base_link = base_link
self.listener = tf.TransformListener()
self.tf_ok = False
self.last_read_time = None
def setup(self, timeout):
try:
rospy.loginfo_throttle(3, "Waiting for transform from {} to {}...".format(self.utm_link, self.base_link))
self.listener.waitForTransform(self.utm_link, self.base_link, rospy.Time(), rospy.Duration(timeout))
rospy.loginfo_throttle(3, "...Got it")
self.tf_ok = True
except:
rospy.logerr_throttle(5, "Could not find transform from "+self.utm_link+" to "+self.base_link + "... Nothing except safety will be run")
return True
def update(self):
if self.last_read_time is not None:
time_since = time.time() - self.last_read_time
self.feedback_message = "Last read:{:.2f}s ago".format(time_since)
else:
self.feedback_message = "No msg received ever"
try:
(world_trans, world_rot) = self.listener.lookupTransform(self.utm_link,
self.base_link,
rospy.Time(0))
self.last_read_time = time.time()
except (tf.LookupException, tf.ConnectivityException):
rospy.logerr_throttle_identical(5, "Could not get transform between {} and {}".format(self.utm_link, self.base_link))
return pt.Status.FAILURE
except:
rospy.logerr_throttle_identical(5, "Could not do tf lookup for some other reason")
return pt.Status.FAILURE
self.bb.set(bb_enums.WORLD_TRANS, world_trans)
self.bb.set(bb_enums.WORLD_ROT, world_rot)
# also create this pointstamped object so that we can transform this
# easily to w/e other frame is needed later
ps = PointStamped()
ps.header.frame_id = self.utm_link
ps.header.stamp = rospy.Time(0)
ps.point.x = world_trans[0]
ps.point.y = world_trans[1]
ps.point.z = world_trans[2]
self.bb.set(bb_enums.LOCATION_POINT_STAMPED, ps)
# the Z component is UP, so invert to get "depth"
self.bb.set(bb_enums.DEPTH, -world_trans[2])
return pt.Status.SUCCESS
class A_UpdateNeptusPlanControl(pt.behaviour.Behaviour):
def __init__(self, plan_control_topic):
super(A_UpdateNeptusPlanControl, self).__init__("A_UpdateNeptusPlanControl")
self.bb = pt.blackboard.Blackboard()
self.plan_control_msg = None
self.plan_control_topic = plan_control_topic
self.sub = None
def setup(self, timeout):
self.sub = rospy.Subscriber(self.plan_control_topic, PlanControl, self.plancontrol_cb)
return True
def plancontrol_cb(self, plan_control_msg):
# rospy.loginfo("plancontrol_cb {}".format(plan_control_msg))
self.plan_control_msg = plan_control_msg
def update(self):
plan_control_msg = self.plan_control_msg
if plan_control_msg is None:
# not receiving anything is ok.
return pt.Status.SUCCESS
# check if this message is a 'go' or 'no go' message
# imc/plan_control(569):
# int type:[0,1,2,3] req,suc,fail,in prog
# int op:[0,1,2,3] start, stop, load, get
# int request_id
# string plan_id
# int flags
# string info
# the start button in neptus sends:
# type:0 op:0 plan_id:"string" flags:1
# stop button sends:
# type:0 op:1 plan_id:'' flags:1
# teleop button sends:
# type:0 op:0 plan_id:"teleoperation-mode" flags:0
typee = plan_control_msg.type
op = plan_control_msg.op
plan_id = plan_control_msg.plan_id
flags = plan_control_msg.flags
# somehow this happens...
if plan_id is None:
plan_id=''
# separate well-defined ifs for possible future shenanigans.
if typee==0 and op==0 and plan_id!='' and flags==1:
# start button
# check if the start was given for our current plan
current_mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
self.bb.set(bb_enums.PLAN_IS_GO, True)
self.bb.set(bb_enums.ENABLE_AUTONOMY, False)
if current_mission_plan is not None and plan_id == current_mission_plan.plan_id:
rospy.loginfo("Started plan:{}".format(plan_id))
else:
if current_mission_plan is None:
rospy.logwarn("Start given for plan:{} but we don't have a plan!".format(plan_id))
else:
rospy.logwarn("Start given for plan:{} our plan:{}".format(plan_id, current_mission_plan.plan_id))
if typee==0 and op==1 and plan_id=='' and flags==1:
# stop button
self.bb.set(bb_enums.PLAN_IS_GO, False)
self.bb.set(bb_enums.ENABLE_AUTONOMY, False)
# this string is hardcoded in Neptus, so we hardcode it here too!
if typee==0 and op==0 and plan_id=='teleoperation-mode' and flags==0:
# teleop button
self.bb.set(bb_enums.ENABLE_AUTONOMY, True)
rospy.logwarn_throttle_identical(10, "AUTONOMOUS MODE")
# reset it until next message
self.plan_control_msg = None
return pt.Status.SUCCESS
class A_UpdateNeptusEstimatedState(pt.behaviour.Behaviour):
def __init__(self,
estimated_state_topic,
gps_fix_topic,
gps_nav_data_topic):
super(A_UpdateNeptusEstimatedState, self).__init__("A_UpdateNeptusEstimatedState")
self.bb = pt.blackboard.Blackboard()
self.estimated_state_pub = None
self.estimated_state_topic = estimated_state_topic
self.e_state = EstimatedState()
self.gps_fix_pub = None
self.gps_fix_topic = gps_fix_topic
self.gps_nav_data_pub = None
self.gps_nav_data_topic = gps_nav_data_topic
self.gps_fix = NavSatFix()
def setup(self, timeout):
self.estimated_state_pub = rospy.Publisher(self.estimated_state_topic, EstimatedState, queue_size=1)
self.gps_fix_pub = rospy.Publisher(self.gps_fix_topic, NavSatFix, queue_size=1)
self.gps_nav_data_pub = rospy.Publisher(self.gps_nav_data_topic, NavSatFix, queue_size=1)
return True
def update(self):
lat = self.bb.get(bb_enums.CURRENT_LATITUDE)
lon = self.bb.get(bb_enums.CURRENT_LONGITUDE)
depth = self.bb.get(bb_enums.DEPTH)
world_rot = self.bb.get(bb_enums.WORLD_ROT)
if depth is None:
reason = "depth was None, using 0"
self.feedback_message = reason
depth = 0
if lat is None or lon is None or world_rot is None:
rospy.logwarn_throttle_identical(10, "Could not update neptus estimated state because lat/lon/world_rot was None!")
return pt.Status.SUCCESS
# construct message for neptus
self.e_state.lat = np.radians(lat)
self.e_state.lon= np.radians(lon)
self.e_state.depth = depth
roll, pitch, yaw = tf.transformations.euler_from_quaternion(world_rot)
self.e_state.psi = np.pi/2. - yaw
# send the message to neptus
self.estimated_state_pub.publish(self.e_state)
# same thing with gps fix
# the bridge only looks at lat lon height=altitude
self.gps_fix.latitude = lat
self.gps_fix.longitude = lon
self.gps_fix.altitude = -depth
self.gps_fix.header.seq = int(time.time())
self.gps_fix_pub.publish(self.gps_fix)
self.gps_nav_data_pub.publish(self.gps_fix)
return pt.Status.SUCCESS
class A_UpdateNeptusPlanControlState(pt.behaviour.Behaviour):
def __init__(self, plan_control_state_topic):
super(A_UpdateNeptusPlanControlState, self).__init__("A_UpdateNeptusPlanControlState")
self.bb = pt.blackboard.Blackboard()
self.plan_control_state_pub = None
self.plan_control_state_topic = plan_control_state_topic
def setup(self, timeout):
self.plan_control_state_pub = rospy.Publisher(self.plan_control_state_topic, PlanControlState, queue_size=1)
return True
def update(self):
# construct current progress message for neptus
msg = PlanControlState()
tip_name = self.bb.get(bb_enums.TREE_TIP_NAME)
tip_status = self.bb.get(bb_enums.TREE_TIP_STATUS)
# this tip_status looks like: "Status.FAILURE"
# I just wanna get the first letter after dot.
msg.man_id = tip_name+'('+tip_status[7]+')'
mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
if mission_plan is None:
msg.plan_id = 'No plan'
msg.plan_progress = 100.0
elif mission_plan.is_complete():
msg.plan_id = 'Mission complete'
msg.plan_progress = 100.0
else:
current_wp_index = mission_plan.current_wp_index
current_man_id = mission_plan.waypoint_man_ids[current_wp_index]
total = len(mission_plan.waypoints)
msg.plan_id = str(mission_plan.plan_id)
if self.bb.get(bb_enums.PLAN_IS_GO):
msg.man_id = current_man_id
plan_progress = (current_wp_index * 100.0) / total # percent float
msg.plan_progress = plan_progress
if tip_name in imc_enums.EXECUTING_ACTION_NAMES:
msg.state = imc_enums.STATE_EXECUTING
elif tip_name in imc_enums.BLOCKED_ACTION_NAMES:
msg.state = imc_enums.STATE_BLOCKED
msg.plan_id = 'SAFETY FALLBACK'
msg.man_id = 'EMERGENCY'
msg.plan_progress = 0.0
else:
msg.state = imc_enums.STATE_READY
if self.bb.get(bb_enums.ENABLE_AUTONOMY):
msg.plan_id += '(AUTONOMOUS)'
# send message to neptus
self.plan_control_state_pub.publish(msg)
return pt.Status.SUCCESS
class A_UpdateNeptusVehicleState(pt.behaviour.Behaviour):
def __init__(self, vehicle_state_topic):
super(A_UpdateNeptusVehicleState, self).__init__("A_UpdateNeptusVehicleState")
self.bb = pt.blackboard.Blackboard()
self.vehicle_state_pub = None
self.vehicle_state_topic = vehicle_state_topic
def setup(self, timeout):
self.vehicle_state_pub = rospy.Publisher(self.vehicle_state_topic, VehicleState, queue_size=1)
return True
def update(self):
"""
this is the message that makes SAM:DISCONNECTED better.
"""
vs = VehicleState()
tip_name = self.bb.get(bb_enums.TREE_TIP_NAME)
if tip_name in imc_enums.EXECUTING_ACTION_NAMES:
vs.op_mode = imc_enums.OP_MODE_MANEUVER
elif tip_name == 'A_EmergencySurface':
vs.op_mode = imc_enums.OP_MODE_ERROR
else:
vs.op_mode = imc_enums.OP_MODE_SERVICE
self.vehicle_state_pub.publish(vs)
return pt.Status.SUCCESS
class A_UpdateNeptusPlanDB(pt.behaviour.Behaviour):
def __init__(self,
plandb_topic,
utm_link,
local_link,
latlontoutm_service_name,
latlontoutm_service_name_alternative):
super(A_UpdateNeptusPlanDB, self).__init__("A_UpdateNeptusPlanDB")
self.bb = pt.blackboard.Blackboard()
# neptus sends lat/lon, which we convert to utm, which we then convert to local
self.utm_link = utm_link
self.local_link = local_link
self.latlontoutm_service_name = latlontoutm_service_name
self.latlontoutm_service_name_alternative = latlontoutm_service_name_alternative
# the message body is largely the same, so we can re-use most of it
self.plandb_msg = PlanDB()
self.plandb_msg.type = imc_enums.PLANDB_TYPE_SUCCESS
self.plandb_msg.op = imc_enums.PLANDB_OP_SET
self.plandb_pub = None
self.plandb_sub = None
self.latest_plandb_msg = None
self.plandb_topic = plandb_topic
def setup(self, timeout):
self.plandb_pub = rospy.Publisher(self.plandb_topic, PlanDB, queue_size=1)
self.plandb_sub = rospy.Subscriber(self.plandb_topic, PlanDB, callback=self.plandb_cb, queue_size=1)
return True
def plandb_cb(self, plandb_msg):
"""
as an answer to OUR answer of 'type=success, op=set', neptus sends a 'type=request, op=get_info'.
"""
# rospy.loginfo("plandb_db {}".format(plandb_msg))
self.latest_plandb_msg = plandb_msg
def make_plandb_info(self):
current_mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
plan_info = PlanDBInformation()
plan_info.plan_id = current_mission_plan.plan_id
plan_info.md5 = current_mission_plan.plandb_msg.plan_spec_md5
plan_info.change_time = current_mission_plan.creation_time/1000.0
return plan_info
def handle_request_get_info(self, plandb_msg):
# we need to respond to this with some info... but what?
rospy.loginfo_throttle_identical(30, "Got REQUEST GET_INFO planDB msg from Neptus")
current_mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
if current_mission_plan is None:
return
response = PlanDB()
response.plan_id = current_mission_plan.plan_id
response.type = imc_enums.PLANDB_TYPE_SUCCESS
response.op = imc_enums.PLANDB_OP_GET_INFO
response.plandb_information = self.make_plandb_info()
self.plandb_pub.publish(response)
rospy.loginfo_throttle_identical(30, "Answered GET_INFO for plan:"+str(response.plan_id))
def handle_request_get_state(self, plandb_msg):
rospy.loginfo_throttle_identical(30, "Got REQUEST GET_STATE planDB msg from Neptus")
current_mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
if current_mission_plan is None:
return
# https://github.com/LSTS/imcjava/blob/d95fddeab4c439e603cf5e30a32979ad7ace5fbc/src/java/pt/lsts/imc/adapter/PlanDbManager.java#L160
# See above for an example
# TODO it seems like we need to keep a planDB ourselves on this side, collect all the plans we
# received and answer this get_state with data from them all.
# lets try telling neptus that we just got one plan, maybe that'll be okay?
# seems alright, but after this message is sent, the plan goes red :/
response = PlanDB()
response.plan_id = current_mission_plan.plan_id
response.type = imc_enums.PLANDB_TYPE_SUCCESS
response.op = imc_enums.PLANDB_OP_GET_STATE
response.plandb_state = PlanDBState()
response.plandb_state.plan_count = 1
response.plandb_state.plans_info.append(self.make_plandb_info())
self.plandb_pub.publish(response)
rospy.loginfo_throttle_identical(30, "Answered GET_STATE for plan:\n"+str(response.plan_id))
def handle_set_plan(self, plandb_msg):
# there is a plan we can at least look at
mission_plan = MissionPlan(plan_frame = self.utm_link,
plandb_msg = plandb_msg,
latlontoutm_service_name = self.latlontoutm_service_name,
latlontoutm_service_name_alternative = self.latlontoutm_service_name_alternative,
coverage_swath = self.bb.get(bb_enums.SWATH),
vehicle_localization_error_growth = self.bb.get(bb_enums.LOCALIZATION_ERROR_GROWTH))
if mission_plan.no_service:
self.feedback_message = "MISSION PLAN HAS NO SERVICE"
rospy.logerr(self.feedback_message)
return
self.bb.set(bb_enums.MISSION_PLAN_OBJ, mission_plan)
self.bb.set(bb_enums.ENABLE_AUTONOMY, False)
self.bb.set(bb_enums.MISSION_FINALIZED, False)
self.bb.set(bb_enums.PLAN_IS_GO, False)
rospy.loginfo_throttle_identical(5, "Set the mission plan to:{} and un-finalized the mission.".format(mission_plan))
def handle_plandb_msg(self):
plandb_msg = self.latest_plandb_msg
if plandb_msg is None:
return
typee = plandb_msg.type
op = plandb_msg.op
# request get_info
if typee == imc_enums.PLANDB_TYPE_REQUEST and op == imc_enums.PLANDB_OP_GET_INFO:
self.handle_request_get_info(plandb_msg)
elif typee == imc_enums.PLANDB_TYPE_REQUEST and op == imc_enums.PLANDB_OP_GET_STATE:
self.handle_request_get_state(plandb_msg)
elif typee == imc_enums.PLANDB_TYPE_SUCCESS and op == imc_enums.PLANDB_OP_SET:
self.feedback_message = "Got SUCCESS for plandb set"
elif typee == imc_enums.PLANDB_TYPE_SUCCESS and op == imc_enums.PLANDB_OP_GET_INFO:
self.feedback_message = "Got SUCCESS for plandb get info"
elif typee == imc_enums.PLANDB_TYPE_SUCCESS and op == imc_enums.PLANDB_OP_GET_STATE:
self.feedback_message = "Got SUCCESS for plandb get state"
elif op == imc_enums.PLANDB_OP_SET:
self.handle_set_plan(plandb_msg)
else:
self.feedback_message = "Got some unhandled planDB message:\n"+str(plandb_msg)
def respond_set_success(self):
current_mission_plan = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
if current_mission_plan is None:
self.feedback_message = "No mission plan obj!"
return
plan_id = current_mission_plan.plan_id
self.plandb_msg.plan_id = plan_id
self.plandb_pub.publish(self.plandb_msg)
self.feedback_message = "Answered set success for plan_id:"+str(plan_id)
def update(self):
# we just want to tell neptus we got the plan all the time
# this keeps the thingy green
self.respond_set_success()
self.handle_plandb_msg()
# reset
self.latest_plandb_msg = None
return pt.Status.SUCCESS
class A_UpdateMissonForPOI(pt.behaviour.Behaviour):
"""
creates a new diamond-shaped mission over a detected POI
and sets that as the current mission plan.
always returns SUCCESS
"""
def __init__(self, utm_link, poi_link, latlontoutm_service_name):
super(A_UpdateMissonForPOI, self).__init__(name="A_UpdateMissonForPOI")
self.bb = pt.blackboard.Blackboard()
self.utm_link = utm_link
self.poi_link = poi_link
self.tf_listener = tf.TransformListener()
self.latlontoutm_service_name = latlontoutm_service_name
self.poi_link_available = False
def setup(self, timeout):
try:
rospy.loginfo_throttle(3, "Waiting for transform from {} to {}...".format(self.poi_link, self.utm_link))
self.tf_listener.waitForTransform(self.poi_link, self.utm_link, rospy.Time(), rospy.Duration(timeout))
rospy.loginfo_throttle(3, "...Got it")
self.poi_link_available = True
except:
rospy.logerr_throttle(5, "Could not find tf from:"+self.poi_link+" to:"+self.utm_link+" disabling updates")
return True
def update(self):
#XXX UNTESTED STUFF HERE, RETURN FAILURE TO KEEP PPL
#XXX FROM USING THIS ACTION
return pt.Status.FAILURE
if not self.poi_link_available:
return pt.Status.FAILURE
poi = self.bb.get(bb_enums.POI_POINT_STAMPED)
if poi is None:
return pt.Status.SUCCESS
poi_local = self.tf_listener.transformPoint(self.utm_link, poi)
x = poi_local.point.x
y = poi_local.point.y
depth = poi.point.z
# construct the waypoints that we want to go to
inspection_depth = max(1, depth - 5)
radius = 10
# go east,west,north,south,center
# so we do bunch of fly-overs
waypoints = [
(x+radius, y, inspection_depth),
(x-radius, y, inspection_depth),
(x, y+radius, inspection_depth),
(x, y-radius, inspection_depth),
(x, y, 0)
]
waypoint_man_ids = ['east', 'west', 'north', 'south', 'surface_center']
# construct a planDB message to be given to the mission_plan
# we will not fill the plan_spec of this plandb message,
# and instead call a different constructor of MissionPlan
# to bypass the lat/lon stuff
pdb = PlanDB()
pdb.request_id = 42
pdb.plan_id = "POI"
# set it in the tree
mission_plan = MissionPlan(plan_frame = self.utm_link,
plandb_msg = pdb,
waypoints = waypoints,
waypoint_man_ids=waypoint_man_ids,
latlontoutm_service_name = self.latlontoutm_service_name)
self.bb.set(bb_enums.MISSION_PLAN_OBJ, mission_plan)
rospy.loginfo_throttle_identical(5, "Due to POI, set the mission plan to:"+str(mission_plan))
return pt.Status.SUCCESS
class A_VizPublishPlan(pt.behaviour.Behaviour):
"""
Publishes the current plans waypoints as a PoseArray
"""
def __init__(self, plan_viz_topic):
super(A_VizPublishPlan, self).__init__(name="A_VizPublishPlan")
self.bb = pt.blackboard.Blackboard()
self.pa_pub = None
self.plan_viz_topic = plan_viz_topic
def setup(self, timeout):
self.pa_pub = rospy.Publisher(self.plan_viz_topic, PoseArray, queue_size=1)
return True
def update(self):
mission = self.bb.get(bb_enums.MISSION_PLAN_OBJ)
if mission is not None:
pa = mission.get_pose_array(flip_z=True)
else:
pa = PoseArray()
self.pa_pub.publish(pa)
return pt.Status.SUCCESS
class A_FollowLeader(ptr.actions.ActionClient):
def __init__(self,
action_namespace,
leader_link):
"""
Runs an action server that will move the robot towards another tf link
"""
self.bb = pt.blackboard.Blackboard()
list_of_maneuvers = self.bb.get(bb_enums.MANEUVER_ACTIONS)
if list_of_maneuvers is None:
list_of_maneuvers = ["A_FollowLeader"]
else:
list_of_maneuvers.append("A_FollowLeader")
self.bb.set(bb_enums.MANEUVER_ACTIONS, list_of_maneuvers)
self.action_goal_handle = None
self.leader_link = leader_link
# become action client
ptr.actions.ActionClient.__init__(
self,
name="A_FollowLeader",
action_spec=GotoWaypointAction,
action_goal=None,
action_namespace = action_namespace,
override_feedback_message_on_running="Moving towards"+str(leader_link)
)
self.action_server_ok = False
def setup(self, timeout):
"""
Overwriting the normal ptr action setup to stop it from failing the setup step
and instead handling this failure in the tree.
"""
self.logger.debug("%s.setup()" % self.__class__.__name__)
self.action_client = actionlib.SimpleActionClient(
self.action_namespace,
self.action_spec
)
if not self.action_client.wait_for_server(rospy.Duration(timeout)):
self.logger.error("{0}.setup() could not connect to the action server at '{1}'".format(self.__class__.__name__, self.action_namespace))
self.action_client = None
else:
self.action_server_ok = True
return True
def initialise(self):
# construct the message
self.action_goal = GotoWaypointGoal()
# leave 0,0,0 because we want to go to the frame's center
self.action_goal.target_pose.header.frame_id = self.leader_link
rospy.loginfo("Follow action goal initialized")
# ensure that we still need to send the goal
self.sent_goal = False
def update(self):
"""
Check only to see whether the underlying action server has
succeeded, is running, or has cancelled/aborted for some reason and
map these to the usual behaviour return states.
"""
if not self.action_server_ok:
self.feedback_message = "Action Server for follow leader action can not be used!"
rospy.logerr_throttle_identical(5,self.feedback_message)
return pt.Status.FAILURE
# if your action client is not valid
if not self.action_client:
self.feedback_message = "ActionClient is invalid! Client:"+str(self.action_client)
rospy.logerr(self.feedback_message)
return pt.Status.FAILURE
# if the action_goal is invalid
if not self.action_goal:
self.feedback_message = "No action_goal!"
rospy.logwarn(self.feedback_message)
return pt.Status.FAILURE
# if goal hasn't been sent yet
if not self.sent_goal:
self.action_goal_handle = self.action_client.send_goal(self.action_goal, feedback_cb=self.feedback_cb)
self.sent_goal = True
rospy.loginfo("Sent goal to action server:"+str(self.action_goal))
self.feedback_message = "Goal sent"
return pt.Status.RUNNING
# if the goal was aborted or preempted
if self.action_client.get_state() in [actionlib_msgs.GoalStatus.ABORTED,
actionlib_msgs.GoalStatus.PREEMPTED]:
self.feedback_message = "Aborted goal"
rospy.loginfo(self.feedback_message)
return pt.Status.FAILURE
result = self.action_client.get_result()
# if the goal was accomplished
if result:
self.feedback_message = "Completed goal"
rospy.loginfo(self.feedback_message)
return pt.Status.SUCCESS
return pt.Status.RUNNING
def feedback_cb(self, msg):
pass
class A_ReadBuoys(pt.behaviour.Behaviour):
'''
This action reads the uncertain positions
(mean and covariance) of buoys from the rostopic.
'''
def __init__(
self,
topic_name,
buoy_link,
utm_link,
latlon_utm_serv,
):
# rostopic name and type (e.g. marker array)
self.topic_name = topic_name
# frame IDs for TF
self.buoy_link = buoy_link
self.utm_link = utm_link
# lat/lon to utm service
self.latlon_utm_serv = latlon_utm_serv
# blackboard for info
self.bb = pt.blackboard.Blackboard()
# become a behaviour
pt.behaviour.Behaviour.__init__(
self,
name="A_ReadBuoys"
)
# for coordinate frame transformations
self.tf_listener = tf.TransformListener()
def setup(self, timeout):
# wait for TF transformation
try:
rospy.loginfo('Waiting for transform from {} to {}.'.format(
self.buoy_link,
self.utm_link
))
self.tf_listener.waitForTransform(
self.buoy_link,
self.utm_link,
rospy.Time(),
rospy.Duration(timeout)
)
except:
rospy.loginfo('Transform from {} to {} not found.'.format(
self.buoy_link,
self.utm_link
))
# subscribe to buoy positions
self.sub = rospy.Subscriber(
self.topic_name,
MarkerArray,
callback=self.cb,
queue_size=10
)
# self.bb.set(bb_enums.BUOYS, None)
self.buoys = None
return True
def cb(self, msg):
'''
This will read the uncertain buoy positions
from the SLAM backend and sensors.
But, for now, it just reads the simulator buoys.
The buoys here are assumed to be in the map frame.
'''
# space for bouy positions
# rospy.loginfo('hello')
self.buoys = list()
# loop through visualization markers
for marker in msg.markers:
# convert their pose to pose stamped
pose = PoseStamped(
header=marker.header,
pose=marker.pose
)
# # transform it from local to UTM frame
# pose = self.tf_listener.transformPose(
# self.utm_link,
# pose
# )
# add it to the list
self.buoys.append([
pose.pose.position.x,
pose.pose.position.y,
pose.pose.position.z
])
# make it into a numpy array because why not
self.buoys = np.array(self.buoys)
self.buoys = self.buoys[np.argsort(self.buoys[:,0])]
self.buoys = self.buoys.reshape((-1, 3, 3))
self.buoys = np.sort(self.buoys, axis=1)
self.buoys = dict(
front=self.buoys[:,0,:],
left=self.buoys[0,:,:],
back=self.buoys[:,-1,:],
right=self.buoys[-1,:,:],
all=self.buoys
)
def update(self):
# put the buoy positions in the blackboard
self.bb.set(bb_enums.BUOYS, self.buoys)
return pt.Status.SUCCESS
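# --- Hedged usage sketch (not part of the original code) ---
# Shows how a few of the behaviours above could be assembled into a small
# py_trees sequence. The topic/frame names are placeholders, the exact
# composite constructor signature varies between py_trees releases, and
# rospy.init_node is assumed to have been called already, so treat this
# purely as a sketch.
def _example_build_subtree():
    update_tf = A_UpdateTF(utm_link="utm", base_link="base_link")
    viz_plan = A_VizPublishPlan(plan_viz_topic="/viz/mission_plan")
    set_next_wp = A_SetNextPlanAction(do_not_visit=True)
    subtree = pt.composites.Sequence(name="ExampleUpdates")
    subtree.add_children([update_tf, viz_plan, set_next_wp])
    return subtree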
|
[
"std_srvs.srv.SetBool",
"numpy.radians",
"rospy.logerr",
"rospy.logwarn",
"imc_ros_bridge.msg.EstimatedState",
"numpy.argsort",
"py_trees.behaviour.Behaviour.__init__",
"numpy.array",
"rospy.logwarn_throttle",
"tf.TransformListener",
"imc_ros_bridge.msg.PlanDBState",
"imc_ros_bridge.msg.PlanDB",
"sensor_msgs.msg.NavSatFix",
"numpy.sort",
"rospy.ServiceProxy",
"rospy.loginfo_throttle_identical",
"rospy.Subscriber",
"geometry_msgs.msg.PoseArray",
"actionlib.SimpleActionClient",
"rospy.loginfo_throttle",
"imc_ros_bridge.msg.PlanDBInformation",
"rospy.logerr_throttle",
"mission_plan.MissionPlan",
"rospy.Time",
"rospy.logerr_throttle_identical",
"mission_log.MissionLog",
"rospy.Duration",
"rospy.logwarn_throttle_identical",
"rospy.Publisher",
"time.time",
"rospy.loginfo",
"py_trees.blackboard.Blackboard",
"tf.transformations.euler_from_quaternion",
"imc_ros_bridge.msg.PlanControlState",
"smarc_msgs.msg.GotoWaypointGoal",
"py_trees_ros.actions.ActionClient.__init__",
"geometry_msgs.msg.PointStamped",
"geometry_msgs.msg.PoseStamped",
"imc_ros_bridge.msg.VehicleState",
"std_msgs.msg.Empty"
] |
[((1112, 1138), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (1136, 1138), True, 'import py_trees as pt\n'), ((1238, 1245), 'std_msgs.msg.Empty', 'Empty', ([], {}), '()\n', (1243, 1245), False, 'from std_msgs.msg import Float64, Header, Bool, Empty\n'), ((1297, 1345), 'rospy.Publisher', 'rospy.Publisher', (['self.topic', 'Empty'], {'queue_size': '(1)'}), '(self.topic, Empty, queue_size=1)\n', (1312, 1345), False, 'import rospy\n'), ((2425, 2451), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (2449, 2451), True, 'import py_trees as pt\n'), ((2623, 2681), 'mission_log.MissionLog', 'MissionLog', ([], {'mission_plan': 'None', 'save_location': 'save_location'}), '(mission_plan=None, save_location=save_location)\n', (2633, 2681), False, 'from mission_log import MissionLog\n'), ((2777, 2824), 'rospy.loginfo', 'rospy.loginfo', (['"""Started new manual mission log"""'], {}), "('Started new manual mission log')\n", (2790, 2824), False, 'import rospy\n'), ((4780, 4806), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (4804, 4806), True, 'import py_trees as pt\n'), ((5439, 5465), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (5463, 5465), True, 'import py_trees as pt\n'), ((5613, 5672), 'mission_log.MissionLog', 'MissionLog', ([], {'mission_plan': 'mplan', 'save_location': 'save_location'}), '(mission_plan=mplan, save_location=save_location)\n', (5623, 5672), False, 'from mission_log import MissionLog\n'), ((5761, 5801), 'rospy.loginfo', 'rospy.loginfo', (['"""Started new mission log"""'], {}), "('Started new mission log')\n", (5774, 5801), False, 'import rospy\n'), ((8074, 8126), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['dvl_on_off_service_name', 'SetBool'], {}), '(dvl_on_off_service_name, SetBool)\n', (8092, 8126), False, 'import rospy\n'), ((8196, 8222), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (8220, 8222), True, 'import py_trees as pt\n'), ((8242, 8251), 'std_srvs.srv.SetBool', 'SetBool', ([], {}), '()\n', (8249, 8251), False, 'from std_srvs.srv import SetBool\n'), ((8863, 8874), 'time.time', 'time.time', ([], {}), '()\n', (8872, 8874), False, 'import time\n'), ((10136, 10162), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (10160, 10162), True, 'import py_trees as pt\n'), ((10211, 10446), 'py_trees_ros.actions.ActionClient.__init__', 'ptr.actions.ActionClient.__init__', (['self'], {'name': '"""A_EmergencySurface"""', 'action_spec': 'GotoWaypointAction', 'action_goal': 'None', 'action_namespace': 'emergency_action_namespace', 'override_feedback_message_on_running': '"""EMERGENCY SURFACING"""'}), "(self, name='A_EmergencySurface',\n action_spec=GotoWaypointAction, action_goal=None, action_namespace=\n emergency_action_namespace, override_feedback_message_on_running=\n 'EMERGENCY SURFACING')\n", (10244, 10446), True, 'import py_trees_ros as ptr\n'), ((10848, 10917), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['self.action_namespace', 'self.action_spec'], {}), '(self.action_namespace, self.action_spec)\n', (10876, 10917), False, 'import actionlib\n'), ((11651, 11669), 'smarc_msgs.msg.GotoWaypointGoal', 'GotoWaypointGoal', ([], {}), '()\n', (11667, 11669), False, 'from smarc_msgs.msg import GotoWaypointAction, GotoWaypointGoal\n'), ((14109, 14135), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (14133, 14135), True, 'import py_trees as 
pt\n'), ((15359, 15385), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (15383, 15385), True, 'import py_trees as pt\n'), ((15789, 16002), 'py_trees_ros.actions.ActionClient.__init__', 'ptr.actions.ActionClient.__init__', (['self'], {'name': 'self.node_name', 'action_spec': 'GotoWaypointAction', 'action_goal': 'None', 'action_namespace': 'action_namespace', 'override_feedback_message_on_running': '"""Moving to waypoint"""'}), "(self, name=self.node_name, action_spec=\n GotoWaypointAction, action_goal=None, action_namespace=action_namespace,\n override_feedback_message_on_running='Moving to waypoint')\n", (15822, 16002), True, 'import py_trees_ros as ptr\n'), ((16463, 16532), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['self.action_namespace', 'self.action_spec'], {}), '(self.action_namespace, self.action_spec)\n', (16491, 16532), False, 'import actionlib\n'), ((17993, 18011), 'smarc_msgs.msg.GotoWaypointGoal', 'GotoWaypointGoal', ([], {}), '()\n', (18009, 18011), False, 'from smarc_msgs.msg import GotoWaypointAction, GotoWaypointGoal\n'), ((21400, 21429), 'rospy.loginfo_throttle', 'rospy.loginfo_throttle', (['(5)', 'fb'], {}), '(5, fb)\n', (21422, 21429), False, 'import rospy\n'), ((21896, 21922), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (21920, 21922), True, 'import py_trees as pt\n'), ((22015, 22037), 'tf.TransformListener', 'tf.TransformListener', ([], {}), '()\n', (22035, 22037), False, 'import tf\n'), ((23852, 23866), 'geometry_msgs.msg.PointStamped', 'PointStamped', ([], {}), '()\n', (23864, 23866), False, 'from geometry_msgs.msg import PointStamped, PoseArray, PoseStamped\n'), ((23936, 23949), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (23946, 23949), False, 'import rospy\n'), ((24470, 24496), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (24494, 24496), True, 'import py_trees as pt\n'), ((24662, 24737), 'rospy.Subscriber', 'rospy.Subscriber', (['self.plan_control_topic', 'PlanControl', 'self.plancontrol_cb'], {}), '(self.plan_control_topic, PlanControl, self.plancontrol_cb)\n', (24678, 24737), False, 'import rospy\n'), ((27684, 27710), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (27708, 27710), True, 'import py_trees as pt\n'), ((27833, 27849), 'imc_ros_bridge.msg.EstimatedState', 'EstimatedState', ([], {}), '()\n', (27847, 27849), False, 'from imc_ros_bridge.msg import EstimatedState, VehicleState, PlanDB, PlanDBInformation, PlanDBState, PlanControlState, PlanControl, PlanSpecification, Maneuver\n'), ((28039, 28050), 'sensor_msgs.msg.NavSatFix', 'NavSatFix', ([], {}), '()\n', (28048, 28050), False, 'from sensor_msgs.msg import NavSatFix\n'), ((28117, 28190), 'rospy.Publisher', 'rospy.Publisher', (['self.estimated_state_topic', 'EstimatedState'], {'queue_size': '(1)'}), '(self.estimated_state_topic, EstimatedState, queue_size=1)\n', (28132, 28190), False, 'import rospy\n'), ((28218, 28278), 'rospy.Publisher', 'rospy.Publisher', (['self.gps_fix_topic', 'NavSatFix'], {'queue_size': '(1)'}), '(self.gps_fix_topic, NavSatFix, queue_size=1)\n', (28233, 28278), False, 'import rospy\n'), ((28311, 28376), 'rospy.Publisher', 'rospy.Publisher', (['self.gps_nav_data_topic', 'NavSatFix'], {'queue_size': '(1)'}), '(self.gps_nav_data_topic, NavSatFix, queue_size=1)\n', (28326, 28376), False, 'import rospy\n'), ((29056, 29071), 'numpy.radians', 'np.radians', (['lat'], {}), '(lat)\n', (29066, 29071), True, 'import numpy as 
np\n'), ((29098, 29113), 'numpy.radians', 'np.radians', (['lon'], {}), '(lon)\n', (29108, 29113), True, 'import numpy as np\n'), ((29176, 29227), 'tf.transformations.euler_from_quaternion', 'tf.transformations.euler_from_quaternion', (['world_rot'], {}), '(world_rot)\n', (29216, 29227), False, 'import tf\n'), ((29979, 30005), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (30003, 30005), True, 'import py_trees as pt\n'), ((30184, 30262), 'rospy.Publisher', 'rospy.Publisher', (['self.plan_control_state_topic', 'PlanControlState'], {'queue_size': '(1)'}), '(self.plan_control_state_topic, PlanControlState, queue_size=1)\n', (30199, 30262), False, 'import rospy\n'), ((30377, 30395), 'imc_ros_bridge.msg.PlanControlState', 'PlanControlState', ([], {}), '()\n', (30393, 30395), False, 'from imc_ros_bridge.msg import EstimatedState, VehicleState, PlanDB, PlanDBInformation, PlanDBState, PlanControlState, PlanControl, PlanSpecification, Maneuver\n'), ((32248, 32274), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (32272, 32274), True, 'import py_trees as pt\n'), ((32432, 32501), 'rospy.Publisher', 'rospy.Publisher', (['self.vehicle_state_topic', 'VehicleState'], {'queue_size': '(1)'}), '(self.vehicle_state_topic, VehicleState, queue_size=1)\n', (32447, 32501), False, 'import rospy\n'), ((32647, 32661), 'imc_ros_bridge.msg.VehicleState', 'VehicleState', ([], {}), '()\n', (32659, 32661), False, 'from imc_ros_bridge.msg import EstimatedState, VehicleState, PlanDB, PlanDBInformation, PlanDBState, PlanControlState, PlanControl, PlanSpecification, Maneuver\n'), ((33422, 33448), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (33446, 33448), True, 'import py_trees as pt\n'), ((33864, 33872), 'imc_ros_bridge.msg.PlanDB', 'PlanDB', ([], {}), '()\n', (33870, 33872), False, 'from imc_ros_bridge.msg import EstimatedState, VehicleState, PlanDB, PlanDBInformation, PlanDBState, PlanControlState, PlanControl, PlanSpecification, Maneuver\n'), ((34188, 34244), 'rospy.Publisher', 'rospy.Publisher', (['self.plandb_topic', 'PlanDB'], {'queue_size': '(1)'}), '(self.plandb_topic, PlanDB, queue_size=1)\n', (34203, 34244), False, 'import rospy\n'), ((34271, 34357), 'rospy.Subscriber', 'rospy.Subscriber', (['self.plandb_topic', 'PlanDB'], {'callback': 'self.plandb_cb', 'queue_size': '(1)'}), '(self.plandb_topic, PlanDB, callback=self.plandb_cb,\n queue_size=1)\n', (34287, 34357), False, 'import rospy\n'), ((34771, 34790), 'imc_ros_bridge.msg.PlanDBInformation', 'PlanDBInformation', ([], {}), '()\n', (34788, 34790), False, 'from imc_ros_bridge.msg import EstimatedState, VehicleState, PlanDB, PlanDBInformation, PlanDBState, PlanControlState, PlanControl, PlanSpecification, Maneuver\n'), ((35143, 35230), 'rospy.loginfo_throttle_identical', 'rospy.loginfo_throttle_identical', (['(30)', '"""Got REQUEST GET_INFO planDB msg from Neptus"""'], {}), "(30,\n 'Got REQUEST GET_INFO planDB msg from Neptus')\n", (35175, 35230), False, 'import rospy\n'), ((35378, 35386), 'imc_ros_bridge.msg.PlanDB', 'PlanDB', ([], {}), '()\n', (35384, 35386), False, 'from imc_ros_bridge.msg import EstimatedState, VehicleState, PlanDB, PlanDBInformation, PlanDBState, PlanControlState, PlanControl, PlanSpecification, Maneuver\n'), ((35811, 35899), 'rospy.loginfo_throttle_identical', 'rospy.loginfo_throttle_identical', (['(30)', '"""Got REQUEST GET_STATE planDB msg from Neptus"""'], {}), "(30,\n 'Got REQUEST GET_STATE planDB msg from Neptus')\n", (35843, 35899), 
False, 'import rospy\n'), ((36557, 36565), 'imc_ros_bridge.msg.PlanDB', 'PlanDB', ([], {}), '()\n', (36563, 36565), False, 'from imc_ros_bridge.msg import EstimatedState, VehicleState, PlanDB, PlanDBInformation, PlanDBState, PlanControlState, PlanControl, PlanSpecification, Maneuver\n'), ((36761, 36774), 'imc_ros_bridge.msg.PlanDBState', 'PlanDBState', ([], {}), '()\n', (36772, 36774), False, 'from imc_ros_bridge.msg import EstimatedState, VehicleState, PlanDB, PlanDBInformation, PlanDBState, PlanControlState, PlanControl, PlanSpecification, Maneuver\n'), ((40471, 40497), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (40495, 40497), True, 'import py_trees as pt\n'), ((40591, 40613), 'tf.TransformListener', 'tf.TransformListener', ([], {}), '()\n', (40611, 40613), False, 'import tf\n'), ((42524, 42532), 'imc_ros_bridge.msg.PlanDB', 'PlanDB', ([], {}), '()\n', (42530, 42532), False, 'from imc_ros_bridge.msg import EstimatedState, VehicleState, PlanDB, PlanDBInformation, PlanDBState, PlanControlState, PlanControl, PlanSpecification, Maneuver\n'), ((42642, 42816), 'mission_plan.MissionPlan', 'MissionPlan', ([], {'plan_frame': 'self.utm_link', 'plandb_msg': 'pdb', 'waypoints': 'waypoints', 'waypoint_man_ids': 'waypoint_man_ids', 'latlontoutm_service_name': 'self.latlontoutm_service_name'}), '(plan_frame=self.utm_link, plandb_msg=pdb, waypoints=waypoints,\n waypoint_man_ids=waypoint_man_ids, latlontoutm_service_name=self.\n latlontoutm_service_name)\n', (42653, 42816), False, 'from mission_plan import MissionPlan\n'), ((43406, 43432), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (43430, 43432), True, 'import py_trees as pt\n'), ((43558, 43619), 'rospy.Publisher', 'rospy.Publisher', (['self.plan_viz_topic', 'PoseArray'], {'queue_size': '(1)'}), '(self.plan_viz_topic, PoseArray, queue_size=1)\n', (43573, 43619), False, 'import rospy\n'), ((44178, 44204), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (44202, 44204), True, 'import py_trees as pt\n'), ((45251, 45320), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['self.action_namespace', 'self.action_spec'], {}), '(self.action_namespace, self.action_spec)\n', (45279, 45320), False, 'import actionlib\n'), ((45780, 45798), 'smarc_msgs.msg.GotoWaypointGoal', 'GotoWaypointGoal', ([], {}), '()\n', (45796, 45798), False, 'from smarc_msgs.msg import GotoWaypointAction, GotoWaypointGoal\n'), ((45945, 45992), 'rospy.loginfo', 'rospy.loginfo', (['"""Follow action goal initialized"""'], {}), "('Follow action goal initialized')\n", (45958, 45992), False, 'import rospy\n'), ((48702, 48728), 'py_trees.blackboard.Blackboard', 'pt.blackboard.Blackboard', ([], {}), '()\n', (48726, 48728), True, 'import py_trees as pt\n'), ((48767, 48824), 'py_trees.behaviour.Behaviour.__init__', 'pt.behaviour.Behaviour.__init__', (['self'], {'name': '"""A_ReadBuoys"""'}), "(self, name='A_ReadBuoys')\n", (48798, 48824), True, 'import py_trees as pt\n'), ((48934, 48956), 'tf.TransformListener', 'tf.TransformListener', ([], {}), '()\n', (48954, 48956), False, 'import tf\n'), ((49605, 49684), 'rospy.Subscriber', 'rospy.Subscriber', (['self.topic_name', 'MarkerArray'], {'callback': 'self.cb', 'queue_size': '(10)'}), '(self.topic_name, MarkerArray, callback=self.cb, queue_size=10)\n', (49621, 49684), False, 'import rospy\n'), ((50885, 50905), 'numpy.array', 'np.array', (['self.buoys'], {}), '(self.buoys)\n', (50893, 50905), True, 'import numpy as np\n'), ((51040, 
51067), 'numpy.sort', 'np.sort', (['self.buoys'], {'axis': '(1)'}), '(self.buoys, axis=1)\n', (51047, 51067), True, 'import numpy as np\n'), ((6048, 6112), 'rospy.loginfo', 'rospy.loginfo', (['"""Mission plan is None, can\'t make a log of this?"""'], {}), '("Mission plan is None, can\'t make a log of this?")\n', (6061, 6112), False, 'import rospy\n'), ((8985, 9054), 'rospy.loginfo_throttle_identical', 'rospy.loginfo_throttle_identical', (['(5)', '"""Waiting on DVL toggle cooldown"""'], {}), "(5, 'Waiting on DVL toggle cooldown')\n", (9017, 9054), False, 'import rospy\n'), ((9529, 9540), 'time.time', 'time.time', ([], {}), '()\n', (9538, 9540), False, 'import time\n'), ((11409, 11522), 'rospy.logwarn_throttle_identical', 'rospy.logwarn_throttle_identical', (['(5)', '"""No Action Server found for emergency action, will just block the tree!"""'], {}), "(5,\n 'No Action Server found for emergency action, will just block the tree!')\n", (11441, 11522), False, 'import rospy\n'), ((11864, 11921), 'rospy.logerr_throttle_identical', 'rospy.logerr_throttle_identical', (['(5)', 'self.feedback_message'], {}), '(5, self.feedback_message)\n', (11895, 11921), False, 'import rospy\n'), ((12135, 12193), 'rospy.logwarn_throttle_identical', 'rospy.logwarn_throttle_identical', (['(5)', 'self.feedback_message'], {}), '(5, self.feedback_message)\n', (12167, 12193), False, 'import rospy\n'), ((12370, 12406), 'rospy.logwarn', 'rospy.logwarn', (['self.feedback_message'], {}), '(self.feedback_message)\n', (12383, 12406), False, 'import rospy\n'), ((13120, 13156), 'rospy.loginfo', 'rospy.loginfo', (['self.feedback_message'], {}), '(self.feedback_message)\n', (13133, 13156), False, 'import rospy\n'), ((13373, 13409), 'rospy.loginfo', 'rospy.loginfo', (['self.feedback_message'], {}), '(self.feedback_message)\n', (13386, 13409), False, 'import rospy\n'), ((14380, 14431), 'rospy.logwarn_throttle', 'rospy.logwarn_throttle', (['(5)', '"""Mission plan was None!"""'], {}), "(5, 'Mission plan was None!')\n", (14402, 14431), False, 'import rospy\n'), ((16984, 17055), 'rospy.logwarn_throttle', 'rospy.logwarn_throttle', (['(5)', '"""No action server found for A_GotoWaypoint!"""'], {}), "(5, 'No action server found for A_GotoWaypoint!')\n", (17006, 17055), False, 'import rospy\n'), ((17183, 17222), 'rospy.logwarn', 'rospy.logwarn', (['"""No mission plan found!"""'], {}), "('No mission plan found!')\n", (17196, 17222), False, 'import rospy\n'), ((17322, 17425), 'rospy.loginfo', 'rospy.loginfo', (['"""No wp found to execute! Does the plan have any waypoints that we understand?"""'], {}), "(\n 'No wp found to execute! 
Does the plan have any waypoints that we understand?'\n )\n", (17335, 17425), False, 'import rospy\n'), ((17734, 17810), 'rospy.loginfo', 'rospy.loginfo', (['"""THIS IS A GOTO MANEUVER, WE ARE USING IT FOR SOMETHING ELSE"""'], {}), "('THIS IS A GOTO MANEUVER, WE ARE USING IT FOR SOMETHING ELSE')\n", (17747, 17810), False, 'import rospy\n'), ((19664, 19721), 'rospy.logerr_throttle_identical', 'rospy.logerr_throttle_identical', (['(5)', 'self.feedback_message'], {}), '(5, self.feedback_message)\n', (19695, 19721), False, 'import rospy\n'), ((19946, 19981), 'rospy.logerr', 'rospy.logerr', (['self.feedback_message'], {}), '(self.feedback_message)\n', (19958, 19981), False, 'import rospy\n'), ((20159, 20195), 'rospy.logwarn', 'rospy.logwarn', (['self.feedback_message'], {}), '(self.feedback_message)\n', (20172, 20195), False, 'import rospy\n'), ((20894, 20930), 'rospy.loginfo', 'rospy.loginfo', (['self.feedback_message'], {}), '(self.feedback_message)\n', (20907, 20930), False, 'import rospy\n'), ((21182, 21218), 'rospy.loginfo', 'rospy.loginfo', (['self.feedback_message'], {}), '(self.feedback_message)\n', (21195, 21218), False, 'import rospy\n'), ((22389, 22427), 'rospy.loginfo_throttle', 'rospy.loginfo_throttle', (['(3)', '"""...Got it"""'], {}), "(3, '...Got it')\n", (22411, 22427), False, 'import rospy\n'), ((23213, 23224), 'time.time', 'time.time', ([], {}), '()\n', (23222, 23224), False, 'import time\n'), ((27214, 27269), 'rospy.logwarn_throttle_identical', 'rospy.logwarn_throttle_identical', (['(10)', '"""AUTONOMOUS MODE"""'], {}), "(10, 'AUTONOMOUS MODE')\n", (27246, 27269), False, 'import rospy\n'), ((28836, 28960), 'rospy.logwarn_throttle_identical', 'rospy.logwarn_throttle_identical', (['(10)', '"""Could not update neptus estimated state because lat/lon/world_rot was None!"""'], {}), "(10,\n 'Could not update neptus estimated state because lat/lon/world_rot was None!'\n )\n", (28868, 28960), False, 'import rospy\n'), ((29606, 29617), 'time.time', 'time.time', ([], {}), '()\n', (29615, 29617), False, 'import time\n'), ((37780, 37815), 'rospy.logerr', 'rospy.logerr', (['self.feedback_message'], {}), '(self.feedback_message)\n', (37792, 37815), False, 'import rospy\n'), ((41009, 41047), 'rospy.loginfo_throttle', 'rospy.loginfo_throttle', (['(3)', '"""...Got it"""'], {}), "(3, '...Got it')\n", (41031, 41047), False, 'import rospy\n'), ((43837, 43848), 'geometry_msgs.msg.PoseArray', 'PoseArray', ([], {}), '()\n', (43846, 43848), False, 'from geometry_msgs.msg import PointStamped, PoseArray, PoseStamped\n'), ((46468, 46525), 'rospy.logerr_throttle_identical', 'rospy.logerr_throttle_identical', (['(5)', 'self.feedback_message'], {}), '(5, self.feedback_message)\n', (46499, 46525), False, 'import rospy\n'), ((46750, 46785), 'rospy.logerr', 'rospy.logerr', (['self.feedback_message'], {}), '(self.feedback_message)\n', (46762, 46785), False, 'import rospy\n'), ((46963, 46999), 'rospy.logwarn', 'rospy.logwarn', (['self.feedback_message'], {}), '(self.feedback_message)\n', (46976, 46999), False, 'import rospy\n'), ((47698, 47734), 'rospy.loginfo', 'rospy.loginfo', (['self.feedback_message'], {}), '(self.feedback_message)\n', (47711, 47734), False, 'import rospy\n'), ((47946, 47982), 'rospy.loginfo', 'rospy.loginfo', (['self.feedback_message'], {}), '(self.feedback_message)\n', (47959, 47982), False, 'import rospy\n'), ((50339, 50390), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {'header': 'marker.header', 'pose': 'marker.pose'}), '(header=marker.header, pose=marker.pose)\n', (50350, 
50390), False, 'from geometry_msgs.msg import PointStamped, PoseArray, PoseStamped\n'), ((50938, 50966), 'numpy.argsort', 'np.argsort', (['self.buoys[:, 0]'], {}), '(self.buoys[:, 0])\n', (50948, 50966), True, 'import numpy as np\n'), ((1464, 1475), 'time.time', 'time.time', ([], {}), '()\n', (1473, 1475), False, 'import time\n'), ((1853, 1864), 'time.time', 'time.time', ([], {}), '()\n', (1862, 1864), False, 'import time\n'), ((11002, 11025), 'rospy.Duration', 'rospy.Duration', (['timeout'], {}), '(timeout)\n', (11016, 11025), False, 'import rospy\n'), ((16617, 16640), 'rospy.Duration', 'rospy.Duration', (['timeout'], {}), '(timeout)\n', (16631, 16640), False, 'import rospy\n'), ((19007, 19081), 'rospy.logwarn_throttle', 'rospy.logwarn_throttle', (['(1)', '"""Speed control of the waypoint action is NONE!"""'], {}), "(1, 'Speed control of the waypoint action is NONE!')\n", (19029, 19081), False, 'import rospy\n'), ((22338, 22350), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (22348, 22350), False, 'import rospy\n'), ((22352, 22375), 'rospy.Duration', 'rospy.Duration', (['timeout'], {}), '(timeout)\n', (22366, 22375), False, 'import rospy\n'), ((22486, 22622), 'rospy.logerr_throttle', 'rospy.logerr_throttle', (['(5)', "('Could not find from ' + self.utm_link + ' to ' + self.base_link +\n '... Nothing except safety will be run')"], {}), "(5, 'Could not find from ' + self.utm_link + ' to ' +\n self.base_link + '... Nothing except safety will be run')\n", (22507, 22622), False, 'import rospy\n'), ((22726, 22737), 'time.time', 'time.time', ([], {}), '()\n', (22735, 22737), False, 'import time\n'), ((23164, 23177), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (23174, 23177), False, 'import rospy\n'), ((23483, 23569), 'rospy.logerr_throttle_identical', 'rospy.logerr_throttle_identical', (['(5)', '"""Could not do tf lookup for some other reason"""'], {}), "(5,\n 'Could not do tf lookup for some other reason')\n", (23514, 23569), False, 'import rospy\n'), ((40958, 40970), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (40968, 40970), False, 'import rospy\n'), ((40972, 40995), 'rospy.Duration', 'rospy.Duration', (['timeout'], {}), '(timeout)\n', (40986, 40995), False, 'import rospy\n'), ((41119, 41238), 'rospy.logerr_throttle', 'rospy.logerr_throttle', (['(5)', "('Could not find tf from:' + self.poi_link + ' to:' + self.utm_link +\n ' disabling updates')"], {}), "(5, 'Could not find tf from:' + self.poi_link + ' to:' +\n self.utm_link + ' disabling updates')\n", (41140, 41238), False, 'import rospy\n'), ((45405, 45428), 'rospy.Duration', 'rospy.Duration', (['timeout'], {}), '(timeout)\n', (45419, 45428), False, 'import rospy\n'), ((49315, 49327), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (49325, 49327), False, 'import rospy\n'), ((49345, 49368), 'rospy.Duration', 'rospy.Duration', (['timeout'], {}), '(timeout)\n', (49359, 49368), False, 'import rospy\n'), ((2102, 2132), 'rospy.logwarn_throttle', 'rospy.logwarn_throttle', (['(1)', 'msg'], {}), '(1, msg)\n', (2124, 2132), False, 'import rospy\n')]
|
""" This Script contain the different function used in the framework
part1. Data processing
part2. Prediction and analisys
part3. Plotting
"""
import numpy as np
import librosa
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import pickle
import time
import struct
""" Data processing """
def get_mel_spectrogram(file_path, mfcc_max_padding=0, n_fft=2048, hop_length=512, n_mels=128):
"""Generates/extracts Log-MEL Spectrogram coefficients with LibRosa """
try:
# Load audio file
y, sr = librosa.load(file_path)
# Normalize audio data between -1 and 1
normalized_y = librosa.util.normalize(y)
# Generate mel scaled filterbanks
        mel = librosa.feature.melspectrogram(y=normalized_y, sr=sr, n_mels=n_mels)
# Convert sound intensity to log amplitude:
mel_db = librosa.amplitude_to_db(abs(mel))
# Normalize between -1 and 1
normalized_mel = librosa.util.normalize(mel_db)
# Should we require padding
shape = normalized_mel.shape[1]
        if (mfcc_max_padding > 0 and shape < mfcc_max_padding):
xDiff = mfcc_max_padding - shape
xLeft = xDiff//2
xRight = xDiff-xLeft
normalized_mel = np.pad(normalized_mel, pad_width=((0,0), (xLeft, xRight)), mode='constant')
except Exception as e:
print("Error parsing wavefile: ", e)
return None
return normalized_mel
def get_mfcc(file_path, mfcc_max_padding=0, n_mfcc=40, robots_noise = None, noise_amp = 1):
"""Generates/extracts MFCC coefficients with LibRosa"""
try:
# Load audio file
y, sr = librosa.load(file_path,sr=None)
        if robots_noise is not None:
y_n, _ = librosa.load(robots_noise)
y = (y + noise_amp * y_n)/(noise_amp + 1)
# Normalize audio data between -1 and 1
normalized_y = librosa.util.normalize(y)
# Compute MFCC coefficients
mfcc = librosa.feature.mfcc(y=normalized_y, sr=sr, n_mfcc=n_mfcc)
# Normalize MFCC between -1 and 1
normalized_mfcc = librosa.util.normalize(mfcc)
# Should we require padding
shape = normalized_mfcc.shape[1]
if (shape < mfcc_max_padding):
pad_width = mfcc_max_padding - shape
normalized_mfcc = np.pad(normalized_mfcc,
pad_width=((0, 0), (0, pad_width)),
mode ='constant',
constant_values=(0,))
except Exception as e:
print("Error parsing wavefile: ", e)
return None
return normalized_mfcc
def add_padding(features, mfcc_max_padding=174):
"""Given an numpy array of features, zero-pads each ocurrence to max_padding"""
padded = []
# Add padding
for i in range(len(features)):
px = features[i]
size = len(px[0])
# Add padding if required
if (size < mfcc_max_padding):
xDiff = mfcc_max_padding - size
xLeft = xDiff//2
xRight = xDiff-xLeft
px = np.pad(px, pad_width=((0,0), (xLeft, xRight)), mode='constant')
padded.append(px)
return padded
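# --- Usage sketch (added for illustration, not part of the original framework) ---
# Shows how get_mfcc() and add_padding() are meant to be combined into one fixed-size
# feature array. The file list and padding width are hypothetical assumptions, and the
# sketch assumes no clip produces more than max_padding frames.
def example_extract_padded_mfcc(file_paths, max_padding=174, n_mfcc=40):
    """Extract MFCCs for a list of wavefiles and zero-pad them to a common width."""
    features = []
    for fp in file_paths:
        mfcc = get_mfcc(fp, mfcc_max_padding=max_padding, n_mfcc=n_mfcc)
        if mfcc is not None:
            features.append(mfcc)
    # pad every occurrence to the same time dimension so they stack into a single array
    padded = add_padding(features, mfcc_max_padding=max_padding)
    return np.array(padded)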
def scale(X, x_min, x_max, axis=0):
"""Scales data between x_min and x_max"""
nom = (X-X.min(axis=axis))*(x_max-x_min)
denom = X.max(axis=axis) - X.min(axis=axis)
denom[denom==0] = 1
return x_min + nom/denom
def save_split_distributions(test_split_idx, train_split_idx, file_path=None):
    if file_path is None:
        print("You must enter a file path to save the splits")
        return False
# Create split dictionary
split = {}
split['test_split_idx'] = test_split_idx
split['train_split_idx'] = train_split_idx
with open(file_path, 'wb') as file_pi:
pickle.dump(split, file_pi)
    return file_path
def load_split_distributions(file_path):
file = open(file_path, 'rb')
data = pickle.load(file)
return [data['test_split_idx'], data['train_split_idx']]
def find_dupes(array):
seen = {}
dupes = []
for x in array:
if x not in seen:
seen[x] = 1
else:
if seen[x] == 1:
dupes.append(x)
seen[x] += 1
return len(dupes)
def read_header(filename):
"""Reads a file's header data and returns a list of wavefile properties"""
wave = open(filename,"rb")
riff = wave.read(12)
fmat = wave.read(36)
num_channels_string = fmat[10:12]
num_channels = struct.unpack('<H', num_channels_string)[0]
sample_rate_string = fmat[12:16]
sample_rate = struct.unpack("<I",sample_rate_string)[0]
bit_depth_string = fmat[22:24]
bit_depth = struct.unpack("<H",bit_depth_string)[0]
return (num_channels, sample_rate, bit_depth)
def play_dataset_sample(dataset_row, audio_path):
"""Given a dataset row it returns an audio player and prints the audio properties"""
fold_num = dataset_row.iloc[0]['fold']
file_name = dataset_row.iloc[0]['file']
    file_path = os.path.join(audio_path, fold_num, file_name)
print("Class:", dataset_row.iloc[0]['class'])
print("File:", file_path)
print("Sample rate:", dataset_row.iloc[0]['sample_rate'])
print("Bit depth:", dataset_row.iloc[0]['bit_depth'])
print("Duration {} seconds".format(dataset_row.iloc[0]['duration']))
    # Sound preview (IPython is imported lazily so the module also works outside notebooks)
    import IPython.display as ipd
    return ipd.Audio(file_path)
"""
Prediction and analysis
"""
def evaluate_model(model, X_train, y_train, X_test, y_test):
train_score = model.evaluate(X_train, y_train, verbose=0)
test_score = model.evaluate(X_test, y_test, verbose=0)
return train_score, test_score
def model_evaluation_report(model, X_train, y_train, X_test, y_test, calc_normal=True):
dash = '-' * 38
# Compute scores
train_score, test_score = evaluate_model(model, X_train, y_train, X_test, y_test)
    # Print Train vs Test report
print('{:<10s}{:>14s}{:>14s}'.format("", "LOSS", "ACCURACY"))
print(dash)
print('{:<10s}{:>14.4f}{:>14.4f}'.format( "Training:", train_score[0], 100 * train_score[1]))
print('{:<10s}{:>14.4f}{:>14.4f}'.format( "Test:", test_score[0], 100 * test_score[1]))
# Calculate and report normalized error difference?
if (calc_normal):
max_err = max(train_score[0], test_score[0])
error_diff = max_err - min(train_score[0], test_score[0])
normal_diff = error_diff * 100 / max_err
print('{:<10s}{:>13.2f}{:>1s}'.format("Normal diff ", normal_diff, ""))
def acc_per_class(np_probs_array):
"""
    Expects a NumPy array of confusion-matrix counts and returns the accuracy per class
"""
accs = []
for idx in range(0, np_probs_array.shape[0]):
correct = np_probs_array[idx][idx].astype(int)
total = np_probs_array[idx].sum().astype(int)
acc = (correct / total) * 100
accs.append(acc)
return accs
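# --- Usage sketch (added for illustration) ---
# Ties compute_confusion_matrix() (defined in the Plotting section below) to
# acc_per_class(): build the confusion matrix from true and predicted labels, then
# report the accuracy obtained for each class. Argument names are assumptions.
def example_per_class_accuracy(y_true, y_pred, class_names):
    cm = compute_confusion_matrix(y_true, y_pred, class_names, normalize=False)
    accs = acc_per_class(cm)
    for name, acc in zip(class_names, accs):
        print("{:<20s}{:>6.2f}%".format(name, acc))
    return accs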
"""
Plotting
"""
def plot_train_history(history, x_ticks_vertical=False):
history = history.history
# min loss / max accs
min_loss = min(history['loss'])
min_val_loss = min(history['val_loss'])
max_accuracy = max(history['accuracy'])
max_val_accuracy = max(history['val_accuracy'])
# x pos for loss / acc min/max
min_loss_x = history['loss'].index(min_loss)
min_val_loss_x = history['val_loss'].index(min_val_loss)
max_accuracy_x = history['accuracy'].index(max_accuracy)
max_val_accuracy_x = history['val_accuracy'].index(max_val_accuracy)
# summarize history for loss, display min
plt.figure(figsize=(16,8))
plt.plot(history['loss'], color="#1f77b4", alpha=0.7)
plt.plot(history['val_loss'], color="#ff7f0e", linestyle="--")
plt.plot(min_loss_x, min_loss, marker='o', markersize=3, color="#1f77b4", alpha=0.7, label='Inline label')
    plt.plot(min_val_loss_x, min_val_loss, marker='o', markersize=3, color="#ff7f0e", alpha=0.7, label='Inline label')
plt.title('Model loss', fontsize=20)
plt.ylabel('Loss', fontsize=16)
plt.xlabel('Epoch', fontsize=16)
plt.legend(['Train',
'Test',
('%.3f' % min_loss),
('%.3f' % min_val_loss)],
loc='upper right',
fancybox=True,
framealpha=0.9,
shadow=True,
borderpad=1)
if (x_ticks_vertical):
plt.xticks(np.arange(0, len(history['loss']), 5.0), rotation='vertical')
else:
plt.xticks(np.arange(0, len(history['loss']), 5.0))
plt.show()
# summarize history for accuracy, display max
plt.figure(figsize=(16,6))
plt.plot(history['accuracy'], alpha=0.7)
plt.plot(history['val_accuracy'], linestyle="--")
    plt.plot(max_accuracy_x, max_accuracy, marker='o', markersize=3, color="#1f77b4", alpha=0.7)
    plt.plot(max_val_accuracy_x, max_val_accuracy, marker='o', markersize=3, color="orange", alpha=0.7)
plt.title('Model accuracy', fontsize=20)
plt.ylabel('Accuracy', fontsize=16)
plt.xlabel('Epoch', fontsize=16)
plt.legend(['Train',
'Test',
('%.2f' % max_accuracy),
('%.2f' % max_val_accuracy)],
loc='upper left',
fancybox=True,
framealpha=0.9,
shadow=True,
borderpad=1)
plt.figure(num=1, figsize=(10, 6))
if (x_ticks_vertical):
plt.xticks(np.arange(0, len(history['accuracy']), 5.0), rotation='vertical')
else:
plt.xticks(np.arange(0, len(history['accuracy']), 5.0))
plt.show()
def compute_confusion_matrix(y_true,
y_pred,
classes,
normalize=False):
# Compute confusion matrix
cm = metrics.confusion_matrix(y_true, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
return cm
def plot_confusion_matrix(cm,
classes,
normalized=False,
title=None,
cmap=plt.cm.Blues,
size=(10,10)):
"""Plots a confussion matrix"""
fig, ax = plt.subplots(figsize=size)
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalized else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
plt.show()
|
[
"matplotlib.pyplot.ylabel",
"librosa.feature.mfcc",
"numpy.arange",
"librosa.load",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sklearn.metrics.confusion_matrix",
"pickle.load",
"struct.unpack",
"matplotlib.pyplot.title",
"librosa.util.normalize",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"librosa.feature.melspectrogram",
"pickle.dump",
"os.path.join",
"matplotlib.pyplot.figure",
"numpy.pad",
"matplotlib.pyplot.subplots"
] |
[((3975, 3992), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (3986, 3992), False, 'import pickle\n'), ((5073, 5118), 'os.path.join', 'os.path.join', (['audio_path', 'fold_num', 'file_name'], {}), '(audio_path, fold_num, file_name)\n', (5085, 5118), False, 'import os\n'), ((5135, 5222), 'os.path.join', 'os.path.join', (['audio_path', "dataset_row.iloc[0]['fold']", "dataset_row.iloc[0]['file']"], {}), "(audio_path, dataset_row.iloc[0]['fold'], dataset_row.iloc[0][\n 'file'])\n", (5147, 5222), False, 'import os\n'), ((7715, 7742), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (7725, 7742), True, 'import matplotlib.pyplot as plt\n'), ((7746, 7799), 'matplotlib.pyplot.plot', 'plt.plot', (["history['loss']"], {'color': '"""#1f77b4"""', 'alpha': '(0.7)'}), "(history['loss'], color='#1f77b4', alpha=0.7)\n", (7754, 7799), True, 'import matplotlib.pyplot as plt\n'), ((7804, 7866), 'matplotlib.pyplot.plot', 'plt.plot', (["history['val_loss']"], {'color': '"""#ff7f0e"""', 'linestyle': '"""--"""'}), "(history['val_loss'], color='#ff7f0e', linestyle='--')\n", (7812, 7866), True, 'import matplotlib.pyplot as plt\n'), ((7871, 7981), 'matplotlib.pyplot.plot', 'plt.plot', (['min_loss_x', 'min_loss'], {'marker': '"""o"""', 'markersize': '(3)', 'color': '"""#1f77b4"""', 'alpha': '(0.7)', 'label': '"""Inline label"""'}), "(min_loss_x, min_loss, marker='o', markersize=3, color='#1f77b4',\n alpha=0.7, label='Inline label')\n", (7879, 7981), True, 'import matplotlib.pyplot as plt\n'), ((7982, 8099), 'matplotlib.pyplot.plot', 'plt.plot', (['min_val_loss_x', 'min_val_loss'], {'marker': '"""o"""', 'markersize': '(3)', 'color': '"""#ff7f0e"""', 'alpha': '(7)', 'label': '"""Inline label"""'}), "(min_val_loss_x, min_val_loss, marker='o', markersize=3, color=\n '#ff7f0e', alpha=7, label='Inline label')\n", (7990, 8099), True, 'import matplotlib.pyplot as plt\n'), ((8099, 8135), 'matplotlib.pyplot.title', 'plt.title', (['"""Model loss"""'], {'fontsize': '(20)'}), "('Model loss', fontsize=20)\n", (8108, 8135), True, 'import matplotlib.pyplot as plt\n'), ((8140, 8171), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {'fontsize': '(16)'}), "('Loss', fontsize=16)\n", (8150, 8171), True, 'import matplotlib.pyplot as plt\n'), ((8176, 8208), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {'fontsize': '(16)'}), "('Epoch', fontsize=16)\n", (8186, 8208), True, 'import matplotlib.pyplot as plt\n'), ((8213, 8365), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Test', '%.3f' % min_loss, '%.3f' % min_val_loss]"], {'loc': '"""upper right"""', 'fancybox': '(True)', 'framealpha': '(0.9)', 'shadow': '(True)', 'borderpad': '(1)'}), "(['Train', 'Test', '%.3f' % min_loss, '%.3f' % min_val_loss], loc\n ='upper right', fancybox=True, framealpha=0.9, shadow=True, borderpad=1)\n", (8223, 8365), True, 'import matplotlib.pyplot as plt\n'), ((8685, 8695), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8693, 8695), True, 'import matplotlib.pyplot as plt\n'), ((8751, 8778), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 6)'}), '(figsize=(16, 6))\n', (8761, 8778), True, 'import matplotlib.pyplot as plt\n'), ((8782, 8822), 'matplotlib.pyplot.plot', 'plt.plot', (["history['accuracy']"], {'alpha': '(0.7)'}), "(history['accuracy'], alpha=0.7)\n", (8790, 8822), True, 'import matplotlib.pyplot as plt\n'), ((8827, 8876), 'matplotlib.pyplot.plot', 'plt.plot', (["history['val_accuracy']"], {'linestyle': '"""--"""'}), 
"(history['val_accuracy'], linestyle='--')\n", (8835, 8876), True, 'import matplotlib.pyplot as plt\n'), ((8881, 8976), 'matplotlib.pyplot.plot', 'plt.plot', (['max_accuracy_x', 'max_accuracy'], {'marker': '"""o"""', 'markersize': '(3)', 'color': '"""#1f77b4"""', 'alpha': '(7)'}), "(max_accuracy_x, max_accuracy, marker='o', markersize=3, color=\n '#1f77b4', alpha=7)\n", (8889, 8976), True, 'import matplotlib.pyplot as plt\n'), ((8976, 9077), 'matplotlib.pyplot.plot', 'plt.plot', (['max_val_accuracy_x', 'max_val_accuracy'], {'marker': '"""o"""', 'markersize': '(3)', 'color': '"""orange"""', 'alpha': '(7)'}), "(max_val_accuracy_x, max_val_accuracy, marker='o', markersize=3,\n color='orange', alpha=7)\n", (8984, 9077), True, 'import matplotlib.pyplot as plt\n'), ((9078, 9118), 'matplotlib.pyplot.title', 'plt.title', (['"""Model accuracy"""'], {'fontsize': '(20)'}), "('Model accuracy', fontsize=20)\n", (9087, 9118), True, 'import matplotlib.pyplot as plt\n'), ((9123, 9158), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {'fontsize': '(16)'}), "('Accuracy', fontsize=16)\n", (9133, 9158), True, 'import matplotlib.pyplot as plt\n'), ((9163, 9195), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {'fontsize': '(16)'}), "('Epoch', fontsize=16)\n", (9173, 9195), True, 'import matplotlib.pyplot as plt\n'), ((9200, 9362), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Test', '%.2f' % max_accuracy, '%.2f' % max_val_accuracy]"], {'loc': '"""upper left"""', 'fancybox': '(True)', 'framealpha': '(0.9)', 'shadow': '(True)', 'borderpad': '(1)'}), "(['Train', 'Test', '%.2f' % max_accuracy, '%.2f' %\n max_val_accuracy], loc='upper left', fancybox=True, framealpha=0.9,\n shadow=True, borderpad=1)\n", (9210, 9362), True, 'import matplotlib.pyplot as plt\n'), ((9499, 9533), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(1)', 'figsize': '(10, 6)'}), '(num=1, figsize=(10, 6))\n', (9509, 9533), True, 'import matplotlib.pyplot as plt\n'), ((9726, 9736), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9734, 9736), True, 'import matplotlib.pyplot as plt\n'), ((9900, 9940), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (9924, 9940), False, 'from sklearn import metrics\n'), ((10324, 10350), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'size'}), '(figsize=size)\n', (10336, 10350), True, 'import matplotlib.pyplot as plt\n'), ((11317, 11327), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11325, 11327), True, 'import matplotlib.pyplot as plt\n'), ((538, 561), 'librosa.load', 'librosa.load', (['file_path'], {}), '(file_path)\n', (550, 561), False, 'import librosa\n'), ((634, 659), 'librosa.util.normalize', 'librosa.util.normalize', (['y'], {}), '(y)\n', (656, 659), False, 'import librosa\n'), ((717, 783), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['normalized_y'], {'sr': 'sr', 'n_mels': 'n_mels'}), '(normalized_y, sr=sr, n_mels=n_mels)\n', (747, 783), False, 'import librosa\n'), ((951, 981), 'librosa.util.normalize', 'librosa.util.normalize', (['mel_db'], {}), '(mel_db)\n', (973, 981), False, 'import librosa\n'), ((1660, 1692), 'librosa.load', 'librosa.load', (['file_path'], {'sr': 'None'}), '(file_path, sr=None)\n', (1672, 1692), False, 'import librosa\n'), ((1922, 1947), 'librosa.util.normalize', 'librosa.util.normalize', (['y'], {}), '(y)\n', (1944, 1947), False, 'import librosa\n'), ((2000, 2058), 'librosa.feature.mfcc', 'librosa.feature.mfcc', 
([], {'y': 'normalized_y', 'sr': 'sr', 'n_mfcc': 'n_mfcc'}), '(y=normalized_y, sr=sr, n_mfcc=n_mfcc)\n', (2020, 2058), False, 'import librosa\n'), ((2128, 2156), 'librosa.util.normalize', 'librosa.util.normalize', (['mfcc'], {}), '(mfcc)\n', (2150, 2156), False, 'import librosa\n'), ((3843, 3870), 'pickle.dump', 'pickle.dump', (['split', 'file_pi'], {}), '(split, file_pi)\n', (3854, 3870), False, 'import pickle\n'), ((4548, 4588), 'struct.unpack', 'struct.unpack', (['"""<H"""', 'num_channels_string'], {}), "('<H', num_channels_string)\n", (4561, 4588), False, 'import struct\n'), ((4647, 4686), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'sample_rate_string'], {}), "('<I', sample_rate_string)\n", (4660, 4686), False, 'import struct\n'), ((4740, 4777), 'struct.unpack', 'struct.unpack', (['"""<H"""', 'bit_depth_string'], {}), "('<H', bit_depth_string)\n", (4753, 4777), False, 'import struct\n'), ((1257, 1333), 'numpy.pad', 'np.pad', (['normalized_mel'], {'pad_width': '((0, 0), (xLeft, xRight))', 'mode': '"""constant"""'}), "(normalized_mel, pad_width=((0, 0), (xLeft, xRight)), mode='constant')\n", (1263, 1333), True, 'import numpy as np\n'), ((1757, 1783), 'librosa.load', 'librosa.load', (['robots_noise'], {}), '(robots_noise)\n', (1769, 1783), False, 'import librosa\n'), ((2354, 2456), 'numpy.pad', 'np.pad', (['normalized_mfcc'], {'pad_width': '((0, 0), (0, pad_width))', 'mode': '"""constant"""', 'constant_values': '(0,)'}), "(normalized_mfcc, pad_width=((0, 0), (0, pad_width)), mode='constant',\n constant_values=(0,))\n", (2360, 2456), True, 'import numpy as np\n'), ((3104, 3168), 'numpy.pad', 'np.pad', (['px'], {'pad_width': '((0, 0), (xLeft, xRight))', 'mode': '"""constant"""'}), "(px, pad_width=((0, 0), (xLeft, xRight)), mode='constant')\n", (3110, 3168), True, 'import numpy as np\n'), ((10498, 10520), 'numpy.arange', 'np.arange', (['cm.shape[1]'], {}), '(cm.shape[1])\n', (10507, 10520), True, 'import numpy as np\n'), ((10540, 10562), 'numpy.arange', 'np.arange', (['cm.shape[0]'], {}), '(cm.shape[0])\n', (10549, 10562), True, 'import numpy as np\n')]
|
import os
import tarfile
import time
import pickle
import numpy as np
from Bio.Seq import Seq
from scipy.special import expit
from scipy.special import logit
import torch
import torch.nn.functional as F
""" Get directories for model and seengenes """
module_dir = os.path.dirname(os.path.realpath(__file__))
model_dir = os.path.join(module_dir, "balrog_models")
""" Print what the program is doing."""
verbose = True
""" Use kmer prefilter to increase gene sensitivity.
May not play nice with very high GC genomes."""
protein_kmer_filter = False
""" Nucleotide to amino acid translation table. 11 for most bacteria/archaea.
4 for Mycoplasma/Spiroplasma."""
translation_table = 11
# translation_table = 4
""" Batch size for the temporal convolutional network used to score genes.
Small batches and big batches slow down the model. Very big batches may crash the
GPU. """
gene_batch_size = 200
TIS_batch_size = 1000
""" All following are internal parameters. Change at your own risk."""
weight_gene_prob = 0.9746869839852076
weight_TIS_prob = 0.25380288790532707
score_threshold = 0.47256101519707244
weight_ATG = 0.84249804151264
weight_GTG = 0.7083689705744909
weight_TTG = 0.7512400826652517
unidirectional_penalty_per_base = 3.895921717182765 # 3' 5' overlap
convergent_penalty_per_base = 4.603432608883688 # 3' 3' overlap
divergent_penalty_per_base = 3.3830814940689975 # 5' 5' overlap
k_seengene = 10
multimer_threshold = 2
nuc_encode = {"A": 0,
"T": 1,
"G": 2,
"C": 3,
"N": 0,
"M": 0,
"R": 0,
"Y": 0,
"W": 0,
"K": 0}
start_enc = {"ATG": 0,
"GTG": 1,
"TTG": 2}
aa_table = {"L": 1,
"V": 2,
"I": 3,
"M": 4,
"C": 5,
"A": 6,
"G": 7,
"S": 8,
"T": 9,
"P": 10,
"F": 11,
"Y": 12,
"W": 13,
"E": 14,
"D": 15,
"N": 16,
"Q": 17,
"K": 18,
"R": 19,
"H": 20,
"*": 0,
"X": 0}
# generate ORF sequences from coordinates
# @profile
def generate_sequence(graph_vector, nodelist, node_coords, overlap):
sequence = ""
for i in range(0, len(nodelist)):
id = nodelist[i]
coords = node_coords[i]
# calculate strand based on value of node (if negative, strand is false)
strand = True if id >= 0 else False
if strand:
unitig_seq = graph_vector[abs(id) - 1].seq
else:
unitig_seq = str(Seq(graph_vector[abs(id) - 1].seq).reverse_complement())
        if len(sequence) == 0:
            substring = unitig_seq[coords[0]:(coords[1] + 1)]
        elif coords[1] >= overlap:
            substring = unitig_seq[overlap:(coords[1] + 1)]
        else:
            # node ends inside the overlap region, so it contributes no new sequence
            substring = ""
        sequence += substring
return sequence
#@profile
def tokenize_aa_seq(aa_seq):
""" Convert amino acid letters to integers."""
tokenized = torch.tensor([aa_table[aa] for aa in aa_seq])
return tokenized
#@profile
def get_ORF_info(ORF_vector, graph, overlap):
ORF_seq_list = []
TIS_seqs = []
# iterate over list of ORFs
for ORFNodeVector in ORF_vector:
# need to determine ORF sequences from paths
ORF_nodelist = ORFNodeVector[0]
ORF_node_coords = ORFNodeVector[1]
TIS_nodelist = ORFNodeVector[3]
TIS_node_coords = ORFNodeVector[4]
# generate ORF_seq, as well as upstream and downstream TIS seq
ORF_seq = graph.generate_sequence(ORF_nodelist, ORF_node_coords, overlap)
upstream_TIS_seq = graph.generate_sequence(TIS_nodelist, TIS_node_coords, overlap)
downstream_TIS_seq = ORF_seq[0:19]
# generate Seq class for translation
seq = Seq(ORF_seq)
# translate once per frame, then slice. Note, do not include start or stop codons
aa = str(seq[3:-3].translate(table=translation_table, to_stop=False))
ORF_seq_list.append(aa)
TIS_seqs.append((upstream_TIS_seq, downstream_TIS_seq))
# convert amino acids into integers
ORF_seq_enc = [tokenize_aa_seq(x) for x in ORF_seq_list]
return ORF_seq_enc, TIS_seqs
#@profile
def predict(model, X):
model.eval()
with torch.no_grad():
if torch.cuda.device_count() > 0:
X_enc = F.one_hot(X, 21).permute(0, 2, 1).float().cuda()
probs = expit(model(X_enc).cpu())
del X_enc
torch.cuda.empty_cache()
else:
X_enc = F.one_hot(X, 21).permute(0, 2, 1).float()
probs = expit(model(X_enc).cpu())
return probs
#@profile
def predict_tis(model_tis, X):
model_tis.eval()
with torch.no_grad():
if torch.cuda.device_count() > 0:
X_enc = F.one_hot(X, 4).permute(0, 2, 1).float().cuda()
else:
X_enc = F.one_hot(X, 4).permute(0, 2, 1).float()
probs = expit(model_tis(X_enc).cpu())
return probs
#@profile
def kmerize(seq, k):
kmerset = set()
for i in range(len(seq) - k + 1):
kmer = tuple(seq[i: i + k].tolist())
kmerset.add(kmer)
return kmerset
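# --- Illustration of the k-mer prefilter described above (a sketch, not from the
# original source). A candidate ORF counts as "seen" when at least multimer_threshold
# of its k_seengene-length amino-acid k-mers occur in the reference set returned by
# load_kmer_model(). Mirrors the check performed inside score_genes().
def example_seengene_check(aa_tensor, aa_kmer_set):
    kmerset = kmerize(aa_tensor, k_seengene)
    hits = np.isin(list(kmerset), aa_kmer_set)
    return np.count_nonzero(hits) >= multimer_threshold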
def load_kmer_model():
# check if directory exists. If not, unzip file
if not os.path.exists(model_dir):
tar = tarfile.open(model_dir + ".tar.gz", mode="r:gz")
tar.extractall(module_dir)
tar.close()
"""Load k-mer filters"""
genexa_kmer_path = os.path.join(model_dir, "10mer_thresh2_minusARF_all.pkl")
with open(genexa_kmer_path, "rb") as f:
aa_kmer_set = pickle.load(f)
return aa_kmer_set
def load_gene_models():
# check if directory exists. If not, unzip file
if not os.path.exists(model_dir):
tar = tarfile.open(model_dir + ".tar.gz", mode="r:gz")
tar.extractall(module_dir)
tar.close()
torch.hub.set_dir(model_dir)
# print("Loading convolutional model...")
if torch.cuda.device_count() > 0:
# print("GPU detected...")
model = torch.hub.load(model_dir, "geneTCN", source='local').cuda()
model_tis = torch.hub.load(model_dir, "tisTCN", source='local').cuda()
time.sleep(0.5)
else:
# print("No GPU detected, using CPU...")
model = torch.hub.load(model_dir, "geneTCN", source='local')
model_tis = torch.hub.load(model_dir, "tisTCN", source='local')
time.sleep(0.5)
return (model, model_tis)
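# --- Usage sketch (added for illustration): scoring one translated ORF with the gene
# TCN. Assumes the bundled balrog_models archive is available next to this module; the
# amino-acid string is a made-up example.
def example_score_single_orf(aa_seq="MSKGEELFTGVVPILVELDGDVNGHKFSVSG"):
    model, model_tis = load_gene_models()
    tokens = tokenize_aa_seq(aa_seq).unsqueeze(0)  # batch containing one sequence
    probs = predict(model, tokens)                  # per-residue gene probabilities
    return float(probs.mean())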
#@profile
def score_genes(ORF_vector, graph_vector, minimum_ORF_score, overlap, model, model_tis, aa_kmer_set):
# get sequences and coordinates of ORFs
# print("Finding and translating open reading frames...")
ORF_seq_enc, TIS_seqs = get_ORF_info(ORF_vector, graph_vector, overlap)
# seengene check
if protein_kmer_filter:
seengene = []
for s in ORF_seq_enc:
kmerset = kmerize(s, k_seengene)
# s = [x in aa_kmer_set for x in kmerset]
s = np.isin(list(kmerset), aa_kmer_set)
seen = np.count_nonzero(s) >= multimer_threshold
seengene.append(seen)
# score
# print("Scoring ORFs with temporal convolutional network...")
# sort by length to minimize impact of batch padding
ORF_lengths = np.asarray([len(x) for x in ORF_seq_enc])
length_idx = np.argsort(ORF_lengths)
ORF_seq_sorted = [ORF_seq_enc[i] for i in length_idx]
# pad to allow creation of batch matrix
prob_list = []
for i in range(0, len(ORF_seq_sorted), gene_batch_size):
batch = ORF_seq_sorted[i:i + gene_batch_size]
seq_lengths = torch.LongTensor(list(map(len, batch)))
seq_tensor = torch.zeros((len(batch), seq_lengths.max())).long()
for idx, (seq, seqlen) in enumerate(zip(batch, seq_lengths)):
seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
pred_all = predict(model, seq_tensor)
pred = []
for j, length in enumerate(seq_lengths):
subseq = pred_all[j, 0, 0:int(length)]
predprob = float(expit(torch.mean(logit(subseq))))
pred.append(predprob)
prob_list.extend(pred)
prob_arr = np.asarray(prob_list, dtype=float)
# unsort
unsort_idx = np.argsort(length_idx)
ORF_prob = prob_arr[unsort_idx]
# recombine ORFs
    ORF_gene_score = [float(prob) for prob in ORF_prob]
# print("Scoring translation initiation sites...")
# extract nucleotide sequence surrounding potential start codons
ORF_TIS_seq_flat = []
ORF_TIS_seq_idx = []
ORF_TIS_prob = [None] * len(TIS_seqs)
ORF_start_codon = [None] * len(ORF_seq_enc)
for i, TIS in enumerate(TIS_seqs):
        # unpack tuple. Note, downstream includes the start codon, which needs to be removed
upstream, downstream = TIS
if len(upstream) == 16:
TIS_seq = torch.tensor([nuc_encode[c] for c in (upstream + downstream[3:])[::-1]],
dtype=int) # model scores 3' to 5' direction
ORF_TIS_seq_flat.append(TIS_seq)
ORF_TIS_seq_idx.append(i)
else:
ORF_TIS_prob[i] = 0.5
# encode start codon
start_codon = start_enc[downstream[0:3]]
ORF_start_codon[i] = start_codon
# batch score TIS
TIS_prob_list = []
for i in range(0, len(ORF_TIS_seq_flat), TIS_batch_size):
batch = ORF_TIS_seq_flat[i:i + TIS_batch_size]
TIS_stacked = torch.stack(batch)
pred = predict_tis(model_tis, TIS_stacked)
TIS_prob_list.extend(pred)
y_pred_TIS = np.asarray(TIS_prob_list, dtype=float)
# reindex batched scores
for i, prob in enumerate(y_pred_TIS):
idx = ORF_TIS_seq_idx[i]
ORF_TIS_prob[idx] = float(prob)
# combine all info into single score for each ORF
if protein_kmer_filter:
ORF_score_flat = []
for i, geneprob in enumerate(ORF_gene_score):
if not geneprob:
ORF_score_flat.append(None)
continue
# calculate length by multiplying number of amino acids by 3, then adding 6 for start and stop
length = (len(ORF_seq_enc[i]) * 3) + 6
TIS_prob = ORF_TIS_prob[i]
start_codon = ORF_start_codon[i]
ATG = start_codon == 0
GTG = start_codon == 1
TTG = start_codon == 2
combprob = geneprob * weight_gene_prob \
+ TIS_prob * weight_TIS_prob \
+ ATG * weight_ATG \
+ GTG * weight_GTG \
+ TTG * weight_TTG
maxprob = weight_gene_prob + weight_TIS_prob + max(weight_ATG, weight_TTG, weight_GTG)
probthresh = score_threshold * maxprob
            # seengene has one entry per ORF, so index it directly by i
            score = (combprob - probthresh) * length + 1e6 * seengene[i]
ORF_score_flat.append(score)
else:
ORF_score_flat = []
for i, geneprob in enumerate(ORF_gene_score):
if not geneprob:
ORF_score_flat.append(None)
continue
# calculate length by multiplying number of amino acids by 3, then adding 6 for start and stop
length = len(ORF_seq_enc[i]) * 3
TIS_prob = ORF_TIS_prob[i]
start_codon = ORF_start_codon[i]
ATG = start_codon == 0
GTG = start_codon == 1
TTG = start_codon == 2
combprob = geneprob * weight_gene_prob \
+ TIS_prob * weight_TIS_prob \
+ ATG * weight_ATG \
+ GTG * weight_GTG \
+ TTG * weight_TTG
maxprob = weight_gene_prob + weight_TIS_prob + max(weight_ATG, weight_TTG, weight_GTG)
probthresh = score_threshold * maxprob
score = (combprob - probthresh) * length
ORF_score_flat.append(score)
# update initial dictionary, removing low scoring ORFs and create score mapping score within a tuple
ORF_score_dict = {}
for i, score in enumerate(ORF_score_flat):
# if score greater than minimum, add to the ORF_score_dict
        if score is not None and score >= minimum_ORF_score:  # None marks unscored ORFs
ORF_score_dict[i] = score
return ORF_score_dict
|
[
"tarfile.open",
"torch.LongTensor",
"Bio.Seq.Seq",
"torch.cuda.device_count",
"time.sleep",
"numpy.argsort",
"numpy.count_nonzero",
"os.path.exists",
"numpy.asarray",
"torch.hub.load",
"pickle.load",
"torch.hub.set_dir",
"torch.nn.functional.one_hot",
"torch.cuda.empty_cache",
"torch.stack",
"os.path.join",
"os.path.realpath",
"torch.tensor",
"scipy.special.logit",
"torch.no_grad"
] |
[((322, 363), 'os.path.join', 'os.path.join', (['module_dir', '"""balrog_models"""'], {}), "(module_dir, 'balrog_models')\n", (334, 363), False, 'import os\n'), ((282, 308), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (298, 308), False, 'import os\n'), ((3091, 3136), 'torch.tensor', 'torch.tensor', (['[aa_table[aa] for aa in aa_seq]'], {}), '([aa_table[aa] for aa in aa_seq])\n', (3103, 3136), False, 'import torch\n'), ((5546, 5603), 'os.path.join', 'os.path.join', (['model_dir', '"""10mer_thresh2_minusARF_all.pkl"""'], {}), "(model_dir, '10mer_thresh2_minusARF_all.pkl')\n", (5558, 5603), False, 'import os\n'), ((5949, 5977), 'torch.hub.set_dir', 'torch.hub.set_dir', (['model_dir'], {}), '(model_dir)\n', (5966, 5977), False, 'import torch\n'), ((7391, 7414), 'numpy.argsort', 'np.argsort', (['ORF_lengths'], {}), '(ORF_lengths)\n', (7401, 7414), True, 'import numpy as np\n'), ((8229, 8263), 'numpy.asarray', 'np.asarray', (['prob_list'], {'dtype': 'float'}), '(prob_list, dtype=float)\n', (8239, 8263), True, 'import numpy as np\n'), ((8295, 8317), 'numpy.argsort', 'np.argsort', (['length_idx'], {}), '(length_idx)\n', (8305, 8317), True, 'import numpy as np\n'), ((9745, 9783), 'numpy.asarray', 'np.asarray', (['TIS_prob_list'], {'dtype': 'float'}), '(TIS_prob_list, dtype=float)\n', (9755, 9783), True, 'import numpy as np\n'), ((3892, 3904), 'Bio.Seq.Seq', 'Seq', (['ORF_seq'], {}), '(ORF_seq)\n', (3895, 3904), False, 'from Bio.Seq import Seq\n'), ((4368, 4383), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4381, 4383), False, 'import torch\n'), ((4814, 4829), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4827, 4829), False, 'import torch\n'), ((5348, 5373), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (5362, 5373), False, 'import os\n'), ((5389, 5437), 'tarfile.open', 'tarfile.open', (["(model_dir + '.tar.gz')"], {'mode': '"""r:gz"""'}), "(model_dir + '.tar.gz', mode='r:gz')\n", (5401, 5437), False, 'import tarfile\n'), ((5671, 5685), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5682, 5685), False, 'import pickle\n'), ((5799, 5824), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (5813, 5824), False, 'import os\n'), ((5840, 5888), 'tarfile.open', 'tarfile.open', (["(model_dir + '.tar.gz')"], {'mode': '"""r:gz"""'}), "(model_dir + '.tar.gz', mode='r:gz')\n", (5852, 5888), False, 'import tarfile\n'), ((6031, 6056), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6054, 6056), False, 'import torch\n'), ((6260, 6275), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (6270, 6275), False, 'import time\n'), ((6351, 6403), 'torch.hub.load', 'torch.hub.load', (['model_dir', '"""geneTCN"""'], {'source': '"""local"""'}), "(model_dir, 'geneTCN', source='local')\n", (6365, 6403), False, 'import torch\n'), ((6424, 6475), 'torch.hub.load', 'torch.hub.load', (['model_dir', '"""tisTCN"""'], {'source': '"""local"""'}), "(model_dir, 'tisTCN', source='local')\n", (6438, 6475), False, 'import torch\n'), ((6484, 6499), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (6494, 6499), False, 'import time\n'), ((9622, 9640), 'torch.stack', 'torch.stack', (['batch'], {}), '(batch)\n', (9633, 9640), False, 'import torch\n'), ((4396, 4421), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4419, 4421), False, 'import torch\n'), ((4576, 4600), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (4598, 4600), False, 'import torch\n'), ((4842, 
4867), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4865, 4867), False, 'import torch\n'), ((7897, 7918), 'torch.LongTensor', 'torch.LongTensor', (['seq'], {}), '(seq)\n', (7913, 7918), False, 'import torch\n'), ((9032, 9119), 'torch.tensor', 'torch.tensor', (['[nuc_encode[c] for c in (upstream + downstream[3:])[::-1]]'], {'dtype': 'int'}), '([nuc_encode[c] for c in (upstream + downstream[3:])[::-1]],\n dtype=int)\n', (9044, 9119), False, 'import torch\n'), ((6113, 6165), 'torch.hub.load', 'torch.hub.load', (['model_dir', '"""geneTCN"""'], {'source': '"""local"""'}), "(model_dir, 'geneTCN', source='local')\n", (6127, 6165), False, 'import torch\n'), ((6193, 6244), 'torch.hub.load', 'torch.hub.load', (['model_dir', '"""tisTCN"""'], {'source': '"""local"""'}), "(model_dir, 'tisTCN', source='local')\n", (6207, 6244), False, 'import torch\n'), ((7100, 7119), 'numpy.count_nonzero', 'np.count_nonzero', (['s'], {}), '(s)\n', (7116, 7119), True, 'import numpy as np\n'), ((8131, 8144), 'scipy.special.logit', 'logit', (['subseq'], {}), '(subseq)\n', (8136, 8144), False, 'from scipy.special import logit\n'), ((4635, 4651), 'torch.nn.functional.one_hot', 'F.one_hot', (['X', '(21)'], {}), '(X, 21)\n', (4644, 4651), True, 'import torch.nn.functional as F\n'), ((4975, 4990), 'torch.nn.functional.one_hot', 'F.one_hot', (['X', '(4)'], {}), '(X, 4)\n', (4984, 4990), True, 'import torch.nn.functional as F\n'), ((4447, 4463), 'torch.nn.functional.one_hot', 'F.one_hot', (['X', '(21)'], {}), '(X, 21)\n', (4456, 4463), True, 'import torch.nn.functional as F\n'), ((4893, 4908), 'torch.nn.functional.one_hot', 'F.one_hot', (['X', '(4)'], {}), '(X, 4)\n', (4902, 4908), True, 'import torch.nn.functional as F\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 18:18:29 2020
@author: xuhuiying
"""
import numpy as np
import matplotlib.pyplot as plt
def plotHistory(history, times, xLabelText, yLabelText, legendText):  # plot each history
    history = np.array(history)  # history is a 2D array
history = history.T
iteration = range(0,times)
# plt.figure()
for j in range(0,history.shape[0]):
plt.plot(iteration,history[j],label = "%s %d"%(legendText,j + 1))
# plt.legend(loc='upper left',prop = {'size': 10},handlelength = 1)
plt.xlabel(xLabelText,fontsize = 8)
plt.ylabel(yLabelText,fontsize = 8)
plt.tick_params(labelsize=8)
# plt.savefig('%sWith%dSellersAnd%dBuyer.jpg'%(fileNamePre,N,M), dpi=300,bbox_inches = 'tight')
# plt.show()
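# --- Usage sketch (added for illustration): `history` is a list of per-iteration rows,
# one column per curve. The synthetic data below is made up.
if __name__ == "__main__":
    times = 50
    demo_history = [[np.sin(t / 5.0) + j for j in range(3)] for t in range(times)]
    plt.figure()
    plotHistory(demo_history, times, "Iteration", "Price", "Seller")
    plt.show()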
|
[
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.plot",
"numpy.array"
] |
[((273, 290), 'numpy.array', 'np.array', (['history'], {}), '(history)\n', (281, 290), True, 'import numpy as np\n'), ((589, 623), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xLabelText'], {'fontsize': '(8)'}), '(xLabelText, fontsize=8)\n', (599, 623), True, 'import matplotlib.pyplot as plt\n'), ((629, 663), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['yLabelText'], {'fontsize': '(8)'}), '(yLabelText, fontsize=8)\n', (639, 663), True, 'import matplotlib.pyplot as plt\n'), ((669, 697), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(8)'}), '(labelsize=8)\n', (684, 697), True, 'import matplotlib.pyplot as plt\n'), ((448, 516), 'matplotlib.pyplot.plot', 'plt.plot', (['iteration', 'history[j]'], {'label': "('%s %d' % (legendText, j + 1))"}), "(iteration, history[j], label='%s %d' % (legendText, j + 1))\n", (456, 516), True, 'import matplotlib.pyplot as plt\n')]
|
import os
from bc import Imitator
import numpy as np
from dataset import Example, Dataset
import utils
#from ale_wrapper import ALEInterfaceWrapper
from evaluator import Evaluator
from pdb import set_trace
import matplotlib.pyplot as plt
#try bmh
plt.style.use('bmh')
def smooth(losses, run=10):
new_losses = []
for i in range(len(losses)):
        new_losses.append(np.mean(losses[max(0, i - run):i+1]))
return new_losses
def plot(losses, checkpoint_dir, env_name):
print("Plotting losses to ", os.path.join(checkpoint_dir, env_name + "_loss.png"))
p=plt.plot(smooth(losses, 25))
plt.xlabel("Update")
plt.ylabel("Loss")
plt.legend(loc='lower center')
plt.savefig(os.path.join(checkpoint_dir, env_name + "loss.png"))
def train(env_name,
minimal_action_set,
learning_rate,
alpha,
l2_penalty,
minibatch_size,
hist_len,
discount,
checkpoint_dir,
updates,
dataset,
validation_dataset,
num_eval_episodes,
epsilon_greedy,
extra_info):
import tracemalloc
# create DQN agent
agent = Imitator(list(minimal_action_set),
learning_rate,
alpha,
checkpoint_dir,
hist_len,
l2_penalty)
print("Beginning training...")
log_frequency = 500
log_num = log_frequency
update = 1
running_loss = 0.
best_v_loss = np.float('inf')
count = 0
while update < updates:
# snapshot = tracemalloc.take_snapshot()
# top_stats = snapshot.statistics('lineno')
# import gc
# for obj in gc.get_objects():
# try:
# if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
# print(type(obj), obj.size())
# except:
# pass
#
# print("[ Top 10 ]")
# for stat in top_stats[:10]:
# print(stat)
if update > log_num:
print(str(update) + " updates completed. Loss {}".format(running_loss / log_frequency))
log_num += log_frequency
running_loss = 0
#run validation loss test
v_loss = agent.validate(validation_dataset, 10)
print("Validation accuracy = {}".format(v_loss / validation_dataset.size))
if v_loss > best_v_loss:
count += 1
if count > 5:
print("validation not improing for {} steps. Stopping to prevent overfitting".format(count))
break
else:
best_v_loss = v_loss
print("updating best vloss", best_v_loss)
count = 0
l = agent.train(dataset, minibatch_size)
running_loss += l
update += 1
print("Training completed.")
agent.checkpoint_network(env_name, extra_info)
#Plot losses
#Evaluation
print("beginning evaluation")
evaluator = Evaluator(env_name, num_eval_episodes, checkpoint_dir, epsilon_greedy)
evaluator.evaluate(agent)
return agent
def train_transitions(env_name,
minimal_action_set,
learning_rate,
alpha,
l2_penalty,
minibatch_size,
hist_len,
discount,
checkpoint_dir,
updates,
dataset,
num_eval_episodes):
# create DQN agent
agent = Imitator(list(minimal_action_set),
learning_rate,
alpha,
checkpoint_dir,
hist_len,
l2_penalty)
print("Beginning training...")
log_frequency = 1000
log_num = log_frequency
update = 1
running_loss = 0.
while update < updates:
if update > log_num:
print(str(update) + " updates completed. Loss {}".format(running_loss / log_frequency))
log_num += log_frequency
running_loss = 0
l = agent.train(dataset, minibatch_size)
running_loss += l
update += 1
print("Training completed.")
agent.checkpoint_network(env_name + "_transitions")
    # calculate accuracy
#Evaluation
#evaluator = Evaluator(env_name, num_eval_episodes)
#evaluator.evaluate(agent)
return agent
if __name__ == '__main__':
train()
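    # The bare train() call above omits the function's required arguments; a
    # hypothetical invocation (every value below is a placeholder, not taken from
    # the original experiments) would look roughly like:
    #
    # agent = train(env_name="pong",
    #               minimal_action_set=[0, 1, 2, 3],
    #               learning_rate=1e-4,
    #               alpha=0.95,
    #               l2_penalty=1e-5,
    #               minibatch_size=32,
    #               hist_len=4,
    #               discount=0.99,
    #               checkpoint_dir="./checkpoints",
    #               updates=10000,
    #               dataset=dataset,
    #               validation_dataset=validation_dataset,
    #               num_eval_episodes=10,
    #               epsilon_greedy=0.01,
    #               extra_info="bc")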
|
[
"numpy.float",
"matplotlib.pyplot.ylabel",
"evaluator.Evaluator",
"matplotlib.pyplot.xlabel",
"os.path.join",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.legend"
] |
[((247, 267), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""bmh"""'], {}), "('bmh')\n", (260, 267), True, 'import matplotlib.pyplot as plt\n'), ((619, 639), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Update"""'], {}), "('Update')\n", (629, 639), True, 'import matplotlib.pyplot as plt\n'), ((648, 666), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (658, 666), True, 'import matplotlib.pyplot as plt\n'), ((675, 705), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower center"""'}), "(loc='lower center')\n", (685, 705), True, 'import matplotlib.pyplot as plt\n'), ((1482, 1497), 'numpy.float', 'np.float', (['"""inf"""'], {}), "('inf')\n", (1490, 1497), True, 'import numpy as np\n'), ((3025, 3095), 'evaluator.Evaluator', 'Evaluator', (['env_name', 'num_eval_episodes', 'checkpoint_dir', 'epsilon_greedy'], {}), '(env_name, num_eval_episodes, checkpoint_dir, epsilon_greedy)\n', (3034, 3095), False, 'from evaluator import Evaluator\n'), ((518, 570), 'os.path.join', 'os.path.join', (['checkpoint_dir', "(env_name + '_loss.png')"], {}), "(checkpoint_dir, env_name + '_loss.png')\n", (530, 570), False, 'import os\n'), ((726, 777), 'os.path.join', 'os.path.join', (['checkpoint_dir', "(env_name + 'loss.png')"], {}), "(checkpoint_dir, env_name + 'loss.png')\n", (738, 777), False, 'import os\n')]
|
#
# See top-level LICENSE.rst file for Copyright information
#
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
from ..defs import (task_name_sep, task_state_to_int, task_int_to_state)
from ...util import option_list
from ...io import findfile
from .base import (BaseTask, task_classes)
from desiutil.log import get_logger
import sys,re,os,glob
import numpy as np
# NOTE: only one class in this file should have a name that starts with "Task".
class TaskPSFNight(BaseTask):
"""Class containing the properties of one PSF combined night task.
"""
def __init__(self):
super(TaskPSFNight, self).__init__()
# then put int the specifics of this class
# _cols must have a state
self._type = "psfnight"
self._cols = [
"night",
"band",
"spec",
"state"
]
self._coltypes = [
"integer",
"text",
"integer",
"integer"
]
# _name_fields must also be in _cols
self._name_fields = ["night","band","spec"]
self._name_formats = ["08d","s","d"]
def _paths(self, name):
"""See BaseTask.paths.
"""
props = self.name_split(name)
camera = "{}{}".format(props["band"], props["spec"])
return [ findfile("psfnight", night=props["night"],
camera=camera, groupname=None, nside=None, band=props["band"],
spectrograph=props["spec"]) ]
def _deps(self, name, db, inputs):
"""See BaseTask.deps.
"""
return dict()
def _run_max_procs(self):
# This is a serial task.
return 1
def _run_time(self, name, procs, db):
# Run time on one proc on machine with scale factor == 1.0
return 2.0
def _run_defaults(self):
"""See BaseTask.run_defaults.
"""
return {}
def _option_dict(self, name, opts):
"""Build the full list of options.
This includes appending the filenames and incorporating runtime
options.
"""
from .base import task_classes, task_type
options = OrderedDict()
options["output"] = self.paths(name)[0]
# look for psf for this night on disk
options["input"] = []
props = self.name_split(name)
camera = "{}{}".format(props["band"], props["spec"])
dummy_expid = 99999999
template_input = findfile("psf", night=props["night"], expid=dummy_expid,
camera=camera,
band=props["band"],
spectrograph=props["spec"])
template_input = template_input.replace("{:08d}".format(dummy_expid),"????????")
options["input"] = glob.glob(template_input)
return options
def _option_list(self, name, opts):
"""Build the full list of options.
This includes appending the filenames and incorporating runtime
options.
"""
return option_list(self._option_dict(name,opts))
def _run_cli(self, name, opts, procs, db):
"""See BaseTask.run_cli.
"""
optlist = self._option_list(name, opts)
com = "# command line for psfnight not implemented"
return com
def _run(self, name, opts, comm, db):
"""See BaseTask.run.
"""
from ...scripts import specex
optdict = self._option_dict(name, opts)
specex.mean_psf(optdict["input"], optdict["output"])
return
def getready(self, db, name, cur):
"""Checks whether dependencies are ready"""
log = get_logger()
# look for the state of psf with same night,band,spectro
props = self.name_split(name)
cmd = "select state from psf where night={} and band='{}' and spec={}".format(props["night"],props["band"],props["spec"])
cur.execute(cmd)
states = np.array([ x for (x,) in cur.fetchall() ])
log.debug("states={}".format(states))
# psfnight ready if all psf from the night have been processed, and at least one is done (failures are allowed)
n_done = np.sum(states==task_state_to_int["done"])
n_failed = np.sum(states==task_state_to_int["failed"])
ready = (n_done > 0) & ( (n_done + n_failed) == states.size )
if ready :
self.state_set(db=db,name=name,state="ready",cur=cur)
def postprocessing(self, db, name, cur):
"""For successful runs, postprocessing on DB"""
# run getready for all extraction with same night,band,spec
props = self.name_split(name)
log = get_logger()
tt = "traceshift"
cmd = "select name from {} where night={} and band='{}' and spec={} and state=0".format(tt,props["night"],props["band"],props["spec"])
cur.execute(cmd)
tasks = [ x for (x,) in cur.fetchall() ]
log.debug("checking {}".format(tasks))
for task in tasks :
task_classes[tt].getready( db=db,name=task,cur=cur)
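
# A small illustration (not part of the original module) of the filename-template
# trick used in _option_dict above: build a concrete name with a dummy exposure id,
# then turn it into a glob pattern by replacing the zero-padded dummy id with
# wildcards.  The path below is hypothetical.
if __name__ == "__main__":
    dummy_expid = 99999999
    template = "/data/night/psf-b0-{:08d}.fits".format(dummy_expid)
    pattern = template.replace("{:08d}".format(dummy_expid), "????????")
    print(pattern)  # -> /data/night/psf-b0-????????.fits
    # glob.glob(pattern) would then collect all matching exposures on disk.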
|
[
"desiutil.log.get_logger",
"collections.OrderedDict",
"numpy.sum",
"glob.glob"
] |
[((2223, 2236), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2234, 2236), False, 'from collections import OrderedDict\n'), ((2860, 2885), 'glob.glob', 'glob.glob', (['template_input'], {}), '(template_input)\n', (2869, 2885), False, 'import sys, re, os, glob\n'), ((3726, 3738), 'desiutil.log.get_logger', 'get_logger', ([], {}), '()\n', (3736, 3738), False, 'from desiutil.log import get_logger\n'), ((4245, 4288), 'numpy.sum', 'np.sum', (["(states == task_state_to_int['done'])"], {}), "(states == task_state_to_int['done'])\n", (4251, 4288), True, 'import numpy as np\n'), ((4306, 4351), 'numpy.sum', 'np.sum', (["(states == task_state_to_int['failed'])"], {}), "(states == task_state_to_int['failed'])\n", (4312, 4351), True, 'import numpy as np\n'), ((4732, 4744), 'desiutil.log.get_logger', 'get_logger', ([], {}), '()\n', (4742, 4744), False, 'from desiutil.log import get_logger\n')]
|
# Load in our dependencies
# Forking from http://matplotlib.org/xkcd/examples/showcase/xkcd.html
from matplotlib import pyplot
import numpy
"""
Comments on PRs about style
20 | --------\
| |
| |
| |
| |
1 | \--\
0 | -------
-----------------------
|
Introduction of `jscs`
Time
"""
def main():
"""Generate and save an image as per the docstring above"""
# Define our style as XKCD
pyplot.xkcd()
# Start a new graph
dpi = 72
fig = pyplot.figure(1, figsize=(600 / dpi, 400 / dpi))
# Add labels and a title
pyplot.xlabel('Time')
pyplot.title('Comments on PRs about style')
# Define our axes and limits
# http://matplotlib.org/xkcd/api/pyplot_api.html#matplotlib.pyplot.subplot
ax = fig.add_subplot(1, 1, 1) # cols, rows, plot number
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
pyplot.xticks([])
pyplot.yticks([0, 20])
ax.set_ylim([-1, 25])
# Hide right side of ticks
# http://stackoverflow.com/questions/9051494/customizing-just-one-side-of-tick-marks-in-matplotlib-using-spines
# http://matplotlib.org/api/axis_api.html
ax.yaxis.set_ticks_position('none')
# Generate 100 nodes for our graph and draw them
# http://wiki.scipy.org/Numpy_Example_List#fill
data = numpy.zeros(100)
data.fill(20)
inflection_point = 50
data[inflection_point:inflection_point+10] = numpy.arange(20, 0, -2)
data[inflection_point+10:] = numpy.zeros(100 - (inflection_point + 10))
pyplot.plot(data)
# Add our annotation
pyplot.annotate(
'Introduction of `jscs`',
xy=(inflection_point, 20), arrowprops=dict(arrowstyle='->'), xytext=(10, 15))
# Save the image
pyplot.savefig('graph.png', dpi=dpi)
if __name__ == '__main__':
main()
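    # Note (not in the original script): with dpi = 72 and figsize = (600 / dpi,
    # 400 / dpi), pyplot.savefig('graph.png', dpi=dpi) writes a 600x400-pixel image,
    # because the saved pixel dimensions are figsize (in inches) times dpi.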
|
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xkcd",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.yticks",
"numpy.zeros",
"matplotlib.pyplot.title",
"numpy.arange"
] |
[((499, 512), 'matplotlib.pyplot.xkcd', 'pyplot.xkcd', ([], {}), '()\n', (510, 512), False, 'from matplotlib import pyplot\n'), ((561, 609), 'matplotlib.pyplot.figure', 'pyplot.figure', (['(1)'], {'figsize': '(600 / dpi, 400 / dpi)'}), '(1, figsize=(600 / dpi, 400 / dpi))\n', (574, 609), False, 'from matplotlib import pyplot\n'), ((644, 665), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Time"""'], {}), "('Time')\n", (657, 665), False, 'from matplotlib import pyplot\n'), ((670, 713), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Comments on PRs about style"""'], {}), "('Comments on PRs about style')\n", (682, 713), False, 'from matplotlib import pyplot\n'), ((972, 989), 'matplotlib.pyplot.xticks', 'pyplot.xticks', (['[]'], {}), '([])\n', (985, 989), False, 'from matplotlib import pyplot\n'), ((994, 1016), 'matplotlib.pyplot.yticks', 'pyplot.yticks', (['[0, 20]'], {}), '([0, 20])\n', (1007, 1016), False, 'from matplotlib import pyplot\n'), ((1394, 1410), 'numpy.zeros', 'numpy.zeros', (['(100)'], {}), '(100)\n', (1405, 1410), False, 'import numpy\n'), ((1504, 1527), 'numpy.arange', 'numpy.arange', (['(20)', '(0)', '(-2)'], {}), '(20, 0, -2)\n', (1516, 1527), False, 'import numpy\n'), ((1561, 1603), 'numpy.zeros', 'numpy.zeros', (['(100 - (inflection_point + 10))'], {}), '(100 - (inflection_point + 10))\n', (1572, 1603), False, 'import numpy\n'), ((1608, 1625), 'matplotlib.pyplot.plot', 'pyplot.plot', (['data'], {}), '(data)\n', (1619, 1625), False, 'from matplotlib import pyplot\n'), ((1819, 1855), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['"""graph.png"""'], {'dpi': 'dpi'}), "('graph.png', dpi=dpi)\n", (1833, 1855), False, 'from matplotlib import pyplot\n')]
|
import logging
from typing import Dict, List, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from networkx import DiGraph
from torch import Tensor, nn as nn
from torch.autograd.variable import Variable
from binlin.data.ud import index_data
from binlin.model.nn_utils import get_embed_matrix, pad_seq
from binlin.model.syn.sympairs import SymModel
from binlin.model.syn.utils.bintree import BinTreeBase
from binlin.utils.combinatorics import flatten_nested_lists
from binlin.utils.constants import VocabSymbols
logger = logging.getLogger('main')
class SymGraphEncModel(SymModel):
@property
def _data_fields(self):
return ['LEMMA', 'UPOS', 'XPOS', 'DEPREL']
@property
def _num_embedding_feats(self):
# todo: need to avoid hard-coding
# 6 = 2 * 3
# 2 because we consider two nodes at a time,
# 3 because for a node we consider itself + its context
# consisting of its parent and a random child
return 2 * 3 * self._data_fields_num
def _init_weights(self):
self._dim_emb = self.config["embedding_dim"]
self._mat_emb = get_embed_matrix(len(self._id2tok), self._dim_emb, padding_idx=self.PAD_ID)
self._dim_emb_proj_in = self._num_embedding_feats * self._dim_emb
self._dim_emb_proj_out = self._num_embedding_feats * self.config['embedding_proj_dim']
self._mat_emb_proj = nn.Linear(self._dim_emb_proj_in, self._dim_emb_proj_out)
self._dim_dense_out = self.config["dense_dim"]
self._mat_dense = nn.Linear(self._dim_emb_proj_out, self._dim_dense_out)
self._mat_attn = nn.Linear(self._dim_emb, 1)
self._dim_concat_in = self._dim_dense_out + self._dim_emb
self._dim_concat_out = self._dim_dense_out
self._mat_concat = nn.Linear(self._dim_concat_in, self._dim_concat_out)
self._dim_out = 1
self._mat_out = nn.Linear(self._dim_dense_out, self._dim_out)
def forward(self, batch_data: Tuple[Tensor, Tensor, Union[Tensor, None]]) -> Dict:
other_nodes_var, head_child_var, _ = batch_data
# head-child pair is encoded using an MLP
x_head_child = self._mat_emb(head_child_var).view(-1,
self._dim_emb_proj_in) # size(num_node_pairs, self.emb_proj_in)
x_head_child = F.leaky_relu(self._mat_emb_proj(x_head_child)) # size (num_node_pairs, self.emb_proj_out)
x_head_child = F.leaky_relu(self._mat_dense(x_head_child)) # size (num_node_pairs, self.dense1_dim)
# x_head_child = F.leaky_relu(x_head_child) # size (num_node_pairs, self.dense1_dim)
# graph nodes are encoded using embedding lookup --> summing
node_number = other_nodes_var.shape[-1]
x_graph = self._mat_emb(other_nodes_var) # size (max_num_nodes, batch_size, self.emb_dim)
# variant1: sum over all vecs
# x_graph = torch.sum(x_graph, dim=[0], keepdim=True).view(-1, self._dim_emb) # size (batch_size, emb_dim)
# variant2: use attn scores
attn_unnorm_scores = self._mat_attn(x_graph.view(-1, self._dim_emb)) # num_edges x 1
attn_weights = F.leaky_relu(attn_unnorm_scores).view(-1, 1, node_number)
# TODO: find a simpler way to do it w/o squeezing and unsqueezing?
# apply attention weights to the graph vectors to get weighted average
# size (1, dense)
x_graph = torch.bmm(attn_weights, x_graph).squeeze(1) # size: (bsize, emb_size)
# Concat head, child and graph representations
x_combined = torch.cat((x_head_child, x_graph), 1) # size (bs, emb_dim + self.dense1_dim)
# size (batch_size, self.dense1_dim)
x_combined = self._mat_concat(x_combined)
x_combined = F.leaky_relu(x_combined)
x_combined = self._mat_out(x_combined)
logits = torch.sigmoid(x_combined)
return {'logits': logits}
def extract_features(self, bt, new_node_nxid, dg, feats_d):
# pair-level features
head_nxid = bt.nxid
head_deptree_feats = feats_d[head_nxid]
child_deptree_feats = feats_d[new_node_nxid]
x_pair_ids_l = head_deptree_feats + child_deptree_feats
# extracting graph-level features
graph_ids_l = self.extract_graph_level_feats(dg, new_node_nxid, bt, feats_d)
return (graph_ids_l, x_pair_ids_l)
def extract_graph_level_feats(self, dg, new_node_nxid, bt, feats_d):
head_sbl = dg.node[bt.nxid]['sbl']
if head_sbl is None:
head_sbl_feats_l = self._dummy_node_feats_vec
else:
head_sbl_feats_l = flatten_nested_lists([feats_d[ch] for ch in head_sbl])
child_sbl = dg.node[new_node_nxid]['sbl']
if child_sbl is None:
ch_sbl_feats_l = self._dummy_node_feats_vec
else:
ch_sbl_feats_l = flatten_nested_lists([feats_d[ch] for ch in child_sbl])
child_children = dg[new_node_nxid]
if len(child_children) == 0:
ch_ch_feats_l = self._dummy_node_feats_vec
else:
ch_ch_feats_l = flatten_nested_lists([feats_d[ch] for ch in child_children])
graph_ids_l = head_sbl_feats_l + ch_sbl_feats_l + ch_ch_feats_l
return graph_ids_l
def init_data_containers(self):
return {'X': [],
'Y': [],
'Xg': []}
def add_xy_pairs(self, data_containers: Dict, y: int, model_inputs: Tuple[List[int], List[int]]):
x_graph, x_head_child = model_inputs
data_containers['X'].append(x_head_child)
data_containers['Xg'].append(x_graph)
data_containers['Y'].append(y)
def _batchify(self, data_containers: Dict, batch_size: int):
        # sort by graph feature length, longest first
sorted_data = sorted(zip(*(data_containers['Xg'],
data_containers['X'],
data_containers['Y'])), key=lambda p: len(p[0]), reverse=True)
data_size = len(sorted_data)
num_batches = data_size // batch_size
data_indices = index_data(data_size, mode='no_shuffling')
batch_pairs = []
for bi in range(num_batches + 1): # including the last (smaller) batch
batch_x_pair_feats = []
batch_x_graph_feats = []
batch_x_lens = []
batch_y = []
curr_batch_indices = data_indices[bi * batch_size: (bi + 1) * batch_size]
if len(curr_batch_indices) == 0:
break
for idx in curr_batch_indices:
graph_f_ids, node_pairs_f_ids, y_ids = sorted_data[idx]
batch_x_graph_feats.append(graph_f_ids)
batch_x_lens.append(len(graph_f_ids))
batch_x_pair_feats.append(node_pairs_f_ids)
batch_y.append(y_ids)
max_graph_f_len = max(batch_x_lens)
batch_x_graph_feats_padded = [pad_seq(x, max_graph_f_len, pad_id=self.PAD_ID) for x in batch_x_graph_feats]
# size: (num_nodes, batch_size)
batch_x_graph_feats_var = Variable(torch.LongTensor(batch_x_graph_feats_padded)).to(self.device)
# size: (batch_size, 2 * num_node_feats)
batch_x_pair_feats_var = Variable(torch.LongTensor(batch_x_pair_feats)).to(self.device)
# size: (batch_size, 1)
batch_y_var = Variable(torch.FloatTensor(batch_y)).unsqueeze(1).to(self.device)
batch_pairs.append((batch_x_graph_feats_var, batch_x_pair_feats_var, batch_y_var))
return batch_pairs
def make_decision(self,
bt: BinTreeBase,
new_node_nxid: str,
dg: DiGraph,
feats_d: Dict, *other_inputs) -> int:
x_graph_ids_l, x_ids_l = self.extract_features(bt, new_node_nxid, dg, feats_d)
x_ids_np = np.asarray(x_ids_l)
x_graph_ids_l = np.asarray([x_graph_ids_l])
outputs = self.__call__(
(
torch.from_numpy(x_graph_ids_l).to(self.device),
torch.from_numpy(x_ids_np).to(self.device),
None)
)
logit_val = outputs['logits'].cpu().data[0].numpy()
if logit_val >= 0.5:
decision = VocabSymbols.RIGHT
else:
decision = VocabSymbols.LEFT
return decision
component = SymGraphEncModel
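
# A minimal, self-contained sketch (not part of the original module) of the
# attention-weighted graph pooling idea behind forward(): unnormalised per-node
# scores are reshaped to (batch, 1, num_nodes) and batch-matmul'd with the node
# embeddings, giving one pooled vector per example.  All sizes are made up.
if __name__ == "__main__":
    bsz, num_nodes, emb_dim = 2, 5, 8
    node_embs = torch.randn(bsz, num_nodes, emb_dim)
    scores = torch.randn(bsz * num_nodes, 1)              # stand-in for the attention layer output
    weights = F.leaky_relu(scores).view(bsz, 1, num_nodes)
    pooled = torch.bmm(weights, node_embs).squeeze(1)       # shape: (bsz, emb_dim)
    print(pooled.shape)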
|
[
"logging.getLogger",
"torch.nn.functional.leaky_relu",
"binlin.utils.combinatorics.flatten_nested_lists",
"torch.bmm",
"torch.LongTensor",
"torch.sigmoid",
"numpy.asarray",
"torch.from_numpy",
"binlin.model.nn_utils.pad_seq",
"torch.nn.Linear",
"binlin.data.ud.index_data",
"torch.FloatTensor",
"torch.cat"
] |
[((552, 577), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (569, 577), False, 'import logging\n'), ((1419, 1475), 'torch.nn.Linear', 'nn.Linear', (['self._dim_emb_proj_in', 'self._dim_emb_proj_out'], {}), '(self._dim_emb_proj_in, self._dim_emb_proj_out)\n', (1428, 1475), True, 'from torch import Tensor, nn as nn\n'), ((1558, 1612), 'torch.nn.Linear', 'nn.Linear', (['self._dim_emb_proj_out', 'self._dim_dense_out'], {}), '(self._dim_emb_proj_out, self._dim_dense_out)\n', (1567, 1612), True, 'from torch import Tensor, nn as nn\n'), ((1639, 1666), 'torch.nn.Linear', 'nn.Linear', (['self._dim_emb', '(1)'], {}), '(self._dim_emb, 1)\n', (1648, 1666), True, 'from torch import Tensor, nn as nn\n'), ((1812, 1864), 'torch.nn.Linear', 'nn.Linear', (['self._dim_concat_in', 'self._dim_concat_out'], {}), '(self._dim_concat_in, self._dim_concat_out)\n', (1821, 1864), True, 'from torch import Tensor, nn as nn\n'), ((1916, 1961), 'torch.nn.Linear', 'nn.Linear', (['self._dim_dense_out', 'self._dim_out'], {}), '(self._dim_dense_out, self._dim_out)\n', (1925, 1961), True, 'from torch import Tensor, nn as nn\n'), ((3588, 3625), 'torch.cat', 'torch.cat', (['(x_head_child, x_graph)', '(1)'], {}), '((x_head_child, x_graph), 1)\n', (3597, 3625), False, 'import torch\n'), ((3783, 3807), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x_combined'], {}), '(x_combined)\n', (3795, 3807), True, 'import torch.nn.functional as F\n'), ((3872, 3897), 'torch.sigmoid', 'torch.sigmoid', (['x_combined'], {}), '(x_combined)\n', (3885, 3897), False, 'import torch\n'), ((6100, 6142), 'binlin.data.ud.index_data', 'index_data', (['data_size'], {'mode': '"""no_shuffling"""'}), "(data_size, mode='no_shuffling')\n", (6110, 6142), False, 'from binlin.data.ud import index_data\n'), ((7896, 7915), 'numpy.asarray', 'np.asarray', (['x_ids_l'], {}), '(x_ids_l)\n', (7906, 7915), True, 'import numpy as np\n'), ((7940, 7967), 'numpy.asarray', 'np.asarray', (['[x_graph_ids_l]'], {}), '([x_graph_ids_l])\n', (7950, 7967), True, 'import numpy as np\n'), ((4643, 4697), 'binlin.utils.combinatorics.flatten_nested_lists', 'flatten_nested_lists', (['[feats_d[ch] for ch in head_sbl]'], {}), '([feats_d[ch] for ch in head_sbl])\n', (4663, 4697), False, 'from binlin.utils.combinatorics import flatten_nested_lists\n'), ((4878, 4933), 'binlin.utils.combinatorics.flatten_nested_lists', 'flatten_nested_lists', (['[feats_d[ch] for ch in child_sbl]'], {}), '([feats_d[ch] for ch in child_sbl])\n', (4898, 4933), False, 'from binlin.utils.combinatorics import flatten_nested_lists\n'), ((5112, 5172), 'binlin.utils.combinatorics.flatten_nested_lists', 'flatten_nested_lists', (['[feats_d[ch] for ch in child_children]'], {}), '([feats_d[ch] for ch in child_children])\n', (5132, 5172), False, 'from binlin.utils.combinatorics import flatten_nested_lists\n'), ((3183, 3215), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['attn_unnorm_scores'], {}), '(attn_unnorm_scores)\n', (3195, 3215), True, 'import torch.nn.functional as F\n'), ((3440, 3472), 'torch.bmm', 'torch.bmm', (['attn_weights', 'x_graph'], {}), '(attn_weights, x_graph)\n', (3449, 3472), False, 'import torch\n'), ((6948, 6995), 'binlin.model.nn_utils.pad_seq', 'pad_seq', (['x', 'max_graph_f_len'], {'pad_id': 'self.PAD_ID'}), '(x, max_graph_f_len, pad_id=self.PAD_ID)\n', (6955, 6995), False, 'from binlin.model.nn_utils import get_embed_matrix, pad_seq\n'), ((7118, 7162), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_graph_feats_padded'], {}), 
'(batch_x_graph_feats_padded)\n', (7134, 7162), False, 'import torch\n'), ((7279, 7315), 'torch.LongTensor', 'torch.LongTensor', (['batch_x_pair_feats'], {}), '(batch_x_pair_feats)\n', (7295, 7315), False, 'import torch\n'), ((8031, 8062), 'torch.from_numpy', 'torch.from_numpy', (['x_graph_ids_l'], {}), '(x_graph_ids_l)\n', (8047, 8062), False, 'import torch\n'), ((8096, 8122), 'torch.from_numpy', 'torch.from_numpy', (['x_ids_np'], {}), '(x_ids_np)\n', (8112, 8122), False, 'import torch\n'), ((7404, 7430), 'torch.FloatTensor', 'torch.FloatTensor', (['batch_y'], {}), '(batch_y)\n', (7421, 7430), False, 'import torch\n')]
|
## This script will define the functions used in the locate lane lines pipeline
## The end of this script will process a video file to locate and plot the lane lines
import pickle
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
import sys
## Unpickle Required Data
cam_mtx = pickle.load(open("camera_matrix.p","rb"))
dist_coef = pickle.load(open("camera_distortion_coefficients.p","rb"))
M = pickle.load(open("M.p","rb"))
Minv = pickle.load(open("Minv.p","rb"))
## Undistort Function
def undistort(img_RGB_in):
# Input RGB distorted, Output RGB undistorted
img_out = cv2.undistort(img_RGB_in, cam_mtx, dist_coef, None, cam_mtx)
return(img_out)
# Sample undistort image
if (False):
img = mpimg.imread('camera_cal/calibration1.jpg')
dst_img = undistort(img)
plt.figure(0)
plt.imshow(img)
plt.title('Original Image')
plt.savefig('output_images/distorted_image.png')
plt.figure(1)
plt.imshow(dst_img)
plt.title('Undistorted Image')
plt.savefig('output_images/undistorted_image.png')
plt.show()
# Color Threshold Function
def color_thresh(img_RGB_in,RGB_out):
# Input RGB undistorted, Output Binary (or RGB for video)
# Convert image to HSV color space
img_HSV = cv2.cvtColor(img_RGB_in, cv2.COLOR_RGB2HSV)
    # Extract H, S, V layers (H in degrees, S and V rescaled to percentages)
H_layer = img_HSV[:,:,0]*2
S_layer = img_HSV[:,:,1]/255*100
V_layer = img_HSV[:,:,2]/255*100
    # Apply thresholds to the H, S, V layers to identify white and yellow lane lines
H_Yellow = (40,70)
S_Yellow = (30,100)
V_Yellow = (30,100)
H_White = (0,50)
S_White = (0,10)
V_White = (75,100)
img_out = np.zeros_like(H_layer)
img_out[(((H_layer >= H_Yellow[0]) & (H_layer <= H_Yellow[1])) \
& ((S_layer >= S_Yellow[0]) & (S_layer <= S_Yellow[1])) \
& ((V_layer >= V_Yellow[0]) & (V_layer <= V_Yellow[1]))) \
| (((H_layer >= H_White[0]) & (H_layer <= H_White[1])) \
& ((S_layer >= S_White[0]) & (S_layer <= S_White[1])) \
& ((V_layer >= V_White[0]) & (V_layer <= V_White[1])))] = 1
if (RGB_out):
black_out_idxs = np.where(img_out == 0)
img_out = np.copy(img_RGB_in)
img_out[black_out_idxs[0],black_out_idxs[1],:] = 0
return(img_out)
# Sample color threshold image
if (False):
img = mpimg.imread('test_images/test5.jpg')
thrsh_img = color_thresh(img,RGB_out=True)
plt.figure(2)
plt.imshow(img)
plt.title('Original Image')
plt.savefig('output_images/pre_color_thresh.png')
plt.figure(3)
plt.imshow(thrsh_img, cmap='gray')
plt.title('Color Threshold')
plt.savefig('output_images/post_color_thresh.png')
plt.show()
## Perspective Transform to Top-Down View Function
def top_down_xfrm(img_RGB_in,frwd):
# Input RGB undistorted, Output RGB top-down
# frwd is bool that specifies if normal transform is requested (true) or inverse (false)
img_size = (img_RGB_in.shape[1], img_RGB_in.shape[0])
if (frwd):
Xfrm = M
else:
Xfrm = Minv
img_RGB_out = cv2.warpPerspective(img_RGB_in, Xfrm, img_size, flags=cv2.INTER_LINEAR)
return(img_RGB_out)
# Sample top-down perspective transform on image
if (False):
img = mpimg.imread('test_images/test6.jpg')
warped = top_down_xfrm(img,frwd=True)
plt.figure(4)
plt.imshow(img)
plt.title('Original Image')
plt.savefig('output_images/pre_top_down.png')
plt.figure(5)
plt.imshow(warped)
plt.title('Top Down View Warp')
plt.savefig('output_images/post_top_down.png')
plt.show()
## Gradient Threshold Function
def grad_thresh(img_RGB_in,RGB_out):
# Input RGB top-down, Output Binary (or RGB for video)
# RGB_out boolean can be used for video testing
#Apply gradient threshold in x direction
img_GRAY = cv2.cvtColor(img_RGB_in, cv2.COLOR_RGB2GRAY)
grad_thresh = (10,100)
abs_sobel = np.absolute(cv2.Sobel(img_GRAY, cv2.CV_64F, 1, 0))
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
img_out = np.zeros_like(img_GRAY, dtype=np.uint8)
img_out[(scaled_sobel >= grad_thresh[0]) & (scaled_sobel <= grad_thresh[1])] = 1
if (RGB_out):
black_out_idxs = np.where(img_out == 0)
img_out = np.copy(img_RGB_in)
img_out[black_out_idxs[0],black_out_idxs[1],:] = 0
# print(out.shape)
return(img_out)
# Sample gradient threshold image
if (False):
img = mpimg.imread('test_images/test6.jpg')
img = top_down_xfrm(img,frwd=True)
thrsh_img = grad_thresh(img,RGB_out=False)
plt.figure(6)
plt.imshow(img)
plt.title('Original Top Down Transformed Image')
plt.savefig('output_images/pre_grad_thresh.png')
plt.figure(7)
plt.imshow(thrsh_img, cmap='gray')
plt.title('Gradient Threshold')
plt.savefig('output_images/post_grad_thresh.png')
plt.show()
# Class to store and calculate both lane line parameters
class LaneLines():
def __init__(self,img_RGB_in,img_BIN_in):
frame_height = img_RGB_in.shape[0]
frame_width = img_RGB_in.shape[1]
# CONSTANTS
# Frame height
self.frame_height = frame_height
# Frame width
self.frame_width = frame_width
self.midpoint_width = np.int(frame_width//2)
# y values
self.ploty = np.linspace(0, frame_height-1, frame_height)
# Polynomial fit dimension
self.poly_fit_dim = 2
# FRAME
self.Frame = img_RGB_in
# Binary image for current frame
self.img_BIN_in = img_BIN_in
# Histogram for current frame
self.histogram = None
# RGB image for output of current frame
self.img_RGB_out = img_RGB_in
# Current number of consecutive failed frames
self.num_failed_frame_curr = 0
# Number of frames processed
self.frame_num = 0
# TEXT
self.font = cv2.FONT_HERSHEY_SIMPLEX
self.Ofst_Text_pos = (20,500)
self.Rad_L_Text_pos = (20,550)
self.Rad_R_Text_pos = (20,600)
self.fontScale = 1
self.fontColor = (255,255,255)
self.lineType = 2
# HYPERPARAMETERS
# Choose the number of sliding windows
self.nwindows = 9
# Set the width of the windows +/- margin
self.margin_hist = 100
# Set the width of the windows +/- margin
self.margin_poly = 100
# Set minimum number of pixels found to re-center window
self.minpix = 50
# Number of windows that must contain minpix number of pixels for lane line to be considered valid
self.nwindow_fnd = 5
# Number of pixels that must be found for poly search method to be considered valid
self.minpix_poly = 300
# Set height of windows - based on nwindows above and image shape
self.window_height = np.int(frame_height//self.nwindows)
# Define conversions in x and y from pixels space to meters
self.x_width_pix = 700 #pixel width of lane
self.y_height_pix = 720 #pixel height of lane (frame height)
self.xm_per_pix = 3.7/self.x_width_pix # meters per pixel in x dimension
self.ym_per_pix = 30/self.y_height_pix # meters per pixel in y dimension
# Number of frames that failed to find lane lines before reset
self.num_failed_frame_alwd = 25
# Number of frames for rolling average filter
self.filt_size = 25
# LINE PARAMETERS
# was the left line detected in the current frame
self.detected_L = False
self.detected_R = False
# x values of the last n fits of the left line
self.x_fit_all_L = np.empty((0,self.ploty.size), dtype='float')
self.x_fit_all_R = np.empty((0,self.ploty.size), dtype='float')
#average x values of the fitted left line over the last n iterations
self.x_fit_best_L = np.zeros((self.ploty.size), dtype='float')
self.x_fit_best_R = np.zeros((self.ploty.size), dtype='float')
#polynomial coefficients for the most recent fit
self.coef_fit_current_L = np.zeros((self.poly_fit_dim+1), dtype='float')
self.coef_fit_current_R = np.zeros((self.poly_fit_dim+1), dtype='float')
#polynomial coefficients for the previous n iterations
self.coef_fit_all_L = np.empty((0,self.poly_fit_dim+1), dtype='float')
self.coef_fit_all_R = np.empty((0,self.poly_fit_dim+1), dtype='float')
#polynomial coefficients averaged over the last n iterations
self.coef_fit_best_L = np.zeros((self.poly_fit_dim+1), dtype='float')
self.coef_fit_best_R = np.zeros((self.poly_fit_dim+1), dtype='float')
#radius of curvature of the line in [m]
self.radius_of_curvature_L = 0
self.radius_of_curvature_R = 0
#distance in meters of vehicle center from the line
self.center_line_offst = 0
#difference in fit coefficients between last and new fits
# self.diffs = np.array([0,0,0], dtype='float')
return
def update_frame(self,img_RGB_in):
'''
Stores the new frame in memory
'''
self.Frame = img_RGB_in
self.histogram = None
self.img_RGB_out = img_RGB_in
return
def hist(self):
'''
Calculate histogram of points
'''
#Grab only the bottom half of the image
#Lane lines are likely to be mostly vertical nearest to the car
#Sum across image pixels vertically - make sure to set an `axis`
#i.e. the highest areas of vertical lines should be larger values
self.histogram = np.sum(self.img_BIN_in[self.img_BIN_in.shape[0]//2:,:], axis=0)
return
def find_lane_pixels_hist(self):
'''
Find lane pixels with histogram method
'''
# Reset previous rolling average queues
self.x_fit_all_L = np.empty((0,self.ploty.size), dtype='float')
self.x_fit_all_R = np.empty((0,self.ploty.size), dtype='float')
self.coef_fit_all_L = np.empty((0,self.poly_fit_dim+1), dtype='float')
self.coef_fit_all_R = np.empty((0,self.poly_fit_dim+1), dtype='float')
# Take a histogram of the bottom half of the image
self.hist()
# Create an output image to draw on and visualize the result
self.img_RGB_out = np.dstack((self.img_BIN_in, self.img_BIN_in, self.img_BIN_in))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint_height = np.int(self.histogram.shape[0]//2)
leftx_base = np.argmax(self.histogram[:midpoint_height])
rightx_base = np.argmax(self.histogram[midpoint_height:]) + midpoint_height
# Identify the x and y positions of all nonzero pixels in the image
nonzero = self.img_BIN_in.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Counter of valid windows found
cnt_wdw_fnd_L = 0
cnt_wdw_fnd_R = 0
#Step through the windows one by one
for window in range(self.nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = self.img_BIN_in.shape[0] - (window+1)*self.window_height
win_y_high = self.img_BIN_in.shape[0] - window*self.window_height
win_xleft_low = leftx_current - self.margin_hist
win_xleft_high = leftx_current + self.margin_hist
win_xright_low = rightx_current - self.margin_hist
win_xright_high = rightx_current + self.margin_hist
# Draw the windows on the visualization image
cv2.rectangle(self.img_RGB_out,(win_xleft_low,win_y_low),
(win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(self.img_RGB_out,(win_xright_low,win_y_low),
(win_xright_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, re-center next window on their mean position (otherwise keep previous window x position)
if len(good_left_inds) > self.minpix:
cnt_wdw_fnd_L = cnt_wdw_fnd_L + 1
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > self.minpix:
cnt_wdw_fnd_R = cnt_wdw_fnd_R + 1
self.detected_R = True
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Create numpy arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Determine if valid number of windows was found with pixels
self.detected_L = (self.frame_num == 0) or (cnt_wdw_fnd_L >= self.nwindow_fnd)
self.detected_R = (self.frame_num == 0) or (cnt_wdw_fnd_R >= self.nwindow_fnd)
# Color in left and right line pixels
self.img_RGB_out[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
self.img_RGB_out[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
return leftx, lefty, rightx, righty
def fit_polynomial(self,x,y):
# Fit a second order polynomial to data using `np.polyfit`
# coef_fit = [A, B, C] of y = A*x^2 + B*x + C
coef_fit = np.polyfit(y, x, self.poly_fit_dim)
# Generate x and y values for plotting
x_fit = coef_fit[0]*self.ploty**2 + coef_fit[1]*self.ploty + coef_fit[2]
# Limit x_fit by size of frame
x_fit = np.minimum(np.maximum(x_fit,0),self.frame_width-1)
# Visualization
# Colors in the activated pixels
self.img_RGB_out[y, x] = [255, 0, 0]
# Colors in the poly line
self.img_RGB_out[self.ploty.astype(int), x_fit.astype(int)] = [255, 255, 0]
return coef_fit, x_fit
def find_lane_pixels_poly(self):
'''
Search around polynomial for new lane pixels
'''
# Grab activated pixels
nonzero = self.img_BIN_in.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Set the area of search based on activated x-values
# within the +/- margin of our polynomial function (from previous frame)
left_lane_inds = ((nonzerox > (self.coef_fit_current_L[0]*(nonzeroy**2) + self.coef_fit_current_L[1]*nonzeroy + self.coef_fit_current_L[2] - self.margin_poly)) & (nonzerox < (self.coef_fit_current_L[0]*(nonzeroy**2) + self.coef_fit_current_L[1]*nonzeroy + self.coef_fit_current_L[2] + self.margin_poly)))
right_lane_inds = ((nonzerox > (self.coef_fit_current_R[0]*(nonzeroy**2) + self.coef_fit_current_R[1]*nonzeroy + self.coef_fit_current_R[2] - self.margin_poly)) & (nonzerox < (self.coef_fit_current_R[0]*(nonzeroy**2) + self.coef_fit_current_R[1]*nonzeroy + self.coef_fit_current_R[2] + self.margin_poly)))
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Determine pixel find validity
self.detected_L = len(leftx) > self.minpix_poly
self.detected_R = len(rightx) > self.minpix_poly
if (self.detected_L and self.detected_R):
# Prepare output RGB image
self.img_RGB_out = np.dstack((self.img_BIN_in, self.img_BIN_in, self.img_BIN_in))
# Visualization
# Create an image to draw on and an image to show the selection window
# out_img = np.dstack((img_bin, img_bin, img_bin))*255
window_img = np.zeros_like(self.img_RGB_out)
# Color in left and right line pixels
self.img_RGB_out[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
self.img_RGB_out[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
coef_tmp_L, x_fit_L = self.fit_polynomial(leftx,lefty)
coef_tmp_R, x_fit_R = self.fit_polynomial(rightx,righty)
left_line_window1 = np.array([np.transpose(np.vstack([x_fit_L-self.margin_poly, self.ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([x_fit_L+self.margin_poly, self.ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([x_fit_R-self.margin_poly, self.ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([x_fit_R+self.margin_poly, self.ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
self.img_RGB_out = cv2.addWeighted(self.img_RGB_out, 1, window_img, 0.3, 0)
# Plot the polynomial lines onto the image
# plt.plot(left_fitx, ploty, color='yellow')
# plt.plot(right_fitx, ploty, color='yellow')
# End visualization steps
return leftx, lefty, rightx, righty
def calc_best(self):
'''
Perform rolling average on polynomials to determine best fit.
'''
# Reset best
self.coef_fit_best_L = np.zeros((self.poly_fit_dim+1), dtype='float')
self.coef_fit_best_R = np.zeros((self.poly_fit_dim+1), dtype='float')
self.x_fit_best_L = np.zeros((self.ploty.size), dtype='float')
self.x_fit_best_R = np.zeros((self.ploty.size), dtype='float')
# Check if size of queue is larger than filter size
if (self.x_fit_all_L.shape[0] > self.filt_size):
self.x_fit_all_L = np.delete(self.x_fit_all_L,(0),axis=0)
self.x_fit_all_R = np.delete(self.x_fit_all_R,(0),axis=0)
self.coef_fit_all_L = np.delete(self.coef_fit_all_L,(0),axis=0)
self.coef_fit_all_R = np.delete(self.coef_fit_all_R,(0),axis=0)
# Loop through and compute average
n = self.x_fit_all_L.shape[0]
for row in range(n):
for col_x_fit in range(self.x_fit_all_L.shape[1]):
self.x_fit_best_L[col_x_fit] = self.x_fit_best_L[col_x_fit] + self.x_fit_all_L[row,col_x_fit]
self.x_fit_best_R[col_x_fit] = self.x_fit_best_R[col_x_fit] + self.x_fit_all_R[row,col_x_fit]
for col_coef_fit in range(self.coef_fit_all_L.shape[1]):
self.coef_fit_best_L[col_coef_fit] = self.coef_fit_best_L[col_coef_fit] + self.coef_fit_all_L[row,col_coef_fit]
self.coef_fit_best_R[col_coef_fit] = self.coef_fit_best_R[col_coef_fit] + self.coef_fit_all_R[row,col_coef_fit]
self.x_fit_best_L = self.x_fit_best_L/n
self.x_fit_best_R = self.x_fit_best_R/n
self.coef_fit_best_L = self.coef_fit_best_L/n
self.coef_fit_best_R = self.coef_fit_best_R/n
return
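    # Note (not in the original): the explicit accumulation loop above is
    # equivalent to a vectorized mean over the stored rows, e.g.
    #   self.x_fit_best_L = self.x_fit_all_L.mean(axis=0)
    #   self.x_fit_best_R = self.x_fit_all_R.mean(axis=0)
    #   self.coef_fit_best_L = self.coef_fit_all_L.mean(axis=0)
    #   self.coef_fit_best_R = self.coef_fit_all_R.mean(axis=0)
    # assuming at least one fit has been stored (n > 0).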
def calc_rad_real(self):
'''
Calculates the radius of polynomial functions in meters.
'''
# Convert parabola coefficients into pixels
A_L = self.xm_per_pix / (self.ym_per_pix**2) * self.coef_fit_best_L[0]
B_L = self.xm_per_pix / (self.ym_per_pix) * self.coef_fit_best_L[1]
A_R = self.xm_per_pix / (self.ym_per_pix**2) * self.coef_fit_best_R[0]
B_R = self.xm_per_pix / (self.ym_per_pix) * self.coef_fit_best_R[1]
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = (self.frame_height - 1)*self.ym_per_pix
# Calculation of R_curve (radius of curvature)
self.radius_of_curvature_L = ((1 + (2*A_L*y_eval + B_L)**2)**1.5) / np.absolute(2*A_L)
self.radius_of_curvature_R = ((1 + (2*A_R*y_eval + B_R)**2)**1.5) / np.absolute(2*A_R)
return
def calc_offset(self):
'''
Calculates the offset between vehicle and center of lane
'''
self.center_line_offst = abs(self.midpoint_width - (self.x_fit_best_L[-1] + self.x_fit_best_R[-1])/2) * self.xm_per_pix
return
def find_lane_lines(self):
'''
Find lane lines with an appropriate method
'''
## Find lane pixels
# If left or right detection from previous loop is false: Use histogram method
if (not(self.detected_L)) or (not(self.detected_R)):
print("Histogram search method used.")
# Call histogram method to find pixel locations of lanes and determine current frame detection validity
leftx, lefty, rightx, righty = self.find_lane_pixels_hist()
else:
print("Polynomial search method used")
# Call poly search method to find pixel locations of lanes and determine current frame detection validity
leftx, lefty, rightx, righty = self.find_lane_pixels_poly()
if (not(self.detected_L)) or (not(self.detected_R)):
print("Polynomial search method failed. Histogram search method used.")
# Neither lane was found, must use histogram method
leftx, lefty, rightx, righty = self.find_lane_pixels_hist()
## Check if both lane lines were found
if (self.detected_L and self.detected_R):
# Reset failed counter
self.num_failed_frame_curr = 0
# Fit new polynomials for both lanes
self.coef_fit_current_L, x_fit_L = self.fit_polynomial(leftx,lefty)
self.coef_fit_current_R, x_fit_R = self.fit_polynomial(rightx,righty)
# Append x_fit to list
self.x_fit_all_L = np.vstack((self.x_fit_all_L, x_fit_L))
self.x_fit_all_R = np.vstack((self.x_fit_all_R, x_fit_R))
# Append coefficients to list
self.coef_fit_all_L = np.vstack((self.coef_fit_all_L, self.coef_fit_current_L))
self.coef_fit_all_R = np.vstack((self.coef_fit_all_R, self.coef_fit_current_R))
# Calculate rolling average
self.calc_best()
else:
# Increment failed counter
self.num_failed_frame_curr = self.num_failed_frame_curr + 1
print("Number of failed frames: " + str(self.num_failed_frame_curr))
# Do not compute new polynomial, use previous best
# Check if number of consecutive failed frames has exceed max
if (self.num_failed_frame_curr > self.num_failed_frame_alwd):
print("Number of consecutive failed frames exceeded.")
sys.exit()
# Calculate radius of curvature
self.calc_rad_real()
# Calculate center line offset
self.calc_offset()
return
def draw_frame(self,img_RGB_in):
'''
Draws the frame with desired polynomials in original image perspective
'''
print("\n")
#print("Processing Frame # " + str(self.frame_num))
# Store new frame
self.update_frame(img_RGB_in)
# Calculate binary image of color and gradient thresholds
self.img_BIN_in = grad_thresh(top_down_xfrm(color_thresh(undistort(img_RGB_in),RGB_out=True),frwd=True),RGB_out=False)
# Create an image to draw the lines on
warp_zero = np.zeros_like(self.img_BIN_in).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Find lane lines
self.find_lane_lines()
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([self.x_fit_best_L, self.ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([self.x_fit_best_R, self.ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (img_RGB_in.shape[1], img_RGB_in.shape[0]))
# Combine the result with the original image
self.Frame = cv2.addWeighted(img_RGB_in, 1, newwarp, 0.3, 0)
# Draw text on image
cv2.putText(self.Frame,"Lane Center Offset [m]: " + str(round(self.center_line_offst,2)),self.Ofst_Text_pos,self.font,self.fontScale,self.fontColor,self.lineType)
cv2.putText(self.Frame,"Radius Left [m]: " + str(round(self.radius_of_curvature_L,0)),self.Rad_L_Text_pos,self.font,self.fontScale,self.fontColor,self.lineType)
cv2.putText(self.Frame,"Radius Right [m]: " + str(round(self.radius_of_curvature_R,0)),self.Rad_R_Text_pos,self.font,self.fontScale,self.fontColor,self.lineType)
self.frame_num = self.frame_num + 1
#print("Left Radius: " + str(self.radius_of_curvature_L))
#print("Right Radius: " + str(self.radius_of_curvature_R))
#print("Lane Center Offset: " + str(lane_lines.center_line_offst))
#return(self.img_RGB_out)
return(self.Frame)
# Sample histogram
if (False):
img = mpimg.imread('test_images/test6.jpg')
img_BIN_in = grad_thresh(top_down_xfrm(color_thresh(undistort(img),RGB_out=True),frwd=True),RGB_out=False);
lane_lines = LaneLines(img,img_BIN_in)
lane_lines.hist()
histogram = lane_lines.histogram
plt.figure(7)
plt.imshow(img)
plt.title('Original Image')
plt.savefig('output_images/original_histogram.png')
plt.figure(8)
plt.imshow(img_BIN_in, cmap='gray')
plt.title('Original Binary Image')
plt.savefig('output_images/original_bin_histogram.png')
plt.figure(9)
plt.plot(histogram)
plt.title('Histogram')
plt.savefig('output_images/histogram.png')
plt.show()
# Sample polyfit with histogram search
if (False):
img = mpimg.imread('test_images/test6.jpg')
plt.figure(10)
plt.imshow(img)
plt.title('Original Image')
img_BIN_in = grad_thresh(top_down_xfrm(color_thresh(undistort(img),RGB_out=True),frwd=True),RGB_out=False)
lane_lines = LaneLines(img,img_BIN_in)
# Search for lane lines using histogram method
leftx, lefty, rightx, righty = lane_lines.find_lane_pixels_hist()
# Fit new polynomials for both lanes
lane_lines.coef_fit_current_L, x_fit_L = lane_lines.fit_polynomial(leftx,lefty)
lane_lines.coef_fit_current_R, x_fit_R = lane_lines.fit_polynomial(rightx,righty)
print("Current Left Coefficients: " + str(lane_lines.coef_fit_current_L))
print("Current Right Coefficients: " + str(lane_lines.coef_fit_current_R))
plt.figure(11)
plt.imshow(lane_lines.img_RGB_out)
plt.title('2nd Order Polynomial Fit - Histogram Search Method')
plt.savefig('output_images/poly_hist.png')
# Sample search around poly
if (False):
# Append x_fit to list
lane_lines.x_fit_all_L = np.vstack((lane_lines.x_fit_all_L, x_fit_L))
lane_lines.x_fit_all_R = np.vstack((lane_lines.x_fit_all_R, x_fit_R))
# Append coefficients to list
lane_lines.coef_fit_all_L = np.vstack((lane_lines.coef_fit_all_L, lane_lines.coef_fit_current_L))
lane_lines.coef_fit_all_R = np.vstack((lane_lines.coef_fit_all_R, lane_lines.coef_fit_current_R))
print("All Left Coefficients: " + str(lane_lines.coef_fit_all_L))
print("All Right Coefficients: " + str(lane_lines.coef_fit_all_R))
# Calculate rolling average
lane_lines.calc_best()
print("Best Left Coefficients: " + str(lane_lines.coef_fit_best_L))
print("Best Right Coefficients: " + str(lane_lines.coef_fit_best_R))
# Calculate real radius of curvature
lane_lines.calc_rad_real()
print("Left Radius: " + str(lane_lines.radius_of_curvature_L))
print("Right Radius: " + str(lane_lines.radius_of_curvature_R))
lane_lines.calc_offset()
print("Center Lane Offset: " + str(lane_lines.center_line_offst))
# Search for lane lines around previous best polynomial
leftx, lefty, rightx, righty = lane_lines.find_lane_pixels_poly()
# Fit new polynomials for both lanes
lane_lines.coef_fit_current_L, x_fit_L = lane_lines.fit_polynomial(leftx,lefty)
lane_lines.coef_fit_current_R, x_fit_R = lane_lines.fit_polynomial(rightx,righty)
plt.figure(12)
plt.imshow(lane_lines.img_RGB_out)
plt.title('2nd Order Polynomial Fit - Polynomial Search Method')
plt.savefig('output_images/poly_poly.png')
plt.show()
# Test full pipeline
if (True):
img = mpimg.imread('test_images/test6.jpg')
lane_lines = LaneLines(img,img)
plt.figure(13)
plt.imshow(img)
plt.title('Original Image')
plt.figure(14)
plt.imshow(lane_lines.draw_frame(img))
plt.title('Found Lines')
plt.savefig('output_images/full_pipeline.png')
plt.show()
## Process video
if (False):
img = mpimg.imread('test_images/test6.jpg')
lane_lines = LaneLines(img,img)
video_output = 'output_videos/challenge_video_processed.mp4'
#video_output = 'output_videos/project_video_processed.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip = VideoFileClip("test_videos/challenge_video.mp4")
video_clip = clip.fl_image(lane_lines.draw_frame) #NOTE: this function expects color images!!
video_clip.write_videofile(video_output, audio=False)
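
# A worked numeric sketch (not part of the original pipeline) of the radius
# calculation in calc_rad_real(): a pixel-space parabola x = A*y**2 + B*y + C is
# rescaled to metres and evaluated at the bottom of the frame,
# R = (1 + (2*A*y + B)**2)**1.5 / |2*A|.  The fit coefficients below are made up.
if (False):
    xm_per_pix = 3.7 / 700
    ym_per_pix = 30 / 720
    A_pix, B_pix = 2e-4, -0.3            # hypothetical pixel-space fit
    A = xm_per_pix / (ym_per_pix ** 2) * A_pix
    B = xm_per_pix / ym_per_pix * B_pix
    y_eval = (720 - 1) * ym_per_pix
    radius_m = (1 + (2 * A * y_eval + B) ** 2) ** 1.5 / np.absolute(2 * A)
    print("radius of curvature [m]:", radius_m)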
|
[
"cv2.rectangle",
"numpy.polyfit",
"numpy.hstack",
"matplotlib.image.imread",
"numpy.array",
"cv2.warpPerspective",
"sys.exit",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.where",
"numpy.delete",
"matplotlib.pyplot.plot",
"cv2.undistort",
"numpy.max",
"cv2.addWeighted",
"numpy.linspace",
"numpy.empty",
"numpy.vstack",
"numpy.concatenate",
"numpy.maximum",
"moviepy.editor.VideoFileClip",
"matplotlib.pyplot.savefig",
"numpy.argmax",
"cv2.cvtColor",
"matplotlib.pyplot.title",
"numpy.int_",
"numpy.int",
"matplotlib.pyplot.show",
"numpy.copy",
"numpy.dstack",
"numpy.absolute",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.zeros_like",
"cv2.Sobel"
] |
[((666, 726), 'cv2.undistort', 'cv2.undistort', (['img_RGB_in', 'cam_mtx', 'dist_coef', 'None', 'cam_mtx'], {}), '(img_RGB_in, cam_mtx, dist_coef, None, cam_mtx)\n', (679, 726), False, 'import cv2\n'), ((795, 838), 'matplotlib.image.imread', 'mpimg.imread', (['"""camera_cal/calibration1.jpg"""'], {}), "('camera_cal/calibration1.jpg')\n", (807, 838), True, 'import matplotlib.image as mpimg\n'), ((872, 885), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (882, 885), True, 'import matplotlib.pyplot as plt\n'), ((890, 905), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (900, 905), True, 'import matplotlib.pyplot as plt\n'), ((910, 937), 'matplotlib.pyplot.title', 'plt.title', (['"""Original Image"""'], {}), "('Original Image')\n", (919, 937), True, 'import matplotlib.pyplot as plt\n'), ((942, 990), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/distorted_image.png"""'], {}), "('output_images/distorted_image.png')\n", (953, 990), True, 'import matplotlib.pyplot as plt\n'), ((995, 1008), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1005, 1008), True, 'import matplotlib.pyplot as plt\n'), ((1013, 1032), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dst_img'], {}), '(dst_img)\n', (1023, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1037, 1067), 'matplotlib.pyplot.title', 'plt.title', (['"""Undistorted Image"""'], {}), "('Undistorted Image')\n", (1046, 1067), True, 'import matplotlib.pyplot as plt\n'), ((1072, 1122), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/undistorted_image.png"""'], {}), "('output_images/undistorted_image.png')\n", (1083, 1122), True, 'import matplotlib.pyplot as plt\n'), ((1127, 1137), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1135, 1137), True, 'import matplotlib.pyplot as plt\n'), ((1323, 1366), 'cv2.cvtColor', 'cv2.cvtColor', (['img_RGB_in', 'cv2.COLOR_RGB2HSV'], {}), '(img_RGB_in, cv2.COLOR_RGB2HSV)\n', (1335, 1366), False, 'import cv2\n'), ((1717, 1739), 'numpy.zeros_like', 'np.zeros_like', (['H_layer'], {}), '(H_layer)\n', (1730, 1739), True, 'import numpy as np\n'), ((2417, 2454), 'matplotlib.image.imread', 'mpimg.imread', (['"""test_images/test5.jpg"""'], {}), "('test_images/test5.jpg')\n", (2429, 2454), True, 'import matplotlib.image as mpimg\n'), ((2506, 2519), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (2516, 2519), True, 'import matplotlib.pyplot as plt\n'), ((2524, 2539), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2534, 2539), True, 'import matplotlib.pyplot as plt\n'), ((2544, 2571), 'matplotlib.pyplot.title', 'plt.title', (['"""Original Image"""'], {}), "('Original Image')\n", (2553, 2571), True, 'import matplotlib.pyplot as plt\n'), ((2576, 2625), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/pre_color_thresh.png"""'], {}), "('output_images/pre_color_thresh.png')\n", (2587, 2625), True, 'import matplotlib.pyplot as plt\n'), ((2630, 2643), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (2640, 2643), True, 'import matplotlib.pyplot as plt\n'), ((2648, 2682), 'matplotlib.pyplot.imshow', 'plt.imshow', (['thrsh_img'], {'cmap': '"""gray"""'}), "(thrsh_img, cmap='gray')\n", (2658, 2682), True, 'import matplotlib.pyplot as plt\n'), ((2687, 2715), 'matplotlib.pyplot.title', 'plt.title', (['"""Color Threshold"""'], {}), "('Color Threshold')\n", (2696, 2715), True, 'import matplotlib.pyplot as plt\n'), ((2720, 2770), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""output_images/post_color_thresh.png"""'], {}), "('output_images/post_color_thresh.png')\n", (2731, 2770), True, 'import matplotlib.pyplot as plt\n'), ((2775, 2785), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2783, 2785), True, 'import matplotlib.pyplot as plt\n'), ((3154, 3225), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img_RGB_in', 'Xfrm', 'img_size'], {'flags': 'cv2.INTER_LINEAR'}), '(img_RGB_in, Xfrm, img_size, flags=cv2.INTER_LINEAR)\n', (3173, 3225), False, 'import cv2\n'), ((3322, 3359), 'matplotlib.image.imread', 'mpimg.imread', (['"""test_images/test6.jpg"""'], {}), "('test_images/test6.jpg')\n", (3334, 3359), True, 'import matplotlib.image as mpimg\n'), ((3406, 3419), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (3416, 3419), True, 'import matplotlib.pyplot as plt\n'), ((3424, 3439), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (3434, 3439), True, 'import matplotlib.pyplot as plt\n'), ((3444, 3471), 'matplotlib.pyplot.title', 'plt.title', (['"""Original Image"""'], {}), "('Original Image')\n", (3453, 3471), True, 'import matplotlib.pyplot as plt\n'), ((3476, 3521), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/pre_top_down.png"""'], {}), "('output_images/pre_top_down.png')\n", (3487, 3521), True, 'import matplotlib.pyplot as plt\n'), ((3526, 3539), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (3536, 3539), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3562), 'matplotlib.pyplot.imshow', 'plt.imshow', (['warped'], {}), '(warped)\n', (3554, 3562), True, 'import matplotlib.pyplot as plt\n'), ((3567, 3598), 'matplotlib.pyplot.title', 'plt.title', (['"""Top Down View Warp"""'], {}), "('Top Down View Warp')\n", (3576, 3598), True, 'import matplotlib.pyplot as plt\n'), ((3603, 3649), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/post_top_down.png"""'], {}), "('output_images/post_top_down.png')\n", (3614, 3649), True, 'import matplotlib.pyplot as plt\n'), ((3654, 3664), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3662, 3664), True, 'import matplotlib.pyplot as plt\n'), ((3905, 3949), 'cv2.cvtColor', 'cv2.cvtColor', (['img_RGB_in', 'cv2.COLOR_RGB2GRAY'], {}), '(img_RGB_in, cv2.COLOR_RGB2GRAY)\n', (3917, 3949), False, 'import cv2\n'), ((4119, 4158), 'numpy.zeros_like', 'np.zeros_like', (['img_GRAY'], {'dtype': 'np.uint8'}), '(img_GRAY, dtype=np.uint8)\n', (4132, 4158), True, 'import numpy as np\n'), ((4529, 4566), 'matplotlib.image.imread', 'mpimg.imread', (['"""test_images/test6.jpg"""'], {}), "('test_images/test6.jpg')\n", (4541, 4566), True, 'import matplotlib.image as mpimg\n'), ((4657, 4670), 'matplotlib.pyplot.figure', 'plt.figure', (['(6)'], {}), '(6)\n', (4667, 4670), True, 'import matplotlib.pyplot as plt\n'), ((4675, 4690), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4685, 4690), True, 'import matplotlib.pyplot as plt\n'), ((4695, 4743), 'matplotlib.pyplot.title', 'plt.title', (['"""Original Top Down Transformed Image"""'], {}), "('Original Top Down Transformed Image')\n", (4704, 4743), True, 'import matplotlib.pyplot as plt\n'), ((4748, 4796), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/pre_grad_thresh.png"""'], {}), "('output_images/pre_grad_thresh.png')\n", (4759, 4796), True, 'import matplotlib.pyplot as plt\n'), ((4801, 4814), 'matplotlib.pyplot.figure', 'plt.figure', (['(7)'], {}), '(7)\n', (4811, 4814), True, 'import matplotlib.pyplot as plt\n'), ((4819, 4853), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['thrsh_img'], {'cmap': '"""gray"""'}), "(thrsh_img, cmap='gray')\n", (4829, 4853), True, 'import matplotlib.pyplot as plt\n'), ((4858, 4889), 'matplotlib.pyplot.title', 'plt.title', (['"""Gradient Threshold"""'], {}), "('Gradient Threshold')\n", (4867, 4889), True, 'import matplotlib.pyplot as plt\n'), ((4894, 4943), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/post_grad_thresh.png"""'], {}), "('output_images/post_grad_thresh.png')\n", (4905, 4943), True, 'import matplotlib.pyplot as plt\n'), ((4948, 4958), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4956, 4958), True, 'import matplotlib.pyplot as plt\n'), ((27203, 27240), 'matplotlib.image.imread', 'mpimg.imread', (['"""test_images/test6.jpg"""'], {}), "('test_images/test6.jpg')\n", (27215, 27240), True, 'import matplotlib.image as mpimg\n'), ((27459, 27472), 'matplotlib.pyplot.figure', 'plt.figure', (['(7)'], {}), '(7)\n', (27469, 27472), True, 'import matplotlib.pyplot as plt\n'), ((27477, 27492), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (27487, 27492), True, 'import matplotlib.pyplot as plt\n'), ((27497, 27524), 'matplotlib.pyplot.title', 'plt.title', (['"""Original Image"""'], {}), "('Original Image')\n", (27506, 27524), True, 'import matplotlib.pyplot as plt\n'), ((27529, 27580), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/original_histogram.png"""'], {}), "('output_images/original_histogram.png')\n", (27540, 27580), True, 'import matplotlib.pyplot as plt\n'), ((27585, 27598), 'matplotlib.pyplot.figure', 'plt.figure', (['(8)'], {}), '(8)\n', (27595, 27598), True, 'import matplotlib.pyplot as plt\n'), ((27603, 27638), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_BIN_in'], {'cmap': '"""gray"""'}), "(img_BIN_in, cmap='gray')\n", (27613, 27638), True, 'import matplotlib.pyplot as plt\n'), ((27643, 27677), 'matplotlib.pyplot.title', 'plt.title', (['"""Original Binary Image"""'], {}), "('Original Binary Image')\n", (27652, 27677), True, 'import matplotlib.pyplot as plt\n'), ((27682, 27737), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/original_bin_histogram.png"""'], {}), "('output_images/original_bin_histogram.png')\n", (27693, 27737), True, 'import matplotlib.pyplot as plt\n'), ((27742, 27755), 'matplotlib.pyplot.figure', 'plt.figure', (['(9)'], {}), '(9)\n', (27752, 27755), True, 'import matplotlib.pyplot as plt\n'), ((27760, 27779), 'matplotlib.pyplot.plot', 'plt.plot', (['histogram'], {}), '(histogram)\n', (27768, 27779), True, 'import matplotlib.pyplot as plt\n'), ((27784, 27806), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram"""'], {}), "('Histogram')\n", (27793, 27806), True, 'import matplotlib.pyplot as plt\n'), ((27811, 27853), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/histogram.png"""'], {}), "('output_images/histogram.png')\n", (27822, 27853), True, 'import matplotlib.pyplot as plt\n'), ((27858, 27868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27866, 27868), True, 'import matplotlib.pyplot as plt\n'), ((27935, 27972), 'matplotlib.image.imread', 'mpimg.imread', (['"""test_images/test6.jpg"""'], {}), "('test_images/test6.jpg')\n", (27947, 27972), True, 'import matplotlib.image as mpimg\n'), ((27977, 27991), 'matplotlib.pyplot.figure', 'plt.figure', (['(10)'], {}), '(10)\n', (27987, 27991), True, 'import matplotlib.pyplot as plt\n'), ((27996, 28011), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (28006, 28011), True, 
'import matplotlib.pyplot as plt\n'), ((28016, 28043), 'matplotlib.pyplot.title', 'plt.title', (['"""Original Image"""'], {}), "('Original Image')\n", (28025, 28043), True, 'import matplotlib.pyplot as plt\n'), ((28692, 28706), 'matplotlib.pyplot.figure', 'plt.figure', (['(11)'], {}), '(11)\n', (28702, 28706), True, 'import matplotlib.pyplot as plt\n'), ((28711, 28745), 'matplotlib.pyplot.imshow', 'plt.imshow', (['lane_lines.img_RGB_out'], {}), '(lane_lines.img_RGB_out)\n', (28721, 28745), True, 'import matplotlib.pyplot as plt\n'), ((28750, 28813), 'matplotlib.pyplot.title', 'plt.title', (['"""2nd Order Polynomial Fit - Histogram Search Method"""'], {}), "('2nd Order Polynomial Fit - Histogram Search Method')\n", (28759, 28813), True, 'import matplotlib.pyplot as plt\n'), ((28818, 28860), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/poly_hist.png"""'], {}), "('output_images/poly_hist.png')\n", (28829, 28860), True, 'import matplotlib.pyplot as plt\n'), ((30607, 30617), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30615, 30617), True, 'import matplotlib.pyplot as plt\n'), ((30665, 30702), 'matplotlib.image.imread', 'mpimg.imread', (['"""test_images/test6.jpg"""'], {}), "('test_images/test6.jpg')\n", (30677, 30702), True, 'import matplotlib.image as mpimg\n'), ((30743, 30757), 'matplotlib.pyplot.figure', 'plt.figure', (['(13)'], {}), '(13)\n', (30753, 30757), True, 'import matplotlib.pyplot as plt\n'), ((30762, 30777), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (30772, 30777), True, 'import matplotlib.pyplot as plt\n'), ((30782, 30809), 'matplotlib.pyplot.title', 'plt.title', (['"""Original Image"""'], {}), "('Original Image')\n", (30791, 30809), True, 'import matplotlib.pyplot as plt\n'), ((30814, 30828), 'matplotlib.pyplot.figure', 'plt.figure', (['(14)'], {}), '(14)\n', (30824, 30828), True, 'import matplotlib.pyplot as plt\n'), ((30876, 30900), 'matplotlib.pyplot.title', 'plt.title', (['"""Found Lines"""'], {}), "('Found Lines')\n", (30885, 30900), True, 'import matplotlib.pyplot as plt\n'), ((30905, 30951), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/full_pipeline.png"""'], {}), "('output_images/full_pipeline.png')\n", (30916, 30951), True, 'import matplotlib.pyplot as plt\n'), ((30956, 30966), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30964, 30966), True, 'import matplotlib.pyplot as plt\n'), ((31011, 31048), 'matplotlib.image.imread', 'mpimg.imread', (['"""test_images/test6.jpg"""'], {}), "('test_images/test6.jpg')\n", (31023, 31048), True, 'import matplotlib.image as mpimg\n'), ((31685, 31733), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['"""test_videos/challenge_video.mp4"""'], {}), "('test_videos/challenge_video.mp4')\n", (31698, 31733), False, 'from moviepy.editor import VideoFileClip\n'), ((2218, 2240), 'numpy.where', 'np.where', (['(img_out == 0)'], {}), '(img_out == 0)\n', (2226, 2240), True, 'import numpy as np\n'), ((2259, 2278), 'numpy.copy', 'np.copy', (['img_RGB_in'], {}), '(img_RGB_in)\n', (2266, 2278), True, 'import numpy as np\n'), ((4005, 4042), 'cv2.Sobel', 'cv2.Sobel', (['img_GRAY', 'cv2.CV_64F', '(1)', '(0)'], {}), '(img_GRAY, cv2.CV_64F, 1, 0)\n', (4014, 4042), False, 'import cv2\n'), ((4296, 4318), 'numpy.where', 'np.where', (['(img_out == 0)'], {}), '(img_out == 0)\n', (4304, 4318), True, 'import numpy as np\n'), ((4337, 4356), 'numpy.copy', 'np.copy', (['img_RGB_in'], {}), '(img_RGB_in)\n', (4344, 4356), True, 'import numpy as np\n'), ((5368, 5392), 'numpy.int', 
'np.int', (['(frame_width // 2)'], {}), '(frame_width // 2)\n', (5374, 5392), True, 'import numpy as np\n'), ((5431, 5477), 'numpy.linspace', 'np.linspace', (['(0)', '(frame_height - 1)', 'frame_height'], {}), '(0, frame_height - 1, frame_height)\n', (5442, 5477), True, 'import numpy as np\n'), ((6986, 7023), 'numpy.int', 'np.int', (['(frame_height // self.nwindows)'], {}), '(frame_height // self.nwindows)\n', (6992, 7023), True, 'import numpy as np\n'), ((7808, 7853), 'numpy.empty', 'np.empty', (['(0, self.ploty.size)'], {'dtype': '"""float"""'}), "((0, self.ploty.size), dtype='float')\n", (7816, 7853), True, 'import numpy as np\n'), ((7881, 7926), 'numpy.empty', 'np.empty', (['(0, self.ploty.size)'], {'dtype': '"""float"""'}), "((0, self.ploty.size), dtype='float')\n", (7889, 7926), True, 'import numpy as np\n'), ((8033, 8073), 'numpy.zeros', 'np.zeros', (['self.ploty.size'], {'dtype': '"""float"""'}), "(self.ploty.size, dtype='float')\n", (8041, 8073), True, 'import numpy as np\n'), ((8108, 8148), 'numpy.zeros', 'np.zeros', (['self.ploty.size'], {'dtype': '"""float"""'}), "(self.ploty.size, dtype='float')\n", (8116, 8148), True, 'import numpy as np\n'), ((8245, 8291), 'numpy.zeros', 'np.zeros', (['(self.poly_fit_dim + 1)'], {'dtype': '"""float"""'}), "(self.poly_fit_dim + 1, dtype='float')\n", (8253, 8291), True, 'import numpy as np\n'), ((8326, 8372), 'numpy.zeros', 'np.zeros', (['(self.poly_fit_dim + 1)'], {'dtype': '"""float"""'}), "(self.poly_fit_dim + 1, dtype='float')\n", (8334, 8372), True, 'import numpy as np\n'), ((8468, 8519), 'numpy.empty', 'np.empty', (['(0, self.poly_fit_dim + 1)'], {'dtype': '"""float"""'}), "((0, self.poly_fit_dim + 1), dtype='float')\n", (8476, 8519), True, 'import numpy as np\n'), ((8549, 8600), 'numpy.empty', 'np.empty', (['(0, self.poly_fit_dim + 1)'], {'dtype': '"""float"""'}), "((0, self.poly_fit_dim + 1), dtype='float')\n", (8557, 8600), True, 'import numpy as np\n'), ((8700, 8746), 'numpy.zeros', 'np.zeros', (['(self.poly_fit_dim + 1)'], {'dtype': '"""float"""'}), "(self.poly_fit_dim + 1, dtype='float')\n", (8708, 8746), True, 'import numpy as np\n'), ((8780, 8826), 'numpy.zeros', 'np.zeros', (['(self.poly_fit_dim + 1)'], {'dtype': '"""float"""'}), "(self.poly_fit_dim + 1, dtype='float')\n", (8788, 8826), True, 'import numpy as np\n'), ((9813, 9879), 'numpy.sum', 'np.sum', (['self.img_BIN_in[self.img_BIN_in.shape[0] // 2:, :]'], {'axis': '(0)'}), '(self.img_BIN_in[self.img_BIN_in.shape[0] // 2:, :], axis=0)\n', (9819, 9879), True, 'import numpy as np\n'), ((10093, 10138), 'numpy.empty', 'np.empty', (['(0, self.ploty.size)'], {'dtype': '"""float"""'}), "((0, self.ploty.size), dtype='float')\n", (10101, 10138), True, 'import numpy as np\n'), ((10166, 10211), 'numpy.empty', 'np.empty', (['(0, self.ploty.size)'], {'dtype': '"""float"""'}), "((0, self.ploty.size), dtype='float')\n", (10174, 10211), True, 'import numpy as np\n'), ((10241, 10292), 'numpy.empty', 'np.empty', (['(0, self.poly_fit_dim + 1)'], {'dtype': '"""float"""'}), "((0, self.poly_fit_dim + 1), dtype='float')\n", (10249, 10292), True, 'import numpy as np\n'), ((10322, 10373), 'numpy.empty', 'np.empty', (['(0, self.poly_fit_dim + 1)'], {'dtype': '"""float"""'}), "((0, self.poly_fit_dim + 1), dtype='float')\n", (10330, 10373), True, 'import numpy as np\n'), ((10547, 10609), 'numpy.dstack', 'np.dstack', (['(self.img_BIN_in, self.img_BIN_in, self.img_BIN_in)'], {}), '((self.img_BIN_in, self.img_BIN_in, self.img_BIN_in))\n', (10556, 10609), True, 'import numpy as np\n'), ((10778, 10814), 
'numpy.int', 'np.int', (['(self.histogram.shape[0] // 2)'], {}), '(self.histogram.shape[0] // 2)\n', (10784, 10814), True, 'import numpy as np\n'), ((10834, 10877), 'numpy.argmax', 'np.argmax', (['self.histogram[:midpoint_height]'], {}), '(self.histogram[:midpoint_height])\n', (10843, 10877), True, 'import numpy as np\n'), ((11110, 11130), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (11118, 11130), True, 'import numpy as np\n'), ((11150, 11170), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (11158, 11170), True, 'import numpy as np\n'), ((13661, 13691), 'numpy.concatenate', 'np.concatenate', (['left_lane_inds'], {}), '(left_lane_inds)\n', (13675, 13691), True, 'import numpy as np\n'), ((13718, 13749), 'numpy.concatenate', 'np.concatenate', (['right_lane_inds'], {}), '(right_lane_inds)\n', (13732, 13749), True, 'import numpy as np\n'), ((14714, 14749), 'numpy.polyfit', 'np.polyfit', (['y', 'x', 'self.poly_fit_dim'], {}), '(y, x, self.poly_fit_dim)\n', (14724, 14749), True, 'import numpy as np\n'), ((15502, 15522), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (15510, 15522), True, 'import numpy as np\n'), ((15542, 15562), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (15550, 15562), True, 'import numpy as np\n'), ((19095, 19141), 'numpy.zeros', 'np.zeros', (['(self.poly_fit_dim + 1)'], {'dtype': '"""float"""'}), "(self.poly_fit_dim + 1, dtype='float')\n", (19103, 19141), True, 'import numpy as np\n'), ((19175, 19221), 'numpy.zeros', 'np.zeros', (['(self.poly_fit_dim + 1)'], {'dtype': '"""float"""'}), "(self.poly_fit_dim + 1, dtype='float')\n", (19183, 19221), True, 'import numpy as np\n'), ((19251, 19291), 'numpy.zeros', 'np.zeros', (['self.ploty.size'], {'dtype': '"""float"""'}), "(self.ploty.size, dtype='float')\n", (19259, 19291), True, 'import numpy as np\n'), ((19326, 19366), 'numpy.zeros', 'np.zeros', (['self.ploty.size'], {'dtype': '"""float"""'}), "(self.ploty.size, dtype='float')\n", (19334, 19366), True, 'import numpy as np\n'), ((25391, 25435), 'numpy.dstack', 'np.dstack', (['(warp_zero, warp_zero, warp_zero)'], {}), '((warp_zero, warp_zero, warp_zero))\n', (25400, 25435), True, 'import numpy as np\n'), ((25787, 25819), 'numpy.hstack', 'np.hstack', (['(pts_left, pts_right)'], {}), '((pts_left, pts_right))\n', (25796, 25819), True, 'import numpy as np\n'), ((26063, 26149), 'cv2.warpPerspective', 'cv2.warpPerspective', (['color_warp', 'Minv', '(img_RGB_in.shape[1], img_RGB_in.shape[0])'], {}), '(color_warp, Minv, (img_RGB_in.shape[1], img_RGB_in.\n shape[0]))\n', (26082, 26149), False, 'import cv2\n'), ((26220, 26267), 'cv2.addWeighted', 'cv2.addWeighted', (['img_RGB_in', '(1)', 'newwarp', '(0.3)', '(0)'], {}), '(img_RGB_in, 1, newwarp, 0.3, 0)\n', (26235, 26267), False, 'import cv2\n'), ((28973, 29017), 'numpy.vstack', 'np.vstack', (['(lane_lines.x_fit_all_L, x_fit_L)'], {}), '((lane_lines.x_fit_all_L, x_fit_L))\n', (28982, 29017), True, 'import numpy as np\n'), ((29051, 29095), 'numpy.vstack', 'np.vstack', (['(lane_lines.x_fit_all_R, x_fit_R)'], {}), '((lane_lines.x_fit_all_R, x_fit_R))\n', (29060, 29095), True, 'import numpy as np\n'), ((29170, 29239), 'numpy.vstack', 'np.vstack', (['(lane_lines.coef_fit_all_L, lane_lines.coef_fit_current_L)'], {}), '((lane_lines.coef_fit_all_L, lane_lines.coef_fit_current_L))\n', (29179, 29239), True, 'import numpy as np\n'), ((29276, 29345), 'numpy.vstack', 'np.vstack', (['(lane_lines.coef_fit_all_R, lane_lines.coef_fit_current_R)'], {}), 
'((lane_lines.coef_fit_all_R, lane_lines.coef_fit_current_R))\n', (29285, 29345), True, 'import numpy as np\n'), ((30421, 30435), 'matplotlib.pyplot.figure', 'plt.figure', (['(12)'], {}), '(12)\n', (30431, 30435), True, 'import matplotlib.pyplot as plt\n'), ((30444, 30478), 'matplotlib.pyplot.imshow', 'plt.imshow', (['lane_lines.img_RGB_out'], {}), '(lane_lines.img_RGB_out)\n', (30454, 30478), True, 'import matplotlib.pyplot as plt\n'), ((30487, 30551), 'matplotlib.pyplot.title', 'plt.title', (['"""2nd Order Polynomial Fit - Polynomial Search Method"""'], {}), "('2nd Order Polynomial Fit - Polynomial Search Method')\n", (30496, 30551), True, 'import matplotlib.pyplot as plt\n'), ((30560, 30602), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output_images/poly_poly.png"""'], {}), "('output_images/poly_poly.png')\n", (30571, 30602), True, 'import matplotlib.pyplot as plt\n'), ((4086, 4103), 'numpy.max', 'np.max', (['abs_sobel'], {}), '(abs_sobel)\n', (4092, 4103), True, 'import numpy as np\n'), ((10900, 10943), 'numpy.argmax', 'np.argmax', (['self.histogram[midpoint_height:]'], {}), '(self.histogram[midpoint_height:])\n', (10909, 10943), True, 'import numpy as np\n'), ((12219, 12328), 'cv2.rectangle', 'cv2.rectangle', (['self.img_RGB_out', '(win_xleft_low, win_y_low)', '(win_xleft_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(self.img_RGB_out, (win_xleft_low, win_y_low), (win_xleft_high,\n win_y_high), (0, 255, 0), 2)\n', (12232, 12328), False, 'import cv2\n'), ((12344, 12456), 'cv2.rectangle', 'cv2.rectangle', (['self.img_RGB_out', '(win_xright_low, win_y_low)', '(win_xright_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(self.img_RGB_out, (win_xright_low, win_y_low), (\n win_xright_high, win_y_high), (0, 255, 0), 2)\n', (12357, 12456), False, 'import cv2\n'), ((14962, 14982), 'numpy.maximum', 'np.maximum', (['x_fit', '(0)'], {}), '(x_fit, 0)\n', (14972, 14982), True, 'import numpy as np\n'), ((16866, 16928), 'numpy.dstack', 'np.dstack', (['(self.img_BIN_in, self.img_BIN_in, self.img_BIN_in)'], {}), '((self.img_BIN_in, self.img_BIN_in, self.img_BIN_in))\n', (16875, 16928), True, 'import numpy as np\n'), ((17146, 17177), 'numpy.zeros_like', 'np.zeros_like', (['self.img_RGB_out'], {}), '(self.img_RGB_out)\n', (17159, 17177), True, 'import numpy as np\n'), ((17976, 18025), 'numpy.hstack', 'np.hstack', (['(left_line_window1, left_line_window2)'], {}), '((left_line_window1, left_line_window2))\n', (17985, 18025), True, 'import numpy as np\n'), ((18284, 18335), 'numpy.hstack', 'np.hstack', (['(right_line_window1, right_line_window2)'], {}), '((right_line_window1, right_line_window2))\n', (18293, 18335), True, 'import numpy as np\n'), ((18587, 18643), 'cv2.addWeighted', 'cv2.addWeighted', (['self.img_RGB_out', '(1)', 'window_img', '(0.3)', '(0)'], {}), '(self.img_RGB_out, 1, window_img, 0.3, 0)\n', (18602, 18643), False, 'import cv2\n'), ((19528, 19566), 'numpy.delete', 'np.delete', (['self.x_fit_all_L', '(0)'], {'axis': '(0)'}), '(self.x_fit_all_L, 0, axis=0)\n', (19537, 19566), True, 'import numpy as np\n'), ((19598, 19636), 'numpy.delete', 'np.delete', (['self.x_fit_all_R', '(0)'], {'axis': '(0)'}), '(self.x_fit_all_R, 0, axis=0)\n', (19607, 19636), True, 'import numpy as np\n'), ((19671, 19712), 'numpy.delete', 'np.delete', (['self.coef_fit_all_L', '(0)'], {'axis': '(0)'}), '(self.coef_fit_all_L, 0, axis=0)\n', (19680, 19712), True, 'import numpy as np\n'), ((19747, 19788), 'numpy.delete', 'np.delete', (['self.coef_fit_all_R', '(0)'], {'axis': '(0)'}), '(self.coef_fit_all_R, 0, 
axis=0)\n', (19756, 19788), True, 'import numpy as np\n'), ((21603, 21623), 'numpy.absolute', 'np.absolute', (['(2 * A_L)'], {}), '(2 * A_L)\n', (21614, 21623), True, 'import numpy as np\n'), ((21698, 21718), 'numpy.absolute', 'np.absolute', (['(2 * A_R)'], {}), '(2 * A_R)\n', (21709, 21718), True, 'import numpy as np\n'), ((23613, 23651), 'numpy.vstack', 'np.vstack', (['(self.x_fit_all_L, x_fit_L)'], {}), '((self.x_fit_all_L, x_fit_L))\n', (23622, 23651), True, 'import numpy as np\n'), ((23683, 23721), 'numpy.vstack', 'np.vstack', (['(self.x_fit_all_R, x_fit_R)'], {}), '((self.x_fit_all_R, x_fit_R))\n', (23692, 23721), True, 'import numpy as np\n'), ((23811, 23868), 'numpy.vstack', 'np.vstack', (['(self.coef_fit_all_L, self.coef_fit_current_L)'], {}), '((self.coef_fit_all_L, self.coef_fit_current_L))\n', (23820, 23868), True, 'import numpy as np\n'), ((23903, 23960), 'numpy.vstack', 'np.vstack', (['(self.coef_fit_all_R, self.coef_fit_current_R)'], {}), '((self.coef_fit_all_R, self.coef_fit_current_R))\n', (23912, 23960), True, 'import numpy as np\n'), ((25914, 25928), 'numpy.int_', 'np.int_', (['[pts]'], {}), '([pts])\n', (25921, 25928), True, 'import numpy as np\n'), ((18442, 18466), 'numpy.int_', 'np.int_', (['[left_line_pts]'], {}), '([left_line_pts])\n', (18449, 18466), True, 'import numpy as np\n'), ((18517, 18542), 'numpy.int_', 'np.int_', (['[right_line_pts]'], {}), '([right_line_pts])\n', (18524, 18542), True, 'import numpy as np\n'), ((24559, 24569), 'sys.exit', 'sys.exit', ([], {}), '()\n', (24567, 24569), False, 'import sys\n'), ((25322, 25352), 'numpy.zeros_like', 'np.zeros_like', (['self.img_BIN_in'], {}), '(self.img_BIN_in)\n', (25335, 25352), True, 'import numpy as np\n'), ((13327, 13360), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (13334, 13360), True, 'import numpy as np\n'), ((13550, 13584), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (13557, 13584), True, 'import numpy as np\n'), ((25627, 25669), 'numpy.vstack', 'np.vstack', (['[self.x_fit_best_L, self.ploty]'], {}), '([self.x_fit_best_L, self.ploty])\n', (25636, 25669), True, 'import numpy as np\n'), ((17776, 17827), 'numpy.vstack', 'np.vstack', (['[x_fit_L - self.margin_poly, self.ploty]'], {}), '([x_fit_L - self.margin_poly, self.ploty])\n', (17785, 17827), True, 'import numpy as np\n'), ((18082, 18133), 'numpy.vstack', 'np.vstack', (['[x_fit_R - self.margin_poly, self.ploty]'], {}), '([x_fit_R - self.margin_poly, self.ploty])\n', (18091, 18133), True, 'import numpy as np\n'), ((25726, 25768), 'numpy.vstack', 'np.vstack', (['[self.x_fit_best_R, self.ploty]'], {}), '([self.x_fit_best_R, self.ploty])\n', (25735, 25768), True, 'import numpy as np\n'), ((17894, 17945), 'numpy.vstack', 'np.vstack', (['[x_fit_L + self.margin_poly, self.ploty]'], {}), '([x_fit_L + self.margin_poly, self.ploty])\n', (17903, 17945), True, 'import numpy as np\n'), ((18201, 18252), 'numpy.vstack', 'np.vstack', (['[x_fit_R + self.margin_poly, self.ploty]'], {}), '([x_fit_R + self.margin_poly, self.ploty])\n', (18210, 18252), True, 'import numpy as np\n')]
|
import math
import random
from typing import Tuple
import cv2
import numpy as np
def np_free_form_mask(
max_vertex: int, max_length: int, max_brush_width: int, max_angle: int, height: int, width: int
) -> np.ndarray:
mask = np.zeros((height, width), np.float32)
num_vertex = random.randint(0, max_vertex)
start_y = random.randint(0, height - 1)
start_x = random.randint(0, width - 1)
brush_width = 0
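    # Random walk: starting from (start_y, start_x), pick a random angle, length
    # and brush width for each vertex, draw a thick segment to the next point,
    # and stamp a circle at each joint so consecutive segments connect smoothly.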
for i in range(num_vertex):
angle = random.random() * max_angle
angle = math.radians(angle)
if i % 2 == 0:
angle = 2 * math.pi - angle
length = random.randint(0, max_length)
brush_width = random.randint(10, max_brush_width) // 2 * 2
next_y = start_y + length * np.cos(angle)
next_x = start_x + length * np.sin(angle)
        next_y = np.maximum(np.minimum(next_y, height - 1), 0).astype(int)
        next_x = np.maximum(np.minimum(next_x, width - 1), 0).astype(int)
cv2.line(mask, (start_y, start_x), (next_y, next_x), 1, brush_width)
cv2.circle(mask, (start_y, start_x), brush_width // 2, 2)
start_y, start_x = next_y, next_x
cv2.circle(mask, (start_y, start_x), brush_width // 2, 2)
return mask
def generate_stroke_mask(
image_size: Tuple[int, int],
parts: int = 7,
max_vertex: int = 25,
max_length: int = 80,
max_brush_width: int = 80,
max_angle: int = 360,
) -> np.ndarray:
mask = np.zeros(image_size, dtype=np.float32)
for _ in range(parts):
mask = mask + np_free_form_mask(
max_vertex, max_length, max_brush_width, max_angle, image_size[0], image_size[1]
)
return np.minimum(mask, 1.0)
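# --- Minimal usage sketch, not part of the original file; the 512x512 size,
# --- the random stand-in image, and the masking convention are assumptions.
if __name__ == "__main__":
    hole_mask = generate_stroke_mask((512, 512))            # float mask in [0, 1]
    image = np.random.rand(512, 512, 3).astype(np.float32)  # stand-in RGB image
    holed = image * (1.0 - hole_mask[..., None])             # zero out masked pixels
    print(hole_mask.shape, float(hole_mask.max()), holed.shape)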
|
[
"numpy.minimum",
"cv2.line",
"math.radians",
"cv2.circle",
"numpy.zeros",
"numpy.cos",
"numpy.sin",
"random.random",
"random.randint"
] |
[((235, 272), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.float32'], {}), '((height, width), np.float32)\n', (243, 272), True, 'import numpy as np\n'), ((291, 320), 'random.randint', 'random.randint', (['(0)', 'max_vertex'], {}), '(0, max_vertex)\n', (305, 320), False, 'import random\n'), ((335, 364), 'random.randint', 'random.randint', (['(0)', '(height - 1)'], {}), '(0, height - 1)\n', (349, 364), False, 'import random\n'), ((379, 407), 'random.randint', 'random.randint', (['(0)', '(width - 1)'], {}), '(0, width - 1)\n', (393, 407), False, 'import random\n'), ((1168, 1225), 'cv2.circle', 'cv2.circle', (['mask', '(start_y, start_x)', '(brush_width // 2)', '(2)'], {}), '(mask, (start_y, start_x), brush_width // 2, 2)\n', (1178, 1225), False, 'import cv2\n'), ((1461, 1499), 'numpy.zeros', 'np.zeros', (['image_size'], {'dtype': 'np.float32'}), '(image_size, dtype=np.float32)\n', (1469, 1499), True, 'import numpy as np\n'), ((1683, 1704), 'numpy.minimum', 'np.minimum', (['mask', '(1.0)'], {}), '(mask, 1.0)\n', (1693, 1704), True, 'import numpy as np\n'), ((521, 540), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (533, 540), False, 'import math\n'), ((623, 652), 'random.randint', 'random.randint', (['(0)', 'max_length'], {}), '(0, max_length)\n', (637, 652), False, 'import random\n'), ((986, 1054), 'cv2.line', 'cv2.line', (['mask', '(start_y, start_x)', '(next_y, next_x)', '(1)', 'brush_width'], {}), '(mask, (start_y, start_x), (next_y, next_x), 1, brush_width)\n', (994, 1054), False, 'import cv2\n'), ((1063, 1120), 'cv2.circle', 'cv2.circle', (['mask', '(start_y, start_x)', '(brush_width // 2)', '(2)'], {}), '(mask, (start_y, start_x), brush_width // 2, 2)\n', (1073, 1120), False, 'import cv2\n'), ((477, 492), 'random.random', 'random.random', ([], {}), '()\n', (490, 492), False, 'import random\n'), ((675, 710), 'random.randint', 'random.randint', (['(10)', 'max_brush_width'], {}), '(10, max_brush_width)\n', (689, 710), False, 'import random\n'), ((757, 770), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (763, 770), True, 'import numpy as np\n'), ((807, 820), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (813, 820), True, 'import numpy as np\n'), ((850, 880), 'numpy.minimum', 'np.minimum', (['next_y', '(height - 1)'], {}), '(next_y, height - 1)\n', (860, 880), True, 'import numpy as np\n'), ((928, 957), 'numpy.minimum', 'np.minimum', (['next_x', '(width - 1)'], {}), '(next_x, width - 1)\n', (938, 957), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# Copyright (c) 2015-2019 Analog Devices, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Modified versions of the software must be conspicuously marked as such.
# - This software is licensed solely and exclusively for use with
# processors/products manufactured by or for Analog Devices, Inc.
# - This software may not be combined or merged with other code in any manner
# that would cause the software to become subject to terms and conditions
# which differ from those listed here.
# - Neither the name of Analog Devices, Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
# - The use of this software may or may not infringe the patent rights of one
# or more patent holders. This license does not release you from the
# requirement that you obtain separate licenses from these patent holders
# to use this software.
#
# THIS SOFTWARE IS PROVIDED BY ANALOG DEVICES, INC. AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# NON-INFRINGEMENT, TITLE, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANALOG DEVICES, INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, PUNITIVE OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# DAMAGES ARISING OUT OF CLAIMS OF INTELLECTUAL PROPERTY RIGHTS INFRINGEMENT;
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# 2019-01-10-7CBSD SLA
# -----------------------------------------------------------------------
'''
Simulation of some of the AD7124's filters.
This program QUALITATIVELY derives a filter of a type similar to that
used in the AD7124 family of ADCs; it is not bit-accurate, so refer
to the datasheet for guaranteed specifications.
Tested with Python 3.7, Anaconda distribution
'''
from numpy import max  # numpy's max is used on the complex freqz output below
import numpy as np
from scipy import signal
from matplotlib import pyplot as plt
plot_sinc4 = True
# Base sample rate in high-power mode, from the AD7124 datasheet
f0 = 19200
# Calculate SINC1 oversample ratios for 50, 60Hz
osr50 = int(f0/50)
osr60 = int(f0/60)
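# A length-N boxcar at sample rate f0 has frequency-response nulls at every
# multiple of f0/N, so an OSR of f0/50 places nulls at 50 Hz and its harmonics
# (and f0/60 likewise at 60 Hz).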
# Create "boxcar" SINC1 filters
sinc1_50 = np.ones(osr50)
sinc1_60 = np.ones(osr60)
# Calculate higher order filters
sinc2_50 = np.convolve(sinc1_50, sinc1_50)
sinc3_50 = np.convolve(sinc2_50, sinc1_50)
sinc4_50 = np.convolve(sinc2_50, sinc2_50)
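# Convolving boxcars in time multiplies their sinc responses in frequency,
# so each additional convolution deepens the notches and steepens the
# overall roll-off (SINC2, SINC3, SINC4).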
# Here's the filter from datasheet Figure 91:
# a SINC4-ish filter with three zeros at 50Hz and one at 60Hz.
filt_50_60_rej = np.convolve(sinc3_50, sinc1_60)
# Normalize to unity gain by dividing by sum of all taps
sinc1_50 /= np.sum(sinc1_50)
sinc1_60 /= np.sum(sinc1_60)
sinc2_50 /= np.sum(sinc2_50)
sinc3_50 /= np.sum(sinc3_50)
sinc4_50 /= np.sum(sinc4_50)
filt_50_60_rej /= np.sum(filt_50_60_rej)
# freqz: Compute the frequency response of a digital filter.
# Older versions of SciPy return w as radians / sample; newer ones take an optional
# sample rate argument (fs). Computing frequencies (freqs)
# manually for backwards compatibility.
w, h = signal.freqz(filt_50_60_rej, 1, worN=16385, whole=False) #, fs=f0)
freqs = w * f0/(2.0*np.pi)
hmax = abs(max(h)) #Normalize to unity
response_dB = 20.0 * np.log10(abs(h)/hmax)
plt.figure(1)
plt.title('50Hz SINC1,2,4, and 50/60Hz SINC4 impulse responses')
plt.ylabel('tap val.')
plt.plot(sinc1_50)
plt.plot(sinc2_50)
plt.plot(sinc4_50)
plt.plot(filt_50_60_rej)
plt.xlabel('tap number')
plt.xlim(left=-100, right=1.1 * len(filt_50_60_rej))
plt.grid()
plt.figure(2)
plt.plot(freqs, response_dB, zorder=1)
plt.title('50/60Hz reject filter response')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Rejection (dB)')
plt.axis([0, 150, -120, 1])
plt.show()
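# --- Hedged check, not part of the original script: report the worst-case
# --- (least negative) rejection of the 50/60 Hz filter within +/-1 Hz of each
# --- notch; the 1 Hz band width is an arbitrary choice for illustration.
for f_notch in (50.0, 60.0):
    band = (freqs > f_notch - 1.0) & (freqs < f_notch + 1.0)
    print("Rejection within 1 Hz of %2.0f Hz: %.1f dB" % (f_notch, response_dB[band].max()))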
|
[
"numpy.convolve",
"matplotlib.pyplot.grid",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"numpy.max",
"numpy.sum",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"scipy.signal.freqz",
"matplotlib.pyplot.show"
] |
[((3234, 3248), 'numpy.ones', 'np.ones', (['osr50'], {}), '(osr50)\n', (3241, 3248), True, 'import numpy as np\n'), ((3260, 3274), 'numpy.ones', 'np.ones', (['osr60'], {}), '(osr60)\n', (3267, 3274), True, 'import numpy as np\n'), ((3320, 3351), 'numpy.convolve', 'np.convolve', (['sinc1_50', 'sinc1_50'], {}), '(sinc1_50, sinc1_50)\n', (3331, 3351), True, 'import numpy as np\n'), ((3363, 3394), 'numpy.convolve', 'np.convolve', (['sinc2_50', 'sinc1_50'], {}), '(sinc2_50, sinc1_50)\n', (3374, 3394), True, 'import numpy as np\n'), ((3406, 3437), 'numpy.convolve', 'np.convolve', (['sinc2_50', 'sinc2_50'], {}), '(sinc2_50, sinc2_50)\n', (3417, 3437), True, 'import numpy as np\n'), ((3564, 3595), 'numpy.convolve', 'np.convolve', (['sinc3_50', 'sinc1_60'], {}), '(sinc3_50, sinc1_60)\n', (3575, 3595), True, 'import numpy as np\n'), ((3666, 3682), 'numpy.sum', 'np.sum', (['sinc1_50'], {}), '(sinc1_50)\n', (3672, 3682), True, 'import numpy as np\n'), ((3695, 3711), 'numpy.sum', 'np.sum', (['sinc1_60'], {}), '(sinc1_60)\n', (3701, 3711), True, 'import numpy as np\n'), ((3724, 3740), 'numpy.sum', 'np.sum', (['sinc2_50'], {}), '(sinc2_50)\n', (3730, 3740), True, 'import numpy as np\n'), ((3753, 3769), 'numpy.sum', 'np.sum', (['sinc3_50'], {}), '(sinc3_50)\n', (3759, 3769), True, 'import numpy as np\n'), ((3782, 3798), 'numpy.sum', 'np.sum', (['sinc4_50'], {}), '(sinc4_50)\n', (3788, 3798), True, 'import numpy as np\n'), ((3817, 3839), 'numpy.sum', 'np.sum', (['filt_50_60_rej'], {}), '(filt_50_60_rej)\n', (3823, 3839), True, 'import numpy as np\n'), ((4089, 4145), 'scipy.signal.freqz', 'signal.freqz', (['filt_50_60_rej', '(1)'], {'worN': '(16385)', 'whole': '(False)'}), '(filt_50_60_rej, 1, worN=16385, whole=False)\n', (4101, 4145), False, 'from scipy import signal\n'), ((4267, 4280), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4277, 4280), True, 'from matplotlib import pyplot as plt\n'), ((4281, 4345), 'matplotlib.pyplot.title', 'plt.title', (['"""50Hz SINC1,2,4, and 50/60Hz SINC4 impulse responses"""'], {}), "('50Hz SINC1,2,4, and 50/60Hz SINC4 impulse responses')\n", (4290, 4345), True, 'from matplotlib import pyplot as plt\n'), ((4346, 4368), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""tap val."""'], {}), "('tap val.')\n", (4356, 4368), True, 'from matplotlib import pyplot as plt\n'), ((4369, 4387), 'matplotlib.pyplot.plot', 'plt.plot', (['sinc1_50'], {}), '(sinc1_50)\n', (4377, 4387), True, 'from matplotlib import pyplot as plt\n'), ((4388, 4406), 'matplotlib.pyplot.plot', 'plt.plot', (['sinc2_50'], {}), '(sinc2_50)\n', (4396, 4406), True, 'from matplotlib import pyplot as plt\n'), ((4407, 4425), 'matplotlib.pyplot.plot', 'plt.plot', (['sinc4_50'], {}), '(sinc4_50)\n', (4415, 4425), True, 'from matplotlib import pyplot as plt\n'), ((4426, 4450), 'matplotlib.pyplot.plot', 'plt.plot', (['filt_50_60_rej'], {}), '(filt_50_60_rej)\n', (4434, 4450), True, 'from matplotlib import pyplot as plt\n'), ((4451, 4475), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""tap number"""'], {}), "('tap number')\n", (4461, 4475), True, 'from matplotlib import pyplot as plt\n'), ((4529, 4539), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4537, 4539), True, 'from matplotlib import pyplot as plt\n'), ((4541, 4554), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (4551, 4554), True, 'from matplotlib import pyplot as plt\n'), ((4555, 4593), 'matplotlib.pyplot.plot', 'plt.plot', (['freqs', 'response_dB'], {'zorder': '(1)'}), '(freqs, response_dB, zorder=1)\n', (4563, 
4593), True, 'from matplotlib import pyplot as plt\n'), ((4594, 4637), 'matplotlib.pyplot.title', 'plt.title', (['"""50/60Hz reject filter response"""'], {}), "('50/60Hz reject filter response')\n", (4603, 4637), True, 'from matplotlib import pyplot as plt\n'), ((4638, 4661), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency"""'], {}), "('Frequency')\n", (4648, 4661), True, 'from matplotlib import pyplot as plt\n'), ((4662, 4685), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Rejection"""'], {}), "('Rejection')\n", (4672, 4685), True, 'from matplotlib import pyplot as plt\n'), ((4686, 4713), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 150, -120, 1]'], {}), '([0, 150, -120, 1])\n', (4694, 4713), True, 'from matplotlib import pyplot as plt\n'), ((4714, 4724), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4722, 4724), True, 'from matplotlib import pyplot as plt\n'), ((4194, 4200), 'numpy.max', 'max', (['h'], {}), '(h)\n', (4197, 4200), False, 'from numpy import min, max, convolve, random, average, ones, zeros, amax, log\n')]
|