code (string, 31 to 1.05M chars) | apis (list) | extract_api (string, 97 to 1.91M chars)
---|---|---|
from sklearn.linear_model import LogisticRegression
import numpy as np
from . import AbstractZnormClassifier
class LogisticRegressionClassifier(AbstractZnormClassifier):
"""Classifier which uses regularized logistic regression"""
def __init__(self, C=1, phi=None, degree=3, **kwargs):
# keyword arguments are passed on to scikit-learn's LogisticRegression implementation
# see http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
# relevant kwargs (* indicates default):
# C (float): 1* (inverse of regularization strength)
# penalty (string): "l1" or "l2"* (norm to regularize against)
# n_jobs (int): 1* or more (cores used to parallelize over classes; -1 for all)
super(LogisticRegressionClassifier, self).__init__(
"Logistic Regression", C=C, phi=phi, degree=degree, **kwargs)
self._lr = LogisticRegression(C=C, **kwargs)
if phi is None or (phi == "poly" and degree == 1):
self.phi = lambda X: X
elif phi == "poly":
self.phi = lambda X: poly_expand(X, degree)
else:
self.phi = phi
# train a logistic regression model on a provided dataset
def _train(self, X, Y):
self._lr.fit(self.phi(X), Y)
# classify a set of test points
def _classify(self, test_X):
return self._lr.predict(self.phi(test_X))
# perform Nth-degree polynomial feature basis expansion
def poly_expand(X, n):
ft_powers = np.array([X ** i for i in np.arange(n) + 1])
return np.swapaxes(ft_powers, 0, 1).reshape((X.shape[0], -1))
#from . import test_classifier
#
#for C in (0.01, 0.1, 1, 2, 5, 10, 25):
# for penalty in ("l1", "l2"):
# test_classifier(LogisticRegressionClassifier(
# C=C, penalty=penalty, class_weight="balanced"))
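As a quick check of the basis expansion above, here is a toy example (the demo array is hypothetical; it reuses the module's numpy import):
X_demo = np.array([[1., 2.], [3., 4.]])   # 2 samples, 2 features
# with n=2 each sample becomes (x1, x2, x1**2, x2**2)
print(poly_expand(X_demo, 2))
# [[ 1.  2.  1.  4.]
#  [ 3.  4.  9. 16.]]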
|
[
"numpy.swapaxes",
"numpy.arange",
"sklearn.linear_model.LogisticRegression"
] |
[((908, 941), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': 'C'}), '(C=C, **kwargs)\n', (926, 941), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1564, 1592), 'numpy.swapaxes', 'np.swapaxes', (['ft_powers', '(0)', '(1)'], {}), '(ft_powers, 0, 1)\n', (1575, 1592), True, 'import numpy as np\n'), ((1534, 1546), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1543, 1546), True, 'import numpy as np\n')]
|
import numpy as np
v0 = 4.5 # Initial velocity
g = 9.81 # Acceleration of gravity
t = np.linspace(0, 1, 1000) # 1000 points in time interval
y = v0*t - 0.5*g*t**2 # Generate all heights
# Find index where ball approximately has reached y=0
i = 0
while y[i] >= 0:
i = i + 1
# Since y[i] is the height at time t[i], knowing the index i
# also gives us the time of flight
print('Time of flight (in seconds): {:g}'.format(t[i]))
# We plot the path again just for comparison
import matplotlib.pyplot as plt
plt.plot(t, y)
plt.plot(t, 0*t, 'g--')
plt.xlabel('Time (s)')
plt.ylabel('Height (m)')
plt.show()
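For comparison, the landing index can also be found without the explicit while-loop; a minimal vectorized sketch using the same t and y arrays:
# first index where the height drops below zero
i_land = np.argmax(y < 0)
print('Time of flight (in seconds): {:g}'.format(t[i_land]))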
|
[
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.show"
] |
[((131, 154), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (142, 154), True, 'import numpy as np\n'), ((580, 594), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'y'], {}), '(t, y)\n', (588, 594), True, 'import matplotlib.pyplot as plt\n'), ((596, 621), 'matplotlib.pyplot.plot', 'plt.plot', (['t', '(0 * t)', '"""g--"""'], {}), "(t, 0 * t, 'g--')\n", (604, 621), True, 'import matplotlib.pyplot as plt\n'), ((621, 643), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (631, 643), True, 'import matplotlib.pyplot as plt\n'), ((645, 669), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Height (m)"""'], {}), "('Height (m)')\n", (655, 669), True, 'import matplotlib.pyplot as plt\n'), ((671, 681), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (679, 681), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
"""
Backtesting
"""
from futu import *
from talib.abstract import *
import numpy as np
import pandas as pd
import datetime
import time
import os
import json
import copy
import math
import sqlite3
from re import sub
import itertools
class Tools(object):
def cuo_die_0(self, inputs, item, hands):
cd_l_3 = (inputs['close'].values[item]-inputs['close'].values[item-1])/(inputs['close'].values[item-1]*hands)
cd_m_3 = cd_l_3
cd_n_3 = cd_m_3
cd_o_3 = (1+cd_n_3)/(1+cd_m_3)-1
cd_p_3 = 0
cd_o_max = []
cd_p_max = []
cd_o_max.append(cd_o_3)
cd_p_max.append(cd_p_3)
return cd_l_3, cd_m_3, cd_n_3, cd_o_3, cd_p_3, cd_o_max, cd_p_max
def cuo_die(self, inputs, item, hands, cd_m_3, cd_n_3, cd_p_3, cd_o_max, cd_p_max):
cd_l_3 = (inputs['close'].values[item]-inputs['close'].values[item-1])/(inputs['close'].values[item-1]*hands)
cd_m_3 = (1+cd_m_3)*(1+cd_l_3)-1
cd_n_3 = max(cd_n_3, cd_m_3)
cd_o_3 = (1+cd_n_3)/(1+cd_m_3)-1
if 0==cd_o_3:
cd_p_3 = 0
else:
cd_p_3 += 1
cd_o_max.append(cd_o_3)
cd_p_max.append(cd_p_3)
return cd_l_3, cd_m_3, cd_n_3, cd_o_3, cd_p_3, cd_o_max, cd_p_max
def yong_jin(self, cost, cost2):
cost = cost * 0.00171 + 0.05
cost2 = cost2 * 0.00171 + 0.05
return max(cost, 0.1007, cost2)
hs_300 = ['SZ.000596', 'SZ.000625', 'SZ.002736', 'SZ.002739', 'SZ.002773', 'SZ.300433', 'SZ.000002', 'SZ.000001', 'SZ.000538',
'SZ.000568', 'SZ.000425', 'SZ.000627', 'SZ.000651', 'SZ.000656', 'SZ.000671', 'SZ.000708', 'SZ.000703', 'SZ.000709', 'SZ.000723',
'SZ.000786', 'SZ.000776', 'SZ.000728', 'SZ.000066', 'SZ.000768', 'SZ.000783', 'SZ.000069', 'SZ.000063', 'SZ.000876', 'SZ.000858',
'SZ.000860', 'SZ.000895', 'SZ.000938', 'SZ.000961', 'SZ.000977', 'SZ.000157', 'SH.600570', 'SZ.000100', 'SZ.002001', 'SZ.002008',
'SZ.002024', 'SZ.002027', 'SZ.002032', 'SZ.002044', 'SZ.002050', 'SZ.000725', 'SZ.002120', 'SZ.002129', 'SZ.000338', 'SZ.002142',
'SZ.002146', 'SZ.002153', 'SZ.002157', 'SZ.002179', 'SZ.002230', 'SZ.002236', 'SZ.002241', 'SZ.002252', 'SZ.002271', 'SH.601888',
'SZ.002304', 'SZ.002311', 'SZ.002352', 'SZ.002371', 'SZ.002410', 'SZ.002415', 'SZ.002456', 'SZ.002460', 'SZ.002463', 'SZ.002466',
'SZ.002468', 'SZ.002475', 'SZ.002202', 'SZ.002493', 'SZ.002508', 'SH.601992', 'SZ.002555', 'SZ.002558', 'SZ.002601', 'SZ.002602',
'SZ.002607', 'SZ.002624', 'SH.601231', 'SZ.002673', 'SZ.000333', 'SZ.002714', 'SZ.002594', 'SZ.000166', 'SZ.001979', 'SZ.002841', 'SZ.002916']
m_tools = Tools()
def strategy(inputs, item_x):
m_stdev = []
job = ''
cost, hands = 0, 0
num = 0
for item in range(2, len(inputs)-1):
stdev = (inputs['close'].values[item]-inputs['close'].values[item-1])/(inputs['close'].values[item-1]*hands)
if (0!=hands) and (0==num):
cd_l_3, cd_m_3, cd_n_3, cd_o_3, cd_p_3, cd_o_max, cd_p_max = m_tools.cuo_die_0(inputs=inputs, item=item, hands=hands)
num += 1
elif 0!=hands:
cd_l_3, cd_m_3, cd_n_3, cd_o_3, cd_p_3, cd_o_max, cd_p_max = \
m_tools.cuo_die(inputs=inputs, item=item, hands=hands, cd_m_3=cd_m_3, cd_n_3=cd_n_3, cd_p_3=cd_p_3, cd_o_max=cd_o_max, cd_p_max=cd_p_max)
# max(cd_o_max) is the maximum drawdown, max(cd_p_max) is the longest drawdown streak
if (max(cd_o_max) >= item_x[2]) or (max(cd_p_max) >= item_x[0]):
m_stdev.append(stdev)
cost, hands, num = 0, 0, 0
job = 'Drawdown limit exceeded, sell!'
continue
if (inputs['cci'].values[item-2]<-100) and (inputs['cci'].values[item-1]>-100):
job = 'Start buying small amounts at the daily closing price until another signal appears'
cost += inputs['close'].values[item]
if hands!=0:
m_stdev.append(stdev)
hands += 1
continue
if ('Start buying small amounts at the daily closing price until another signal appears'==job):
cost2 = hands*inputs['close'].values[item]
m_yong_jin = m_tools.yong_jin(cost=cost, cost2=cost2)
if (cost2-cost-m_yong_jin) >= (cost*item_x[1]):
if hands!=0:
m_stdev.append(stdev)
cost, hands, num = 0, 0, 0
job = 'Take profit, sell!'
continue
else:
cost += inputs['close'].values[item]
if hands!=0:
m_stdev.append(stdev)
hands += 1
return m_stdev
# Establish the connection
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
# Get the list used for backtesting, i.e. stocks in (CSI 300 minus pharmaceutical stocks) that are within the historical K-line quota
m_list = m_tools.hs_300
now = datetime.date.today()
start = str(now - timedelta(days=3650))
end = str(now - timedelta(days=100))
m_list2 = copy.deepcopy(m_list)
final_dict = {}
while m_list:
each_hz = m_list[0]
try:
inputs = []
ret, data, page_req_key = quote_ctx.request_history_kline(each_hz, start=start, end=end, max_count=1000)
inputs.append(data)
while (page_req_key != None):
ret, data, page_req_key = quote_ctx.request_history_kline(each_hz, start=start, end=end, max_count=1000,page_req_key=page_req_key)
inputs.append(data)
inputs = pd.concat(inputs)
# real = CCI(high, low, close, timeperiod=14)
inputs['cci'] = CCI(inputs, timeperiod=14)
inputs = inputs[14:]
except Exception as e:
time.sleep(31)
continue
else:
final_dict[each_hz] = inputs
m_list.remove(each_hz)
# Remember to close this connection when finished, to avoid exhausting the connection quota
quote_ctx.close()
# Cartesian product of the parameter lists:
# drawdown period
item_cd_day = []
# take-profit threshold
item_num = []
# drawdown rate
item_cd_rate = []
# The Cartesian product of the 3 lists is very time-consuming and can be reduced
for each in range(1,50):
item_cd_day.append(each)
for each in range(1,20):
item_num.append(0+each*0.02)
for each in range(1,20):
item_cd_rate.append(0+each*0.02)
item_output = itertools.product(item_cd_day,item_num,item_cd_rate)
conn=sqlite3.connect('to_ali.db')
c=conn.cursor()
for item_x in item_output:
num_0 = 0
data = {'code': [],
'max drawdown period': [],
'take profit': [],
'max drawdown': [],
'Sharpe ratio': [],}
for each_hz, value in final_dict.items():
m_stdev = strategy(inputs=value, item_x=item_x)
m = []
for each in m_stdev:
m.append(each-0.04/250)
xia_pu = math.sqrt(250)*np.average(m)/np.std(m,ddof = 1)
if xia_pu>=0.78:
data['code'].append(each_hz)
data['max drawdown period'].append(item_x[0])
data['take profit'].append(item_x[1])
data['max drawdown'].append(item_x[2])
data['Sharpe ratio'].append(xia_pu)
if data['code']:
df = pd.DataFrame(data)
num_0 += 1
if num_0:
df.to_sql(name=f'cd{item_x[0]}-{item_x[1]}-{item_x[2]}-xia_pu', con=conn, if_exists="replace")
print(f'Current time: {str(datetime.datetime.now())[:19]}, max drawdown period {item_x[0]} days, take profit = {item_x[1]}, max drawdown {item_x[2]} saved to the database\n')
# Get the table names and save them in the tab_name list
c.execute("select name from sqlite_master where type='table'")
tab_name=c.fetchall()
tab_name=[line[0] for line in tab_name if ('-xia_pu' in line[0])]
print(f'Number of data tables: {len(tab_name)}')
m_max = 0
result = []
m_error = []
for each in tab_name:
# re.sub substitutes the table name into the SQL statement
sql = "select count(*) from 'pid'"
try:
m = pd.read_sql(sub("pid", each, sql), conn).values[0][0]
except Exception as e:
m_error.append(each)
continue
if m > m_max:
result = [each, ]
m_max = m
elif m==m_max:
result.append(each)
for each in result:
sql = "select * from 'pid'"
m = pd.read_sql(sub("pid", each, sql), conn)
print(m)
print(f'{len(m)} stocks have a Sharpe ratio >= 0.78')
print('The winner is:\n')
print(result[0])
print('error:\n')
print(m_error)
conn.close()
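The Sharpe-ratio line above annualizes mean daily excess returns assuming 250 trading days per year and a 4% annual risk-free rate. A minimal standalone sketch of that formula with hypothetical returns (reusing the module's math and numpy imports):
daily_returns = np.array([0.001, -0.002, 0.0015, 0.003, -0.001])   # hypothetical
excess = daily_returns - 0.04 / 250
sharpe = math.sqrt(250) * np.average(excess) / np.std(excess, ddof=1)
print(round(sharpe, 3))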
|
[
"sqlite3.connect",
"numpy.average",
"numpy.std",
"itertools.product",
"math.sqrt",
"time.sleep",
"datetime.datetime.now",
"copy.deepcopy",
"pandas.DataFrame",
"re.sub",
"datetime.date.today",
"pandas.concat"
] |
[((4728, 4749), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (4747, 4749), False, 'import datetime\n'), ((4837, 4858), 'copy.deepcopy', 'copy.deepcopy', (['m_list'], {}), '(m_list)\n', (4850, 4858), False, 'import copy\n'), ((5940, 5994), 'itertools.product', 'itertools.product', (['item_cd_day', 'item_num', 'item_cd_rate'], {}), '(item_cd_day, item_num, item_cd_rate)\n', (5957, 5994), False, 'import itertools\n'), ((5998, 6026), 'sqlite3.connect', 'sqlite3.connect', (['"""to_ali.db"""'], {}), "('to_ali.db')\n", (6013, 6026), False, 'import sqlite3\n'), ((5314, 5331), 'pandas.concat', 'pd.concat', (['inputs'], {}), '(inputs)\n', (5323, 5331), True, 'import pandas as pd\n'), ((6720, 6738), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (6732, 6738), True, 'import pandas as pd\n'), ((7657, 7678), 're.sub', 'sub', (['"""pid"""', 'each', 'sql'], {}), "('pid', each, sql)\n", (7660, 7678), False, 'from re import sub\n'), ((5501, 5515), 'time.sleep', 'time.sleep', (['(31)'], {}), '(31)\n', (5511, 5515), False, 'import time\n'), ((6433, 6450), 'numpy.std', 'np.std', (['m'], {'ddof': '(1)'}), '(m, ddof=1)\n', (6439, 6450), True, 'import numpy as np\n'), ((6404, 6418), 'math.sqrt', 'math.sqrt', (['(250)'], {}), '(250)\n', (6413, 6418), False, 'import math\n'), ((6419, 6432), 'numpy.average', 'np.average', (['m'], {}), '(m)\n', (6429, 6432), True, 'import numpy as np\n'), ((6906, 6929), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6927, 6929), False, 'import datetime\n'), ((7361, 7382), 're.sub', 'sub', (['"""pid"""', 'each', 'sql'], {}), "('pid', each, sql)\n", (7364, 7382), False, 'from re import sub\n')]
|
from math import erf
from numpy import array as vec
from numpy.linalg import norm as vec_size
from pandas import DataFrame
def approx(a, m, x):
return ((a+x)**m-(a-x)**m)/((a+x)**m+(a-x)**m)
def quadratic_error(a, m, x):
err = approx(a, m, x)-erf(x)
return err**2
def average_quadratic_error(a, m):
step = 0.1
integral = integrate_quad_error(a, m, step)
return integral/(a-step)
def integrate_quad_error(a, m, step):
point = 0
summ = 0
while point < a-step:
value = quadratic_error(a, m, point)
df = quadratic_error(a, m, point+step)
summ += value*step+0.5*df*step
point += step
return summ
def derive_2d(f, point, h):
x = point[0]
y = point[1]
z = f(x, y)
dx = f(x+h, y)-z
dy = f(x, y+h)-z
return vec((dx/h, dy/h))
def find_min_error_2d(start, max_iters):
print(f"Start: {str(start)}")
path = []
gamma = 0.1 # Step size multiplier
precision = 0.000001 # Desired precision of result 0.0000001 1e-7
dx = 0.001 # 0.0000001
current = start
next = current
for i in range(max_iters):
if i % 10 == 0:
print(f"{i}/{max_iters}")
current = next
next = current- gamma*derive_2d(average_quadratic_error, current, dx)
err = average_quadratic_error(next[0], next[1])
path.append((next[0], next[1], err))
step = vec_size(next-current)
if abs(step) <= precision:
break
return next, path
def main():
start = (2.3, 2.5)
max_iters = 2000
point, path = find_min_error_2d(start, max_iters)
df = DataFrame(path)
df.to_csv(f"./data/algebraic_2d.csv", header=["a", "m", "error"], index = False)
print(f"Closest point is {point}")
if __name__ == '__main__':
main()
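A quick sanity check of the forward-difference gradient helper above, on a function with a known gradient (the test point and step size are arbitrary):
# f(x, y) = x**2 + y**2 has gradient (2x, 2y)
grad = derive_2d(lambda x, y: x ** 2 + y ** 2, (1.0, 2.0), 1e-6)
print(grad)   # approximately [2. 4.]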
|
[
"pandas.DataFrame",
"numpy.array",
"numpy.linalg.norm",
"math.erf"
] |
[((808, 829), 'numpy.array', 'vec', (['(dx / h, dy / h)'], {}), '((dx / h, dy / h))\n', (811, 829), True, 'from numpy import array as vec\n'), ((1634, 1649), 'pandas.DataFrame', 'DataFrame', (['path'], {}), '(path)\n', (1643, 1649), False, 'from pandas import DataFrame\n'), ((254, 260), 'math.erf', 'erf', (['x'], {}), '(x)\n', (257, 260), False, 'from math import erf\n'), ((1412, 1436), 'numpy.linalg.norm', 'vec_size', (['(next - current)'], {}), '(next - current)\n', (1420, 1436), True, 'from numpy.linalg import norm as vec_size\n')]
|
import datetime
from pathlib import Path
from typing import Any, Callable, Iterable, List, NamedTuple, Optional
import numpy as np
from tqdm import tqdm
from vcap import BaseCapsule, NodeDescription
from vcap.testing.input_output_validation import make_detection_node
from capsules import CapsuleDir
from workers import CapsuleThreadPool
class BenchmarkSuite:
class Result(NamedTuple):
capsule_name: str
num_workers: int
num_samples: int
fps: float
def __init__(self, capsule_dir: Path, num_workers: Iterable[int],
image_func: Optional[Callable[[], np.ndarray]] = None):
"""
:param capsule_dir: Directory containing unpackaged capsules to test
:param num_workers: Iterable containing the different num_worker values
to test
:param image_func: Function that returns an image. Can return the same
image over and over, or different images
"""
self.capsule_dir = CapsuleDir(capsule_dir)
self.num_workers = num_workers
# Generate a random image to run the benchmark on.
# Generating an image for each process_frame has a big overhead,
# limiting speed to < 100 FPS.
self.rng = np.random.RandomState(1337)
self.image = self.rng.randint(0, 255, (1920, 1080, 3),
dtype=np.uint8)
self.image.flags.writeable = False
self.capsule_dir.package_capsules()
def test(self, num_samples: int) -> List[Result]:
results: List[self.Result] = []
total_tests = len(self.capsule_dir) * len(list(self.num_workers))
with tqdm(total=total_tests) as progress_bar:
for capsule in self.capsule_dir:
for num_workers in self.num_workers:
worker_pool = CapsuleThreadPool(num_workers)
duration = self.perform_test(capsule, worker_pool,
num_samples)
result = self.Result(
capsule_name=capsule.name,
num_workers=num_workers,
num_samples=num_samples,
fps=num_samples / duration.total_seconds()
)
results.append(result)
progress_bar.update(1)
worker_pool.shutdown()
capsule.close()
return results
def generate_input_kwargs(self, capsule):
if capsule.input_type.size is NodeDescription.Size.NONE:
input_node = None
else:
input_node = make_detection_node(self.image.shape, capsule.input_type)
# Set node size to cover entire frame
height, width, _ = self.image.shape
input_node.coords = [[0, 0],
[width, 0],
[width, height],
[0, height]]
if capsule.input_type.size is NodeDescription.Size.ALL:
input_node = [input_node]
return {"frame": self.image,
"detection_node": input_node,
"options": capsule.default_options,
"state": capsule.stream_state()}
def perform_test(self, capsule: BaseCapsule,
worker_pool: CapsuleThreadPool, num_samples: int) \
-> datetime.timedelta:
# Warm things up, such as getting model on GPU if capsule uses it
warmup_results = worker_pool.map(
lambda kwargs: capsule.process_frame(**kwargs),
[self.generate_input_kwargs(capsule) for _ in range(50)]
)
for _ in warmup_results:
pass
# Generate test args before starting the test, so that the
# benchmark is purely just for the capsule
test_inputs = [self.generate_input_kwargs(capsule)
for _ in range(num_samples)]
# Begin the benchmark
start_time = datetime.datetime.now()
results = worker_pool.map(
lambda kwargs: capsule.process_frame(**kwargs),
test_inputs)
for _ in results:
pass
end_time = datetime.datetime.now()
duration = end_time - start_time
return duration
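A hypothetical driver for the suite above; the capsule directory path, worker counts, and sample count are placeholders rather than values from the original project:
if __name__ == "__main__":
    suite = BenchmarkSuite(capsule_dir=Path("capsules/"), num_workers=[1, 2, 4])
    for result in suite.test(num_samples=500):
        print(f"{result.capsule_name}: {result.fps:.1f} FPS "
              f"({result.num_workers} workers, {result.num_samples} samples)")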
|
[
"capsules.CapsuleDir",
"tqdm.tqdm",
"workers.CapsuleThreadPool",
"datetime.datetime.now",
"vcap.testing.input_output_validation.make_detection_node",
"numpy.random.RandomState"
] |
[((994, 1017), 'capsules.CapsuleDir', 'CapsuleDir', (['capsule_dir'], {}), '(capsule_dir)\n', (1004, 1017), False, 'from capsules import CapsuleDir\n'), ((1248, 1275), 'numpy.random.RandomState', 'np.random.RandomState', (['(1337)'], {}), '(1337)\n', (1269, 1275), True, 'import numpy as np\n'), ((4023, 4046), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4044, 4046), False, 'import datetime\n'), ((4231, 4254), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4252, 4254), False, 'import datetime\n'), ((1664, 1687), 'tqdm.tqdm', 'tqdm', ([], {'total': 'total_tests'}), '(total=total_tests)\n', (1668, 1687), False, 'from tqdm import tqdm\n'), ((2650, 2707), 'vcap.testing.input_output_validation.make_detection_node', 'make_detection_node', (['self.image.shape', 'capsule.input_type'], {}), '(self.image.shape, capsule.input_type)\n', (2669, 2707), False, 'from vcap.testing.input_output_validation import make_detection_node\n'), ((1837, 1867), 'workers.CapsuleThreadPool', 'CapsuleThreadPool', (['num_workers'], {}), '(num_workers)\n', (1854, 1867), False, 'from workers import CapsuleThreadPool\n')]
|
import pandas as pd
import numpy as np
from .Team import Team
from .FootballModel import FootballModel
from ..utils import array_sum_to_one, exists, to_percent
from .ResultType import ResultType
class Game:
'''
'''
def __init__(self, model: FootballModel, team_1: str, team_2: str, max_goals=20):
self.team_1 = Team(name=team_1)
self.team_2 = Team(name=team_2)
self.model = model
self.max_goals = max_goals
def format_game(self, team_1: Team, team_2: Team):
'''
'''
game = pd.DataFrame(
data={'team': team_1.name, 'opponent': team_2.name},
index=[1])
return game
def is_team_1(self, team: Team):
'''
'''
return team.name == self.team_1.name
def set_team_goals_proba(self, team: Team):
'''
'''
if self.is_team_1(team):
team_1, team_2 = self.team_1, self.team_2
else:
team_1, team_2 = self.team_2, self.team_1
game = self.format_game(team_1=team_1, team_2=team_2)
team.avg_goals = self.model.predict_avg_score(game)
team.compute_proba_goals(max_goals=self.max_goals)
def compute_result_proba(self):
'''
'''
self.proba_team_1 = np.sum(np.tril(self.result_proba_matrix, -1))
self.proba_draw = np.sum(np.diag(self.result_proba_matrix))
self.proba_team_2 = np.sum(np.triu(self.result_proba_matrix, 1))
self.proba_team_1 = to_percent(self.proba_team_1)
self.proba_draw = to_percent(self.proba_draw)
self.proba_team_2 = to_percent(self.proba_team_2)
print(self.proba_team_1)
# result_proba_list = [self.proba_team_1,
# self.proba_draw,
# self.proba_team_2]
# self.result_proba = array_sum_to_one(result_proba_list)
# del result_proba_list
def set_result_attr(self, result_type: ResultType, winner, looser):
'''
'''
self.result_type = result_type
self.winner = winner
self.looser = looser
def set_result(self):
'''
'''
if not exists(var=self.result):
return
if (self.result[0] > self.result[1]):
self.set_result_attr(result_type=ResultType.WIN_TEAM_1,
winner=self.team_1,
looser=self.team_2)
elif (self.result[0] == self.result[1]):
self.set_result_attr(result_type=ResultType.DRAW,
winner=None,
looser=None)
else:
self.set_result_attr(result_type=ResultType.WIN_TEAM_2,
winner=self.team_2,
looser=self.team_1)
def is_winner(self, team: Team):
'''
'''
return team == self.winner
def is_looser(self, team: Team):
'''
'''
return team == self.looser
def and_the_winner_is(self):
'''
'''
self.result = np.where(self.result_proba_matrix ==
np.amax(self.result_proba_matrix))
self.set_result()
def compute_result(self):
'''
'''
self.set_team_goals_proba(self.team_1)
self.set_team_goals_proba(self.team_2)
self.result_proba_matrix = np.outer(self.team_1.proba_goals,
self.team_2.proba_goals)
self.compute_result_proba()
self.and_the_winner_is()
for index, proba in np.ndenumerate(self.result_proba_matrix):
self.result_proba_matrix[index] = to_percent(proba)
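The win/draw/loss split computed in compute_result_proba can be sanity-checked numerically; the goal-count distributions below are made up rather than model output:
p_goals_1 = np.array([0.3, 0.5, 0.2])   # P(team 1 scores 0, 1, 2 goals)
p_goals_2 = np.array([0.4, 0.4, 0.2])
matrix = np.outer(p_goals_1, p_goals_2)
p_win_1 = np.sum(np.tril(matrix, -1))   # team 1 scores strictly more
p_draw = np.sum(np.diag(matrix))
p_win_2 = np.sum(np.triu(matrix, 1))
print(p_win_1 + p_draw + p_win_2)       # sums to 1.0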
|
[
"numpy.ndenumerate",
"numpy.diag",
"numpy.outer",
"numpy.tril",
"pandas.DataFrame",
"numpy.amax",
"numpy.triu"
] |
[((549, 625), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'team': team_1.name, 'opponent': team_2.name}", 'index': '[1]'}), "(data={'team': team_1.name, 'opponent': team_2.name}, index=[1])\n", (561, 625), True, 'import pandas as pd\n'), ((3420, 3478), 'numpy.outer', 'np.outer', (['self.team_1.proba_goals', 'self.team_2.proba_goals'], {}), '(self.team_1.proba_goals, self.team_2.proba_goals)\n', (3428, 3478), True, 'import numpy as np\n'), ((3622, 3662), 'numpy.ndenumerate', 'np.ndenumerate', (['self.result_proba_matrix'], {}), '(self.result_proba_matrix)\n', (3636, 3662), True, 'import numpy as np\n'), ((1284, 1321), 'numpy.tril', 'np.tril', (['self.result_proba_matrix', '(-1)'], {}), '(self.result_proba_matrix, -1)\n', (1291, 1321), True, 'import numpy as np\n'), ((1356, 1389), 'numpy.diag', 'np.diag', (['self.result_proba_matrix'], {}), '(self.result_proba_matrix)\n', (1363, 1389), True, 'import numpy as np\n'), ((1426, 1462), 'numpy.triu', 'np.triu', (['self.result_proba_matrix', '(1)'], {}), '(self.result_proba_matrix, 1)\n', (1433, 1462), True, 'import numpy as np\n'), ((3174, 3207), 'numpy.amax', 'np.amax', (['self.result_proba_matrix'], {}), '(self.result_proba_matrix)\n', (3181, 3207), True, 'import numpy as np\n')]
|
"""
permutation-flowshop repository
This module has two examples on how to setup and run
the algorithm for Permutation Flowshop scheduling problems.
The first one uses random generated data, while the second
uses one of the instances from the Taillard benchmark set.
"""
import numpy as np
import benchmark
from iterated_greedy import IteratedGreedy
def example1_random_data():
"""Execute the algorithm with randomly generated data."""
# Generate a (20, 5) array with integer numbers
rnd_data = np.random.randint(size=(20,5), low=5, high=80)
print(rnd_data)
ig = IteratedGreedy(rnd_data) # Create problem instance
ig.run(5000) # Run the default algorithm for 5000ms (5 seconds)
# Print results to console
print("Best makespan", ig.best_solution.makespan,"iterations:", ig.iterations)
print("Job sequence:", ig.best_solution.sequence)
def example2_taillard():
"""Execute the algorithm with the first Taillard instance."""
# Load instance
instance = benchmark.import_taillard()[0]
print(instance)
ig = IteratedGreedy(instance)
# Run the algorithm with local search on partial solutions
# and removing 5 jobs at each iteration
ig.local_search_partial_solution = True
ig.num_jobs_remove = 5
ig.run(10000)
# Print results to console
print("Best makespan", ig.best_solution.makespan,"iterations:", ig.iterations)
print("Job sequence:", ig.best_solution.sequence)
if __name__ == "__main__":
example1_random_data()
example2_taillard()
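For reference, the makespan that IteratedGreedy minimizes can be computed for any job sequence with the standard flowshop recurrence. The helper below is not part of this repository; it assumes rows are jobs and columns are machines, matching the (20, 5) random data above:
def makespan(proc_times, sequence):
    """Completion time of the last job on the last machine for a given job order."""
    n_machines = proc_times.shape[1]
    completion = np.zeros(n_machines)
    for job in sequence:
        for k in range(n_machines):
            prev = completion[k - 1] if k > 0 else 0.0
            completion[k] = max(completion[k], prev) + proc_times[job, k]
    return completion[-1]

print(makespan(np.random.randint(size=(20, 5), low=5, high=80), range(20)))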
|
[
"benchmark.import_taillard",
"iterated_greedy.IteratedGreedy",
"numpy.random.randint"
] |
[((512, 559), 'numpy.random.randint', 'np.random.randint', ([], {'size': '(20, 5)', 'low': '(5)', 'high': '(80)'}), '(size=(20, 5), low=5, high=80)\n', (529, 559), True, 'import numpy as np\n'), ((589, 613), 'iterated_greedy.IteratedGreedy', 'IteratedGreedy', (['rnd_data'], {}), '(rnd_data)\n', (603, 613), False, 'from iterated_greedy import IteratedGreedy\n'), ((1071, 1095), 'iterated_greedy.IteratedGreedy', 'IteratedGreedy', (['instance'], {}), '(instance)\n', (1085, 1095), False, 'from iterated_greedy import IteratedGreedy\n'), ((1010, 1037), 'benchmark.import_taillard', 'benchmark.import_taillard', ([], {}), '()\n', (1035, 1037), False, 'import benchmark\n')]
|
# -*- coding: utf-8 -*-
from .common import *
from ccxt.base.errors import AuthenticationError, ExchangeError, ExchangeNotAvailable, RequestTimeout
from requests.exceptions import ConnectionError, HTTPError, ReadTimeout
from socket import gaierror, timeout
from urllib3.exceptions import MaxRetryError, NewConnectionError, ReadTimeoutError
import ccxt
import numpy as np
db_suffix = '.ww'
net_errors = (AuthenticationError, ExchangeError, ExchangeNotAvailable, RequestTimeout,
ConnectionError, HTTPError, ReadTimeout,
gaierror, timeout,
MaxRetryError, NewConnectionError, ReadTimeoutError)
secs_in_hour, tabs = 3600, 2
net_errors_counter = 0
def exchange(exchange_id):
"""
todo
:param exchange_id:
:return:
"""
argv = locals()
exchange_obj = None
try:
auth = setup(exchange_id.upper(), config_path='bin/.keys')
exchange_obj = getattr(ccxt, exchange_id)(dict(auth))
exchange_obj.options['warnOnFetchOpenOrdersWithoutSymbol'] = False
if not int(setup()['offline']):
exchange_obj.loadMarkets()
except net_errors:
return _net_except(exchange_obj, exchange, argv, format_exc())
except:
logg(format_exc(), exchange_id)
return exchange_obj
def symbols(exchange_obj, btc_only=True):
"""
todo
:param exchange_obj:
:param btc_only:
:return:
"""
argv = locals()
tmp = {}
try:
wait()
tmp = {d['symbol']: (d['limits']['amount']['max'],
d['limits']['amount']['min'],
d['precision']['amount'],
d['limits']['price']['max'],
d['limits']['price']['min'],
d['precision']['price'],)
for d in exchange_obj.fetch_markets()
if d['active']}
if btc_only:
return {k: v for k, v in tmp.items()
if (k[:4] == 'BTC/' or k[-4:] == '/BTC')
and 'BNB' not in k}
except net_errors:
return _net_except(exchange_obj, symbols, argv, format_exc())
except:
logg(format_exc(), exchange_obj.id)
return tmp
def history(exchange_obj, symbol, cutoff=None, hours=1):
"""
Many thanks to "xmatthias":
https://github.com/xmatthias
https://github.com/ccxt/ccxt/issues/5697
:param exchange_obj:
:param symbol:
:param cutoff:
:param hours:
:return:
"""
argv = locals()
void = np.array([], dtype='float64').reshape(0, 3)
try:
if cutoff is None:
cutoff = time()
since = int(1E3 * (cutoff - hours * secs_in_hour))
until = int(1E3 * cutoff)
wait()
data = exchange_obj.fetch_trades(symbol, since=since)
if not len(data):
return void
old_id = data[-1]['id']
while data[-1]['timestamp'] < until and not halt():
wait()
tmp = exchange_obj.fetch_trades(symbol, params={
'fromId': old_id}, limit=1000)
new_id = tmp[-1]['id']
if len(tmp) and new_id != old_id:
data.extend(tmp)
old_id = data[-1]['id']
else:
break
hh = [(e, a, p) for t, (e, a, p) in sorted(
{int(d['id']): (d['timestamp'] / 1E3,
[1, -1][d['side'] == 'sell'] * d['amount'],
d['price']) for d in data}.items()
) if since < 1E3 * e <= until]
if len(hh):
return np.array(hh)
except net_errors:
return _net_except(exchange_obj, history, argv, format_exc())
except:
logg(format_exc(), exchange_obj.id)
return void
def book(exchange_obj, symbol, margin=0):
"""
todo
:param exchange_obj:
:param symbol:
:param margin:
:return:
"""
argv = locals()
void = np.array([], dtype='float64').reshape(0, 2)
try:
wait()
req = exchange_obj.fetch_order_book(symbol, limit=500)
asks = [(p, a) for p, a in sorted(req['asks'])]
bids = [(p, -a) for p, a in sorted(req['bids'], reverse=True)]
if margin > 0:
if type(margin) == float:
h_ask = (1 + margin / 100) * asks[0][0]
l_bid = (1 - margin / 100) * bids[0][0]
asks = [(p, a) for p, a in asks if p <= h_ask]
bids = [(p, a) for p, a in bids if p >= l_bid]
else:
asks, bids = asks[:margin], bids[:margin]
bb = sorted(asks + bids, reverse=True)
if len(bb):
return np.array(bb)
except IndexError:
pass
except net_errors:
return _net_except(exchange_obj, book, argv, format_exc())
except:
logg(format_exc(), exchange_obj.id)
return void
def balance(exchange_obj):
"""
todo
:param exchange_obj:
:return:
"""
argv = locals()
tmp = {'BTC': (0., 0.)}
try:
wait()
req = exchange_obj.fetch_balance()
for currency, available in req['free'].items():
on_orders = req['used'][currency]
if available + on_orders > 0:
tmp[currency] = (available, on_orders)
if exchange_obj.id == 'bittrex' and 'BTXCRD' in tmp:
del tmp['BTXCRD']
except net_errors:
return _net_except(exchange_obj, balance, argv, format_exc())
except:
logg(format_exc(), exchange_obj.id)
return tmp
def fire(exchange_obj, symbol, amount, price, order_type='limit'):
"""
todo
:param exchange_obj:
:param symbol:
:param amount:
:param price:
:param order_type:
:return:
"""
argv = locals()
req = {}
try:
if not setup()['runMode'].upper() in ['LIVE', 'TEST']:
return 'PLAY_' + str(int(1E3 * time())), price
params = (symbol, order_type, 'buy', amount, price) if amount > 0 else (
symbol, order_type, 'sell', -amount, price)
wait()
req = exchange_obj.create_order(*params)
_cached(exchange_obj, req)
except net_errors:
return _net_except(exchange_obj, fire, argv, format_exc())
except:
logg(format_exc(), exchange_obj.id)
return req['id'], req['price']
def orders(exchange_obj, id_only=True):
"""
todo
:param exchange_obj:
:param id_only:
:return:
"""
argv = locals()
tmp = set()
try:
if not setup()['runMode'].upper() in ['LIVE', 'TEST']:
return tmp
orders_cache = _cached(exchange_obj)['data']
eligible = {order_dict['symbol'] for order_dict in orders_cache.values()}
for ss in eligible:
wait()
for order_dict in exchange_obj.fetch_open_orders(symbol=ss):
if order_dict['status'] == 'open':
side = -1 if order_dict['side'] == 'sell' else 1
tmp.add((order_dict['id'], order_dict['timestamp'],
order_dict['symbol'], side * order_dict['amount'],
order_dict['price']))
if id_only and len(tmp):
return set(list(zip(*tmp))[0])
except net_errors:
return _net_except(exchange_obj, orders, argv, format_exc())
except:
logg(format_exc(), exchange_obj.id)
return tmp
def cancel(exchange_obj, order_id):
"""
todo
:param exchange_obj:
:param order_id:
:return:
"""
argv = locals()
minus_sign = '-'
try:
if not setup()['runMode'].upper() in ['LIVE', 'TEST']:
return minus_sign + order_id
orders_cache = _cached(exchange_obj)['data']
if order_id not in orders_cache:
return minus_sign
cached_order = orders_cache[order_id]
symbol = cached_order['symbol']
if cached_order['status'] == 'open':
wait()
exchange_obj.cancel_order(order_id, symbol)
cached_order['status'] = 'canceled'
_cached(exchange_obj, cached_order)
return minus_sign + order_id.upper()
except net_errors:
return _net_except(exchange_obj, cancel, argv, format_exc())
except:
logg(format_exc(), exchange_obj.id)
return minus_sign
def _cached(exchange_obj, order_data=None, before_millis=None):
"""
todo
:param exchange_obj:
:param order_data:
:param before_millis:
:return:
"""
argv = locals()
db_file = exchange_obj.id + db_suffix
tmp = {}
try:
if before_millis is None:
before_millis = exchange_obj.milliseconds() - 7 * 24 * secs_in_hour * 1000
exchange_obj.purge_cached_orders(before_millis)
template = {'data': {}, 'last': 0., }
tmp = disk(db_file)
if not tmp.keys() == template.keys():
tmp = template
if order_data is not None:
tmp['data'][order_data['id']] = order_data
now = time()
if now - tmp['last'] > secs_in_hour:
wait()
tmp['data'].update({order_dict['id']: order_dict
for order_dict in exchange_obj.fetch_open_orders()})
tmp['last'] = now
debug(msgg(701), exchange_obj.id, tabs)
tmp['data'] = {k: v for k, v in tmp['data'].items()
if v['timestamp'] >= before_millis}
disk(db_file, tmp)
except net_errors:
return _net_except(exchange_obj, _cached, argv, format_exc())
except:
logg(format_exc(), exchange_obj.id)
return tmp
def _net_except(exchange_obj, func_obj, func_params, errmsg, delay=12):
"""
todo
:param exchange_obj:
:param func_obj:
:param func_params:
:param errmsg:
:param delay:
:return:
"""
global net_errors_counter
try:
if net_errors_counter < 5:
debug(msgg(702, delay), exchange_obj.id)
wait(seconds=delay)
net_errors_counter += 1
return func_obj(**func_params)
else:
logg(errmsg, exchange_obj.id)
net_errors_counter = 0
except:
logg(format_exc(), exchange_obj.id)
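A hypothetical usage sketch for the wrappers above; the exchange id and symbol are placeholders, and valid API keys must exist wherever setup() expects them:
ex = exchange('binance')
if ex is not None:
    markets = symbols(ex, btc_only=True)       # {symbol: limits/precision tuple}
    trades = history(ex, 'ETH/BTC', hours=1)   # rows of (timestamp, signed amount, price)
    print(len(markets), trades.shape)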
|
[
"numpy.array"
] |
[((2572, 2601), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float64"""'}), "([], dtype='float64')\n", (2580, 2601), True, 'import numpy as np\n'), ((3633, 3645), 'numpy.array', 'np.array', (['hh'], {}), '(hh)\n', (3641, 3645), True, 'import numpy as np\n'), ((3994, 4023), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""float64"""'}), "([], dtype='float64')\n", (4002, 4023), True, 'import numpy as np\n'), ((4716, 4728), 'numpy.array', 'np.array', (['bb'], {}), '(bb)\n', (4724, 4728), True, 'import numpy as np\n')]
|
"""Simulating time series, with aperiodic activity."""
import numpy as np
from scipy.stats import zscore
from scipy.linalg import toeplitz, cholesky
from neurodsp.filt import filter_signal, infer_passtype
from neurodsp.filt.fir import compute_filter_length
from neurodsp.filt.checks import check_filter_definition
from neurodsp.utils import remove_nans
from neurodsp.utils.checks import check_param_range
from neurodsp.utils.data import create_times, compute_nsamples
from neurodsp.utils.decorators import normalize
from neurodsp.spectral import rotate_powerlaw
from neurodsp.sim.transients import sim_synaptic_kernel
###################################################################################################
###################################################################################################
@normalize
def sim_poisson_pop(n_seconds, fs, n_neurons=1000, firing_rate=2):
"""Simulate a Poisson population.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
n_neurons : int, optional, default: 1000
Number of neurons in the simulated population.
firing_rate : float, optional, default: 2
Firing rate of individual neurons in the population.
Returns
-------
sig : 1d array
Simulated population activity.
Notes
-----
The simulated signal is essentially white noise, but satisfies the Poisson
property, i.e. mean(X) = var(X).
The lambda parameter of the Poisson process (total rate) is determined as
firing rate * number of neurons, i.e. summation of Poisson processes is still
a Poisson processes.
Note that the Gaussian approximation for a sum of Poisson processes is only
a good approximation for large lambdas.
Examples
--------
Simulate a Poisson population:
>>> sig = sim_poisson_pop(n_seconds=1, fs=500, n_neurons=1000, firing_rate=2)
"""
# Poisson population rate signal scales with # of neurons and individual rate
lam = n_neurons * firing_rate
# Variance is equal to the mean
sig = np.random.normal(loc=lam, scale=lam**0.5, size=compute_nsamples(n_seconds, fs))
# Enforce that sig is non-negative in cases of low firing rate
sig[np.where(sig < 0.)] = 0.
return sig
@normalize
def sim_synaptic_current(n_seconds, fs, n_neurons=1000, firing_rate=2.,
tau_r=0., tau_d=0.01, t_ker=None):
"""Simulate a signal as a synaptic current, which has 1/f characteristics with a knee.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
n_neurons : int, optional, default: 1000
Number of neurons in the simulated population.
firing_rate : float, optional, default: 2
Firing rate of individual neurons in the population.
tau_r : float, optional, default: 0.
Rise time of synaptic kernel, in seconds.
tau_d : float, optional, default: 0.01
Decay time of synaptic kernel, in seconds.
t_ker : float, optional
Length of time of the simulated synaptic kernel, in seconds.
Returns
-------
sig : 1d array
Simulated synaptic current.
Notes
-----
- This simulation is based on the one used in [1]_.
- The resulting signal is most similar to unsigned intracellular current or conductance change.
References
----------
.. [1] <NAME>., <NAME>., & <NAME>. (2017). Inferring synaptic
excitation/inhibition balance from field potentials. NeuroImage, 158, 70–78.
DOI: https://doi.org/10.1016/j.neuroimage.2017.06.078
Examples
--------
Simulate a synaptic current signal:
>>> sig = sim_synaptic_current(n_seconds=1, fs=500)
"""
# If not provided, compute t_ker as a function of decay time constant
if t_ker is None:
t_ker = 5. * tau_d
# Simulate an extra bit because the convolution will trim & turn off normalization
sig = sim_poisson_pop((n_seconds + t_ker), fs, n_neurons, firing_rate,
mean=None, variance=None)
ker = sim_synaptic_kernel(t_ker, fs, tau_r, tau_d)
sig = np.convolve(sig, ker, 'valid')[:compute_nsamples(n_seconds, fs)]
return sig
@normalize
def sim_knee(n_seconds, fs, chi1, chi2, knee):
"""Simulate a signal whose power spectrum has a 1/f structure with a knee.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
chi1 : float
Power law exponent before the knee.
chi2 : float
Power law exponent added to chi1 after the knee.
knee : float
Location of the knee in Hz.
Returns
-------
sig : 1d array
Time series with the desired power spectrum.
Notes
-----
This simulated time series has a power spectrum that follows the Lorentzian equation:
`P(f) = 1 / (f**chi1 * (f**chi2 + knee))`
- This simulation creates this power spectrum shape using a sum of sinusoids.
- The slope of the log power spectrum before the knee is chi1 whereas after the knee it is chi2,
but only when the sign of chi1 and chi2 are the same.
Examples
--------
Simulate a time series with chi1 of -1, chi2 of -2, and knee of 100:
>>> sim_knee(n_seconds=10, fs=1000, chi1=-1, chi2=-2, knee=100)
"""
times = create_times(n_seconds, fs)
n_samples = compute_nsamples(n_seconds, fs)
# Create frequencies for the power spectrum, which will be freqs of the summed cosines
freqs = np.linspace(0, fs/2, num=int(n_samples//2 + 1), endpoint=True)
# Drop the DC component
freqs = freqs[1:]
# Map the frequencies under the (square root) Lorentzian
# This will give us the amplitude coefficients for the sinusoids
cosine_coeffs = np.array([np.sqrt(1 / (freq ** -chi1 * (freq ** (-chi2 - chi1) + knee))) \
for freq in freqs])
# Add sinusoids with a random phase shift
sig = np.sum(np.array([cosine_coeffs[ell] * \
np.cos(2 * np.pi * freq * times + 2 * np.pi * np.random.rand()) \
for ell, freq in enumerate(freqs)]), axis=0)
return sig
@normalize
def sim_random_walk(n_seconds, fs, theta=1., mu=0., sigma=5.):
"""Simulate a mean-reverting random walk, as an Ornstein-Uhlenbeck process.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
theta : float, optional, default: 1.0
Memory scale parameter. Larger theta values create faster fluctuations.
mu : float, optional, default: 0.0
Mean of the random walk.
sigma : float, optional, default: 5.0
Standard deviation of the random walk.
Returns
-------
sig : 1d array
Simulated random walk signal.
Notes
-----
The random walk is simulated as a discretized Ornstein-Uhlenbeck process:
`dx = theta*(x-mu)*dt + sigma*dWt`
Where:
- mu : mean
- sigma : standard deviation
- theta : memory scale
- dWt : increments of Wiener process, i.e. white noise
See the wikipedia page [1]_ for the integral solution.
References
----------
.. [1] https://en.wikipedia.org/wiki/Ornstein-Uhlenbeck_process#Formal_solution
Examples
--------
Simulate a Ornstein-Uhlenbeck random walk:
>>> sig = sim_random_walk(n_seconds=1, fs=500, theta=1.)
"""
times = create_times(n_seconds, fs)
x0 = mu
dt = times[1] - times[0]
ws = np.random.normal(size=len(times))
ex = np.exp(-theta * times)
ws[0] = 0.
sig = x0 * ex + mu * (1. - ex) + sigma * ex * \
np.cumsum(np.exp(theta * times) * np.sqrt(dt) * ws)
return sig
@normalize
def sim_powerlaw(n_seconds, fs, exponent=-2.0, f_range=None, **filter_kwargs):
"""Simulate a power law time series, with a specified exponent.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
exponent : float, optional, default: -2
Desired power-law exponent, of the form P(f)=f^exponent.
f_range : list of [float, float] or None, optional
Frequency range to filter simulated data, as [f_lo, f_hi], in Hz.
**filter_kwargs : kwargs, optional
Keyword arguments to pass to `filter_signal`.
Returns
-------
sig : 1d array
Time-series with the desired power law exponent.
Notes
-----
- Powerlaw data with exponents is created by spectrally rotating white noise [1]_.
References
----------
.. [1] <NAME>., & <NAME>. (1995). On Generating Power Law Noise.
Astronomy and Astrophysics, 300, 707–710.
Examples
--------
Simulate a power law signal, with an exponent of -2 (brown noise):
>>> sig = sim_powerlaw(n_seconds=1, fs=500, exponent=-2.0)
Simulate a power law signal, with a highpass filter applied at 2 Hz:
>>> sig = sim_powerlaw(n_seconds=1, fs=500, exponent=-1.5, f_range=(2, None))
"""
# Compute the number of samples for the simulated time series
n_samples = compute_nsamples(n_seconds, fs)
# Get the number of samples to simulate for the signal
# If signal is to be filtered, with FIR, add extra to compensate for edges
if f_range and filter_kwargs.get('filter_type', None) != 'iir':
pass_type = infer_passtype(f_range)
filt_len = compute_filter_length(fs, pass_type,
*check_filter_definition(pass_type, f_range),
n_seconds=filter_kwargs.get('n_seconds', None),
n_cycles=filter_kwargs.get('n_cycles', 3))
n_samples += filt_len + 1
# Simulate the powerlaw data
sig = _create_powerlaw(n_samples, fs, exponent)
if f_range is not None:
sig = filter_signal(sig, fs, infer_passtype(f_range), f_range,
remove_edges=True, **filter_kwargs)
# Drop the edges, that were compensated for, if not using FIR filter
if not filter_kwargs.get('filter_type', None) == 'iir':
sig, _ = remove_nans(sig)
return sig
@normalize
def sim_frac_gaussian_noise(n_seconds, fs, chi=0, hurst=None):
"""Simulate a timeseries as fractional gaussian noise.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
chi: float, optional, default: 0
Desired power law exponent of the spectrum of the signal.
Must be in the range (-1, 1).
hurst : float, optional, default: None
Desired Hurst parameter, which must be in the range (0, 1).
If provided, this value overwrites the `chi` parameter.
Returns
-------
sig: 1d array
Simulated fractional gaussian noise time series.
Notes
-----
The time series can be specified with either a desired power law exponent,
or alternatively with a specified Hurst parameter.
The Hurst parameter is not the Hurst exponent as defined in rescaled range analysis.
The Hurst parameter is defined for self-similar processes such that Y(at) = a^H Y(t)
for all a > 0, where this equality holds in distribution.
The relationship between the power law exponent chi and the Hurst parameter
for fractional gaussian noise is chi = 2 * hurst - 1.
For more information, consult [1]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2002). Fractal characterization of
complexity in temporal physiological signals. Physiological Measurement, 23(1), R1–R38.
DOI: https://doi.org/10.1088/0967-3334/23/1/201
Examples
--------
Simulate fractional gaussian noise with a power law decay of 0 (white noise):
>>> sig = sim_frac_gaussian_noise(n_seconds=1, fs=500, chi=0)
Simulate fractional gaussian noise with a Hurst parameter of 0.5 (also white noise):
>>> sig = sim_frac_gaussian_noise(n_seconds=1, fs=500, hurst=0.5)
"""
if hurst is not None:
check_param_range(hurst, 'hurst', (0, 1))
else:
check_param_range(chi, 'chi', (-1, 1))
# Infer the hurst parameter from chi
hurst = (-chi + 1.) / 2
# Compute the number of samples for the simulated time series
n_samples = compute_nsamples(n_seconds, fs)
# Define helper function for computing the auto-covariance
def autocov(hurst):
return lambda k: 0.5 * (np.abs(k - 1) ** (2 * hurst) - 2 * \
k ** (2 * hurst) + (k + 1) ** (2 * hurst))
# Build the autocovariance matrix
gamma = np.arange(0, n_samples)
gamma = np.apply_along_axis(autocov(hurst), 0, gamma)
autocov_matrix = toeplitz(gamma)
# Use the Cholesky factor to transform white noise to get the desired time series
white_noise = np.random.randn(n_samples)
cholesky_factor = cholesky(autocov_matrix, lower=True)
sig = cholesky_factor @ white_noise
return sig
@normalize
def sim_frac_brownian_motion(n_seconds, fs, chi=-2, hurst=None):
"""Simulate a timeseries as fractional brownian motion.
Parameters
----------
n_seconds : float
Simulation time, in seconds.
fs : float
Sampling rate of simulated signal, in Hz.
chi : float, optional, default: -2
Desired power law exponent of the spectrum of the signal.
Must be in the range (-3, -1).
hurst : float, optional, default: None
Desired Hurst parameter, which must be in the range (0, 1).
If provided, this value overwrites the `chi` parameter.
Returns
-------
sig : 1d array
Simulated fractional brownian motion time series.
Notes
-----
The time series can be specified with either a desired power law exponent,
or alternatively with a specified Hurst parameter.
Note that when specifying there can be some bias leading to a steeper than expected
spectrum of the simulated signal. This bias is higher for chi values near to 1,
and may be more severe in shorter signals.
The Hurst parameter is not the Hurst exponent in general. The Hurst parameter
is defined for self-similar processes such that Y(at) = a^H Y(t) for all a > 0,
where this equality holds in distribution.
The relationship between the power law exponent chi and the Hurst parameter
for fractional brownian motion is chi = 2 * hurst + 1
For more information, consult [1]_ and/or [2]_.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2002). Fractal characterization of
complexity in temporal physiological signals. Physiological Measurement, 23(1), R1–R38.
DOI: https://doi.org/10.1088/0967-3334/23/1/201
.. [2] <NAME>. (2004). Simulation of fractional Brownian motion. 77.
Examples
--------
Simulate fractional brownian motion with a power law exponent of -2 (brown noise):
>>> sig = sim_frac_brownian_motion(n_seconds=1, fs=500, chi=-2)
Simulate fractional brownian motion with a Hurst parameter of 0.5 (also brown noise):
>>> sig = sim_frac_brownian_motion(n_seconds=1, fs=500, hurst=0.5)
"""
if hurst is not None:
check_param_range(hurst, 'hurst', (0, 1))
else:
check_param_range(chi, 'chi', (-3, -1))
# Infer the hurst parameter from chi
hurst = (-chi - 1.) / 2
# Fractional brownian motion is the cumulative sum of fractional gaussian noise
fgn = sim_frac_gaussian_noise(n_seconds, fs, hurst=hurst)
sig = np.cumsum(fgn)
return sig
def _create_powerlaw(n_samples, fs, exponent):
"""Create a power law time series.
Parameters
----------
n_samples : int
The number of samples to simulate.
fs : float
Sampling rate of simulated signal, in Hz.
exponent : float
Desired power-law exponent, of the form P(f)=f^exponent.
Returns
-------
sig : 1d array
Time-series with the desired power law exponent.
Notes
-----
This function creates variable power law exponents by spectrally rotating white noise.
"""
# Start with white noise signal, that we will rotate, in frequency space
sig = np.random.randn(n_samples)
# Compute the FFT
fft_output = np.fft.fft(sig)
freqs = np.fft.fftfreq(len(sig), 1. / fs)
# Rotate spectrum and invert back to time series, with a z-score to normalize
# Delta exponent is divided by two, as the FFT output is in units of amplitude not power
fft_output_rot = rotate_powerlaw(freqs, fft_output, -exponent/2)
sig = zscore(np.real(np.fft.ifft(fft_output_rot)))
return sig
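A small usage sketch combining the simulators above; the parameter values are illustrative only:
if __name__ == '__main__':
    fs = 500
    sig = sim_powerlaw(n_seconds=10, fs=fs, exponent=-2.0, f_range=(1, None)) \
          + 0.5 * sim_synaptic_current(n_seconds=10, fs=fs)
    print(sig.shape)   # (compute_nsamples(10, fs),) == (5000,)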
|
[
"numpy.convolve",
"numpy.sqrt",
"numpy.random.rand",
"neurodsp.utils.data.create_times",
"neurodsp.filt.infer_passtype",
"scipy.linalg.cholesky",
"numpy.arange",
"neurodsp.filt.checks.check_filter_definition",
"neurodsp.sim.transients.sim_synaptic_kernel",
"numpy.where",
"numpy.fft.fft",
"numpy.exp",
"neurodsp.utils.remove_nans",
"numpy.abs",
"neurodsp.spectral.rotate_powerlaw",
"numpy.fft.ifft",
"numpy.random.randn",
"neurodsp.utils.checks.check_param_range",
"scipy.linalg.toeplitz",
"numpy.cumsum",
"neurodsp.utils.data.compute_nsamples"
] |
[((4213, 4257), 'neurodsp.sim.transients.sim_synaptic_kernel', 'sim_synaptic_kernel', (['t_ker', 'fs', 'tau_r', 'tau_d'], {}), '(t_ker, fs, tau_r, tau_d)\n', (4232, 4257), False, 'from neurodsp.sim.transients import sim_synaptic_kernel\n'), ((5517, 5544), 'neurodsp.utils.data.create_times', 'create_times', (['n_seconds', 'fs'], {}), '(n_seconds, fs)\n', (5529, 5544), False, 'from neurodsp.utils.data import create_times, compute_nsamples\n'), ((5561, 5592), 'neurodsp.utils.data.compute_nsamples', 'compute_nsamples', (['n_seconds', 'fs'], {}), '(n_seconds, fs)\n', (5577, 5592), False, 'from neurodsp.utils.data import create_times, compute_nsamples\n'), ((7629, 7656), 'neurodsp.utils.data.create_times', 'create_times', (['n_seconds', 'fs'], {}), '(n_seconds, fs)\n', (7641, 7656), False, 'from neurodsp.utils.data import create_times, compute_nsamples\n'), ((7751, 7773), 'numpy.exp', 'np.exp', (['(-theta * times)'], {}), '(-theta * times)\n', (7757, 7773), True, 'import numpy as np\n'), ((9336, 9367), 'neurodsp.utils.data.compute_nsamples', 'compute_nsamples', (['n_seconds', 'fs'], {}), '(n_seconds, fs)\n', (9352, 9367), False, 'from neurodsp.utils.data import create_times, compute_nsamples\n'), ((12614, 12645), 'neurodsp.utils.data.compute_nsamples', 'compute_nsamples', (['n_seconds', 'fs'], {}), '(n_seconds, fs)\n', (12630, 12645), False, 'from neurodsp.utils.data import create_times, compute_nsamples\n'), ((12929, 12952), 'numpy.arange', 'np.arange', (['(0)', 'n_samples'], {}), '(0, n_samples)\n', (12938, 12952), True, 'import numpy as np\n'), ((13032, 13047), 'scipy.linalg.toeplitz', 'toeplitz', (['gamma'], {}), '(gamma)\n', (13040, 13047), False, 'from scipy.linalg import toeplitz, cholesky\n'), ((13153, 13179), 'numpy.random.randn', 'np.random.randn', (['n_samples'], {}), '(n_samples)\n', (13168, 13179), True, 'import numpy as np\n'), ((13202, 13238), 'scipy.linalg.cholesky', 'cholesky', (['autocov_matrix'], {'lower': '(True)'}), '(autocov_matrix, lower=True)\n', (13210, 13238), False, 'from scipy.linalg import toeplitz, cholesky\n'), ((15861, 15875), 'numpy.cumsum', 'np.cumsum', (['fgn'], {}), '(fgn)\n', (15870, 15875), True, 'import numpy as np\n'), ((16534, 16560), 'numpy.random.randn', 'np.random.randn', (['n_samples'], {}), '(n_samples)\n', (16549, 16560), True, 'import numpy as np\n'), ((16601, 16616), 'numpy.fft.fft', 'np.fft.fft', (['sig'], {}), '(sig)\n', (16611, 16616), True, 'import numpy as np\n'), ((16862, 16911), 'neurodsp.spectral.rotate_powerlaw', 'rotate_powerlaw', (['freqs', 'fft_output', '(-exponent / 2)'], {}), '(freqs, fft_output, -exponent / 2)\n', (16877, 16911), False, 'from neurodsp.spectral import rotate_powerlaw\n'), ((2304, 2323), 'numpy.where', 'np.where', (['(sig < 0.0)'], {}), '(sig < 0.0)\n', (2312, 2323), True, 'import numpy as np\n'), ((4268, 4298), 'numpy.convolve', 'np.convolve', (['sig', 'ker', '"""valid"""'], {}), "(sig, ker, 'valid')\n", (4279, 4298), True, 'import numpy as np\n'), ((9598, 9621), 'neurodsp.filt.infer_passtype', 'infer_passtype', (['f_range'], {}), '(f_range)\n', (9612, 9621), False, 'from neurodsp.filt import filter_signal, infer_passtype\n'), ((12353, 12394), 'neurodsp.utils.checks.check_param_range', 'check_param_range', (['hurst', '"""hurst"""', '(0, 1)'], {}), "(hurst, 'hurst', (0, 1))\n", (12370, 12394), False, 'from neurodsp.utils.checks import check_param_range\n'), ((12414, 12452), 'neurodsp.utils.checks.check_param_range', 'check_param_range', (['chi', '"""chi"""', '(-1, 1)'], {}), "(chi, 'chi', (-1, 1))\n", (12431, 12452), 
False, 'from neurodsp.utils.checks import check_param_range\n'), ((15525, 15566), 'neurodsp.utils.checks.check_param_range', 'check_param_range', (['hurst', '"""hurst"""', '(0, 1)'], {}), "(hurst, 'hurst', (0, 1))\n", (15542, 15566), False, 'from neurodsp.utils.checks import check_param_range\n'), ((15586, 15625), 'neurodsp.utils.checks.check_param_range', 'check_param_range', (['chi', '"""chi"""', '(-3, -1)'], {}), "(chi, 'chi', (-3, -1))\n", (15603, 15625), False, 'from neurodsp.utils.checks import check_param_range\n'), ((2195, 2226), 'neurodsp.utils.data.compute_nsamples', 'compute_nsamples', (['n_seconds', 'fs'], {}), '(n_seconds, fs)\n', (2211, 2226), False, 'from neurodsp.utils.data import create_times, compute_nsamples\n'), ((4300, 4331), 'neurodsp.utils.data.compute_nsamples', 'compute_nsamples', (['n_seconds', 'fs'], {}), '(n_seconds, fs)\n', (4316, 4331), False, 'from neurodsp.utils.data import create_times, compute_nsamples\n'), ((5974, 6036), 'numpy.sqrt', 'np.sqrt', (['(1 / (freq ** -chi1 * (freq ** (-chi2 - chi1) + knee)))'], {}), '(1 / (freq ** -chi1 * (freq ** (-chi2 - chi1) + knee)))\n', (5981, 6036), True, 'import numpy as np\n'), ((10125, 10148), 'neurodsp.filt.infer_passtype', 'infer_passtype', (['f_range'], {}), '(f_range)\n', (10139, 10148), False, 'from neurodsp.filt import filter_signal, infer_passtype\n'), ((10385, 10401), 'neurodsp.utils.remove_nans', 'remove_nans', (['sig'], {}), '(sig)\n', (10396, 10401), False, 'from neurodsp.utils import remove_nans\n'), ((16935, 16962), 'numpy.fft.ifft', 'np.fft.ifft', (['fft_output_rot'], {}), '(fft_output_rot)\n', (16946, 16962), True, 'import numpy as np\n'), ((9720, 9763), 'neurodsp.filt.checks.check_filter_definition', 'check_filter_definition', (['pass_type', 'f_range'], {}), '(pass_type, f_range)\n', (9743, 9763), False, 'from neurodsp.filt.checks import check_filter_definition\n'), ((7860, 7881), 'numpy.exp', 'np.exp', (['(theta * times)'], {}), '(theta * times)\n', (7866, 7881), True, 'import numpy as np\n'), ((7884, 7895), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (7891, 7895), True, 'import numpy as np\n'), ((12766, 12779), 'numpy.abs', 'np.abs', (['(k - 1)'], {}), '(k - 1)\n', (12772, 12779), True, 'import numpy as np\n'), ((6236, 6252), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6250, 6252), True, 'import numpy as np\n')]
|
# ==============================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
import os
import numpy as np
import torch
from torch import nn
import random
def seed_everything(seed=1029):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def set_device(gpu=-1):
if gpu >= 0 and torch.cuda.is_available():
device = torch.device("cuda:" + str(gpu))
else:
device = torch.device("cpu")
return device
def set_optimizer(optimizer):
if isinstance(optimizer, str):
if optimizer.lower() == "adam":
optimizer = "Adam"
elif optimizer.lower() == "rmsprop":
optimizer = "RMSprop"
elif optimizer.lower() == "sgd":
optimizer = "SGD"
return getattr(torch.optim, optimizer)
def set_loss(loss):
if isinstance(loss, str):
if loss in ["bce", "binary_crossentropy", "binary_cross_entropy"]:
loss = "binary_cross_entropy"
else:
raise NotImplementedError("loss={} is not supported.".format(loss))
return loss
def set_regularizer(reg):
reg_pair = [] # of tuples (p_norm, weight)
if isinstance(reg, float):
reg_pair.append((2, reg))
elif isinstance(reg, str):
try:
if reg.startswith("l1(") or reg.startswith("l2("):
reg_pair.append((int(reg[1]), float(reg.rstrip(")").split("(")[-1])))
elif reg.startswith("l1_l2"):
l1_reg, l2_reg = reg.rstrip(")").split("(")[-1].split(",")
reg_pair.append((1, float(l1_reg)))
reg_pair.append((2, float(l2_reg)))
else:
raise NotImplementedError
except:
raise NotImplementedError("regularizer={} is not supported.".format(reg))
return reg_pair
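# Illustrative parses of the accepted regularizer specs (values below are arbitrary):
#   set_regularizer(1e-5)                -> [(2, 1e-05)]
#   set_regularizer("l1(0.01)")          -> [(1, 0.01)]
#   set_regularizer("l1_l2(0.01, 0.1)")  -> [(1, 0.01), (2, 0.1)]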
def set_activation(activation):
if isinstance(activation, str):
if activation.lower() == "relu":
return nn.ReLU()
elif activation.lower() == "sigmoid":
return nn.Sigmoid()
elif activation.lower() == "tanh":
return nn.Tanh()
else:
return getattr(nn, activation)()
else:
return activation
def pad_sequences(sequences, maxlen=None, dtype='int32',
padding='pre', truncating='pre', value=0.):
""" Pads sequences (list of list) to the ndarray of same length
This is an equivalent implementation of tf.keras.preprocessing.sequence.pad_sequences
for Pytorch
"""
assert padding in ["pre", "post"], "Invalid padding={}.".format(padding)
assert truncating in ["pre", "post"], "Invalid truncating={}.".format(truncating)
if maxlen is None:
maxlen = max(len(x) for x in sequences)
arr = np.full((len(sequences), maxlen), value, dtype=dtype)
for idx, x in enumerate(sequences):
if len(x) == 0:
continue # empty list
if truncating == 'pre':
trunc = x[-maxlen:]
else:
trunc = x[:maxlen]
trunc = np.asarray(trunc, dtype=dtype)
if padding == 'pre':
arr[idx, -len(trunc):] = trunc
else:
arr[idx, :len(trunc)] = trunc
return arr
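# Minimal usage sketch for pad_sequences (made-up sequences; default truncating='pre'):
#   pad_sequences([[1, 2, 3], [4, 5]], maxlen=4, padding='post', value=0)
#   -> array([[1, 2, 3, 0],
#             [4, 5, 0, 0]], dtype=int32)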
|
[
"torch.manual_seed",
"torch.nn.ReLU",
"torch.nn.Sigmoid",
"torch.nn.Tanh",
"numpy.asarray",
"random.seed",
"torch.cuda.is_available",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.device"
] |
[((833, 850), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (844, 850), False, 'import random\n'), ((900, 920), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (914, 920), True, 'import numpy as np\n'), ((925, 948), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (942, 948), False, 'import torch\n'), ((953, 981), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (975, 981), False, 'import torch\n'), ((1073, 1098), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1096, 1098), False, 'import torch\n'), ((1177, 1196), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1189, 1196), False, 'import torch\n'), ((3788, 3818), 'numpy.asarray', 'np.asarray', (['trunc'], {'dtype': 'dtype'}), '(trunc, dtype=dtype)\n', (3798, 3818), True, 'import numpy as np\n'), ((2694, 2703), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2701, 2703), False, 'from torch import nn\n'), ((2769, 2781), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2779, 2781), False, 'from torch import nn\n'), ((2844, 2853), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2851, 2853), False, 'from torch import nn\n')]
|
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: <EMAIL>
import cv2
import numpy as np
from .glm import ortho
class Camera:
def __init__(self, width=1600, height=1200):
# Focal Length
# equivalent 50mm
focal = np.sqrt(width * width + height * height)
self.focal_x = focal
self.focal_y = focal
# Principal Point Offset
self.principal_x = width / 2
self.principal_y = height / 2
# Axis Skew
self.skew = 0
# Image Size
self.width = width
self.height = height
self.near = 1
self.far = 10
# Camera Center
self.center = np.array([0, 0, 1.6])
self.direction = np.array([0, 0, -1])
self.right = np.array([1, 0, 0])
self.up = np.array([0, 1, 0])
self.ortho_ratio = None
def sanity_check(self):
self.center = self.center.reshape([-1])
self.direction = self.direction.reshape([-1])
self.right = self.right.reshape([-1])
self.up = self.up.reshape([-1])
assert len(self.center) == 3
assert len(self.direction) == 3
assert len(self.right) == 3
assert len(self.up) == 3
@staticmethod
def normalize_vector(v):
v_norm = np.linalg.norm(v)
return v if v_norm == 0 else v / v_norm
def get_real_z_value(self, z):
z_near = self.near
z_far = self.far
z_n = 2.0 * z - 1.0
z_e = 2.0 * z_near * z_far / (z_far + z_near - z_n * (z_far - z_near))
return z_e
def get_rotation_matrix(self):
rot_mat = np.eye(3)
s = self.right
s = self.normalize_vector(s)
rot_mat[0, :] = s
u = self.up
u = self.normalize_vector(u)
rot_mat[1, :] = -u
rot_mat[2, :] = self.normalize_vector(self.direction)
return rot_mat
def get_translation_vector(self):
rot_mat = self.get_rotation_matrix()
trans = -np.dot(rot_mat, self.center)
return trans
def get_intrinsic_matrix(self):
int_mat = np.eye(3)
int_mat[0, 0] = self.focal_x
int_mat[1, 1] = self.focal_y
int_mat[0, 1] = self.skew
int_mat[0, 2] = self.principal_x
int_mat[1, 2] = self.principal_y
return int_mat
def get_projection_matrix(self):
ext_mat = self.get_extrinsic_matrix()
int_mat = self.get_intrinsic_matrix()
return np.matmul(int_mat, ext_mat)
def get_extrinsic_matrix(self):
rot_mat = self.get_rotation_matrix()
int_mat = self.get_intrinsic_matrix()
trans = self.get_translation_vector()
extrinsic = np.eye(4)
extrinsic[:3, :3] = rot_mat
extrinsic[:3, 3] = trans
return extrinsic[:3, :]
def set_rotation_matrix(self, rot_mat):
self.direction = rot_mat[2, :]
self.up = -rot_mat[1, :]
self.right = rot_mat[0, :]
def set_intrinsic_matrix(self, int_mat):
self.focal_x = int_mat[0, 0]
self.focal_y = int_mat[1, 1]
self.skew = int_mat[0, 1]
self.principal_x = int_mat[0, 2]
self.principal_y = int_mat[1, 2]
def set_projection_matrix(self, proj_mat):
res = cv2.decomposeProjectionMatrix(proj_mat)
int_mat, rot_mat, camera_center_homo = res[0], res[1], res[2]
camera_center = camera_center_homo[0:3] / camera_center_homo[3]
camera_center = camera_center.reshape(-1)
int_mat = int_mat / int_mat[2][2]
self.set_intrinsic_matrix(int_mat)
self.set_rotation_matrix(rot_mat)
self.center = camera_center
self.sanity_check()
def get_gl_matrix(self):
z_near = self.near
z_far = self.far
rot_mat = self.get_rotation_matrix()
int_mat = self.get_intrinsic_matrix()
trans = self.get_translation_vector()
extrinsic = np.eye(4)
extrinsic[:3, :3] = rot_mat
extrinsic[:3, 3] = trans
axis_adj = np.eye(4)
axis_adj[2, 2] = -1
axis_adj[1, 1] = -1
model_view = np.matmul(axis_adj, extrinsic)
projective = np.zeros([4, 4])
projective[:2, :2] = int_mat[:2, :2]
projective[:2, 2:3] = -int_mat[:2, 2:3]
projective[3, 2] = -1
projective[2, 2] = (z_near + z_far)
projective[2, 3] = (z_near * z_far)
if self.ortho_ratio is None:
ndc = ortho(0, self.width, 0, self.height, z_near, z_far)
perspective = np.matmul(ndc, projective)
else:
perspective = ortho(-self.width * self.ortho_ratio / 2,
self.width * self.ortho_ratio / 2,
-self.height * self.ortho_ratio / 2,
self.height * self.ortho_ratio / 2, z_near,
z_far)
return perspective, model_view
def KRT_from_P(proj_mat, normalize_K=True):
res = cv2.decomposeProjectionMatrix(proj_mat)
K, Rot, camera_center_homog = res[0], res[1], res[2]
camera_center = camera_center_homog[0:3] / camera_center_homog[3]
trans = -Rot.dot(camera_center)
if normalize_K:
K = K / K[2][2]
return K, Rot, trans
def MVP_from_P(proj_mat, width, height, near=0.1, far=10000):
'''
Convert OpenCV camera calibration matrix to OpenGL projection and model view matrix
    :param proj_mat: OpenCV camera projection matrix
:param width: Image width
:param height: Image height
:param near: Z near value
:param far: Z far value
:return: OpenGL projection matrix and model view matrix
'''
res = cv2.decomposeProjectionMatrix(proj_mat)
K, Rot, camera_center_homog = res[0], res[1], res[2]
camera_center = camera_center_homog[0:3] / camera_center_homog[3]
trans = -Rot.dot(camera_center)
K = K / K[2][2]
extrinsic = np.eye(4)
extrinsic[:3, :3] = Rot
extrinsic[:3, 3:4] = trans
axis_adj = np.eye(4)
axis_adj[2, 2] = -1
axis_adj[1, 1] = -1
model_view = np.matmul(axis_adj, extrinsic)
zFar = far
zNear = near
projective = np.zeros([4, 4])
projective[:2, :2] = K[:2, :2]
projective[:2, 2:3] = -K[:2, 2:3]
projective[3, 2] = -1
projective[2, 2] = (zNear + zFar)
projective[2, 3] = (zNear * zFar)
ndc = ortho(0, width, 0, height, zNear, zFar)
perspective = np.matmul(ndc, projective)
return perspective, model_view
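# Illustrative round trip (arbitrary values; equality holds only up to scale, sign
# conventions, and the (3, 1) shape cv2 returns for the camera center):
#   cam = Camera(width=640, height=480)
#   P = cam.get_projection_matrix()   # 3x4 matrix K [R | t]
#   K, R, t = KRT_from_P(P)           # K is normalized so that K[2][2] == 1
#   # K ~ cam.get_intrinsic_matrix(), R ~ cam.get_rotation_matrix(),
#   # t ~ cam.get_translation_vector()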
|
[
"numpy.eye",
"cv2.decomposeProjectionMatrix",
"numpy.sqrt",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.matmul",
"numpy.linalg.norm"
] |
[((5737, 5776), 'cv2.decomposeProjectionMatrix', 'cv2.decomposeProjectionMatrix', (['proj_mat'], {}), '(proj_mat)\n', (5766, 5776), False, 'import cv2\n'), ((6439, 6478), 'cv2.decomposeProjectionMatrix', 'cv2.decomposeProjectionMatrix', (['proj_mat'], {}), '(proj_mat)\n', (6468, 6478), False, 'import cv2\n'), ((6685, 6694), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6691, 6694), True, 'import numpy as np\n'), ((6772, 6781), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6778, 6781), True, 'import numpy as np\n'), ((6850, 6880), 'numpy.matmul', 'np.matmul', (['axis_adj', 'extrinsic'], {}), '(axis_adj, extrinsic)\n', (6859, 6880), True, 'import numpy as np\n'), ((6935, 6951), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (6943, 6951), True, 'import numpy as np\n'), ((7206, 7232), 'numpy.matmul', 'np.matmul', (['ndc', 'projective'], {}), '(ndc, projective)\n', (7215, 7232), True, 'import numpy as np\n'), ((875, 915), 'numpy.sqrt', 'np.sqrt', (['(width * width + height * height)'], {}), '(width * width + height * height)\n', (882, 915), True, 'import numpy as np\n'), ((1309, 1330), 'numpy.array', 'np.array', (['[0, 0, 1.6]'], {}), '([0, 0, 1.6])\n', (1317, 1330), True, 'import numpy as np\n'), ((1357, 1377), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, -1])\n', (1365, 1377), True, 'import numpy as np\n'), ((1400, 1419), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (1408, 1419), True, 'import numpy as np\n'), ((1439, 1458), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (1447, 1458), True, 'import numpy as np\n'), ((1938, 1955), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (1952, 1955), True, 'import numpy as np\n'), ((2283, 2292), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2289, 2292), True, 'import numpy as np\n'), ((2772, 2781), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2778, 2781), True, 'import numpy as np\n'), ((3157, 3184), 'numpy.matmul', 'np.matmul', (['int_mat', 'ext_mat'], {}), '(int_mat, ext_mat)\n', (3166, 3184), True, 'import numpy as np\n'), ((3387, 3396), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3393, 3396), True, 'import numpy as np\n'), ((3968, 4007), 'cv2.decomposeProjectionMatrix', 'cv2.decomposeProjectionMatrix', (['proj_mat'], {}), '(proj_mat)\n', (3997, 4007), False, 'import cv2\n'), ((4652, 4661), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4658, 4661), True, 'import numpy as np\n'), ((4753, 4762), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4759, 4762), True, 'import numpy as np\n'), ((4843, 4873), 'numpy.matmul', 'np.matmul', (['axis_adj', 'extrinsic'], {}), '(axis_adj, extrinsic)\n', (4852, 4873), True, 'import numpy as np\n'), ((4898, 4914), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (4906, 4914), True, 'import numpy as np\n'), ((2663, 2691), 'numpy.dot', 'np.dot', (['rot_mat', 'self.center'], {}), '(rot_mat, self.center)\n', (2669, 2691), True, 'import numpy as np\n'), ((5269, 5295), 'numpy.matmul', 'np.matmul', (['ndc', 'projective'], {}), '(ndc, projective)\n', (5278, 5295), True, 'import numpy as np\n')]
|
"""An attempt to implement a fishers exact test in numba.
Gave up after a day because some ofthe distributions required
are written in fortran in scipy.special.
Seems like soon numba-scipy may make this effort much easier.
For now will require use of cython"""
from numba import njit
import numpy as np
from scipy.special import comb, betaln
from scipy import stats  # used for the cross-check against scipy's hypergeometric pmf
from math import lgamma  # used by _betaln below
# @njit()
def binary_search(n, n1, n2, side, epsilon, pexact, mode):
"""Binary search for where to begin halves in two-sided test."""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeometric_pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeometric_pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeometric_pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeometric_pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeometric_pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeometric_pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
def fisher_exact(table, alternative='two-sided'):
#table = [[a, b], [c, d]]
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1, 0] > 0 and c[0, 1] > 0:
oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
else:
oddsratio = np.inf
n1 = c[0, 0] + c[0, 1] # a + b
n2 = c[1, 0] + c[1, 1] # c + d
n = c[0, 0] + c[1, 0] # a + c
if alternative == 'less':
pvalue = hypergeometric_cdf(c[0, 0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeometric_cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
elif alternative == 'two-sided':
mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
#print(n, n1, n2)
pexact = hypergeometric_pmf(c[0, 0], n1 + n2, n1, n)
pmode = hypergeometric_pmf(mode, n1 + n2, n1, n)
#print(pexact, pmode)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0, 0] < mode:
print('LT', mode, c[0, 0], n1 + n2, n1, n)
plower = hypergeometric_cdf(c[0, 0], n1 + n2, n1, n)
if hypergeometric_pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper", epsilon, pexact, mode)
pvalue = plower + hypergeometric_sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeometric_sf(c[0, 0] - 1, n1 + n2, n1, n)
if hypergeometric_pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower", epsilon, pexact, mode)
pvalue = pupper + hypergeometric_cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
pvalue = min(pvalue, 1.0)
return oddsratio, pvalue
# @njit(types.intp(types.intp, types.intp), cache=True)
def comb_jit(N, k):
"""
Numba jitted function that computes N choose k. Return `0` if the
outcome exceeds the maximum value of `np.intp` or if N < 0, k < 0,
or k > N.
Parameters
----------
N : scalar(int)
k : scalar(int)
Returns
-------
val : scalar(int)
"""
# From scipy.special._comb_int_long
# github.com/scipy/scipy/blob/v1.0.0/scipy/special/_comb.pyx
INTP_MAX = np.iinfo(np.intp).max
if N < 0 or k < 0 or k > N:
return 0
if k == 0:
return 1
if k == 1:
return N
if N == INTP_MAX:
val = 0
M = N + 1
nterms = min(k, N - k)
val = 1
for j in range(1, nterms + 1):
# Overflow check
if val > (INTP_MAX // (M - j)):
val = 0
break
val *= M - j
val //= j
if val != 0:
return val
M = N + 1
nterms = min(k, N - k)
numerator = 1
denominator = 1
for j in range(1, nterms + 1):
numerator *= M - j
denominator *= j
val = numerator // denominator
if val == 0 and comb(N, k) != 0:
print('comb0_Nk', N, k, '\n\t', numerator, denominator)
raise ValueError
return val
def hypergeometric_pmf(k, M, n, N):
"""scipy parameterization
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
"""
a = comb_jit(n, k)
b = comb_jit(M - n, N - k)
c = comb_jit(M, N)
res = np.exp(np.log(a)+np.log(b)-np.log(c))
if np.isnan(res) and not np.isnan(stats.hypergeom.pmf(k, M, n, N)):
print('NAN', k, M, n, N)
print('\tABC', a, b, c)
print('\tA_NK', a, comb(n, k))
print('\tB_NK',b, comb(M-n, N-k))
print('\tC_NK',c, comb(M, N))
return res
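# Worked check of the parameterization above (numbers chosen for easy arithmetic):
#   hypergeometric_pmf(k=1, M=10, n=3, N=4)
#   = comb(3, 1) * comb(7, 3) / comb(10, 4) = 3 * 35 / 210 = 0.5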
def hypergeom_logpmf(k, M, n, N):
tot, good = M, n
bad = tot - good
result = (betaln(good+1, 1) + betaln(bad+1, 1) + betaln(tot-N+1, N+1) -
betaln(k+1, good-k+1) - betaln(N-k+1, bad-N+k+1) -
betaln(tot+1, 1))
return result
def hypergeom_pmf(k, M, n, N):
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return np.exp(hypergeom_logpmf(k, M, n, N))
def hypergeometric_cdf(k, M, n, N):
c = np.log(comb(M, N))
tot = 0
for kk in range(k+1):
a = np.log(comb(n, kk))
b = np.log(comb(M - n, N - kk))
tot += np.exp(a+b-c)
return tot
def hypergeometric_sf(k, M, n, N):
tot = 0
c = np.log(comb(M, N))
for kk in range(k+1, N+1):
a = np.log(comb(n, kk))
b = np.log(comb(M - n, N - kk))
tot += np.exp(a+b-c)
return tot
#M, n, N = [20, 7, 12]
#x = np.arange(0, n+1)
#stats.hypergeom.pmf(x[3], M, n, N)
#hypergeometric_pmf(x[3], M, n, N)
def _betaln(p,q):
return lgamma(p) + lgamma(q) - lgamma(p + q)
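# Sanity sketch for fisher_exact (a made-up 2x2 table, not real data):
#   oddsratio, pvalue = fisher_exact([[8, 2], [1, 5]])
#   # oddsratio == 8 * 5 / (1 * 2) == 20.0; the two-sided pvalue should match
#   # scipy.stats.fisher_exact on the same table once the pmf/cdf helpers are correct.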
|
[
"numpy.abs",
"numpy.log",
"numpy.asarray",
"numpy.iinfo",
"numpy.any",
"numpy.exp",
"numpy.isnan",
"scipy.special.comb",
"numpy.maximum"
] |
[((1721, 1754), 'numpy.asarray', 'np.asarray', (['table'], {'dtype': 'np.int64'}), '(table, dtype=np.int64)\n', (1731, 1754), True, 'import numpy as np\n'), ((1905, 1918), 'numpy.any', 'np.any', (['(c < 0)'], {}), '(c < 0)\n', (1911, 1918), True, 'import numpy as np\n'), ((4534, 4551), 'numpy.iinfo', 'np.iinfo', (['np.intp'], {}), '(np.intp)\n', (4542, 4551), True, 'import numpy as np\n'), ((5683, 5696), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (5691, 5696), True, 'import numpy as np\n'), ((6413, 6423), 'scipy.special.comb', 'comb', (['M', 'N'], {}), '(M, N)\n', (6417, 6423), False, 'from scipy.special import comb\n'), ((6551, 6568), 'numpy.exp', 'np.exp', (['(a + b - c)'], {}), '(a + b - c)\n', (6557, 6568), True, 'import numpy as np\n'), ((6644, 6654), 'scipy.special.comb', 'comb', (['M', 'N'], {}), '(M, N)\n', (6648, 6654), False, 'from scipy.special import comb\n'), ((6774, 6791), 'numpy.exp', 'np.exp', (['(a + b - c)'], {}), '(a + b - c)\n', (6780, 6791), True, 'import numpy as np\n'), ((5210, 5220), 'scipy.special.comb', 'comb', (['N', 'k'], {}), '(N, k)\n', (5214, 5220), False, 'from scipy.special import comb\n'), ((5665, 5674), 'numpy.log', 'np.log', (['c'], {}), '(c)\n', (5671, 5674), True, 'import numpy as np\n'), ((5840, 5850), 'scipy.special.comb', 'comb', (['n', 'k'], {}), '(n, k)\n', (5844, 5850), False, 'from scipy.special import comb\n'), ((5878, 5896), 'scipy.special.comb', 'comb', (['(M - n)', '(N - k)'], {}), '(M - n, N - k)\n', (5882, 5896), False, 'from scipy.special import comb\n'), ((5920, 5930), 'scipy.special.comb', 'comb', (['M', 'N'], {}), '(M, N)\n', (5924, 5930), False, 'from scipy.special import comb\n'), ((6483, 6494), 'scipy.special.comb', 'comb', (['n', 'kk'], {}), '(n, kk)\n', (6487, 6494), False, 'from scipy.special import comb\n'), ((6515, 6534), 'scipy.special.comb', 'comb', (['(M - n)', '(N - kk)'], {}), '(M - n, N - kk)\n', (6519, 6534), False, 'from scipy.special import comb\n'), ((6706, 6717), 'scipy.special.comb', 'comb', (['n', 'kk'], {}), '(n, kk)\n', (6710, 6717), False, 'from scipy.special import comb\n'), ((6738, 6757), 'scipy.special.comb', 'comb', (['(M - n)', '(N - kk)'], {}), '(M - n, N - kk)\n', (6742, 6757), False, 'from scipy.special import comb\n'), ((5645, 5654), 'numpy.log', 'np.log', (['a'], {}), '(a)\n', (5651, 5654), True, 'import numpy as np\n'), ((5655, 5664), 'numpy.log', 'np.log', (['b'], {}), '(b)\n', (5661, 5664), True, 'import numpy as np\n'), ((2997, 3019), 'numpy.abs', 'np.abs', (['(pexact - pmode)'], {}), '(pexact - pmode)\n', (3003, 3019), True, 'import numpy as np\n'), ((3022, 3047), 'numpy.maximum', 'np.maximum', (['pexact', 'pmode'], {}), '(pexact, pmode)\n', (3032, 3047), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.optimize import minimize
from os import path
import matplotlib.pyplot as plt
import sys
from MCPM import utils
from MCPM.cpmfitsource import CpmFitSource
def fun_3(inputs, cpm_source, t_E):
"""3-parameter function for optimisation; t_E - fixed"""
t_0 = inputs[0]
u_0 = inputs[1]
t_E = t_E
f_s = inputs[2]
if u_0 < 0. or t_E < 0. or f_s < 0.:
return 1.e6
model = cpm_source.pspl_model(t_0, u_0, t_E, f_s)
cpm_source.run_cpm(model)
#print(t_0, u_0, t_E, f_s, cpm_source.residuals_rms)
return cpm_source.residuals_rms
def fun_4(inputs, cpm_source):
"""4-parameter function for optimisation"""
t_0 = inputs[0]
u_0 = inputs[1]
t_E = inputs[2]
f_s = inputs[3]
if u_0 < 0. or t_E < 0. or f_s < 0.:
return 1.e6
model = cpm_source.pspl_model(t_0, u_0, t_E, f_s)
cpm_source.run_cpm(model)
#print(t_0, u_0, t_E, f_s, cpm_source.residuals_rms)
return cpm_source.residuals_rms
if __name__ == "__main__":
# We want to extract the light curve of ob160980
channel = 52
campaign = 92
ra = 271.354292
dec = -28.005583
half_size = 2
n_select = 10
l2 = 10**8.5
start = np.array([7556., 0.14, 21., 300.])
start_3 = np.array([7556., .1, 150.])
t_E = 21.
tol = 0.001
#method = 'Nelder-Mead' # only these 2 make sense
method = 'Powell'
n_remove = 10
cpm_source = CpmFitSource(ra=ra, dec=dec, campaign=campaign,
channel=channel)
cpm_source.get_predictor_matrix() #n_pixel=100)
cpm_source.set_l2_l2_per_pixel(l2=l2)
cpm_source.set_pixels_square(half_size)
cpm_source.select_highest_prf_sum_pixels(n_select)
# Optimize model parameters:
args = (cpm_source, t_E)
out = minimize(fun_3, start_3, args=args, tol=tol, method=method)
print(out)
# plot the best model
model = cpm_source.pspl_model(out.x[0], out.x[1], t_E, out.x[2])
cpm_source.run_cpm(model)
print("RMS: {:.4f} {:}".format(cpm_source.residuals_rms, np.sum(cpm_source.residuals_mask)))
cpm_source.run_cpm_and_plot_model(model, plot_residuals=True, f_s = out.x[2])
#plt.show()
plt.close()
# you may want to plot residuals as a function of position:
if False:
        mask = cpm_source.residuals_mask  # define the mask before indexing with it
        plt.scatter(cpm_source.x_positions[mask], cpm_source.y_positions[mask], c=np.abs(cpm_source.residuals[mask]))
plt.show()
plt.close()
# Remove most outlying points:
if True:
mask = cpm_source.residuals_mask
limit = np.sort(np.abs(cpm_source.residuals[mask]))[-n_remove]
cpm_source.mask_bad_epochs_residuals(limit)
model = cpm_source.pspl_model(out.x[0], out.x[1], t_E, out.x[2])
cpm_source.run_cpm(model)
print("RMS: {:.4f} {:}".format(cpm_source.residuals_rms, np.sum(cpm_source.residuals_mask)))
# Optimize model parameters once more:
if True:
out = minimize(fun_3, out.x, args=args, tol=tol, method=method)
print(out)
model = cpm_source.pspl_model(out.x[0], out.x[1], t_E, out.x[2])
cpm_source.run_cpm(model)
print("RMS: {:.4f} {:}".format(cpm_source.residuals_rms, np.sum(cpm_source.residuals_mask)))
# plot it:
if True:
cpm_source.run_cpm_and_plot_model(model, plot_residuals=True, f_s = out.x[2])
#plt.savefig('ob160980.png')
plt.show()
plt.close()
cpm_source.plot_pixel_residuals()
#plt.savefig('ob160980_pixel_res.png')
plt.show()
plt.close()
# Optimize model parameters with 4 fitted parameters:
if False:
        args = (cpm_source, )  # args must be a tuple for scipy.optimize.minimize
out = minimize(fun_4, start, args=args, tol=tol, method=method)
print(out)
#print(out.nfev)
#print("{:.5f} {:.4f} {:.4f} ==> {:.4f}".format(out.x[0], out.x[1], out.x[2], out.fun))
#model = transform_model(out.x[0], out.x[1], out.x[2], model_dt, model_flux, cpm_source.pixel_time)
model = cpm_source.pspl_model(out.x[0], out.x[1], out.x[2], out.x[3])
cpm_source.run_cpm(model)
mask = cpm_source.residuals_mask
plt.plot(cpm_source.pixel_time[mask], cpm_source.residuals[mask]+model[mask], '.')
plt.show()
|
[
"numpy.abs",
"scipy.optimize.minimize",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.sum",
"MCPM.cpmfitsource.CpmFitSource",
"matplotlib.pyplot.show"
] |
[((1262, 1299), 'numpy.array', 'np.array', (['[7556.0, 0.14, 21.0, 300.0]'], {}), '([7556.0, 0.14, 21.0, 300.0])\n', (1270, 1299), True, 'import numpy as np\n'), ((1311, 1341), 'numpy.array', 'np.array', (['[7556.0, 0.1, 150.0]'], {}), '([7556.0, 0.1, 150.0])\n', (1319, 1341), True, 'import numpy as np\n'), ((1485, 1549), 'MCPM.cpmfitsource.CpmFitSource', 'CpmFitSource', ([], {'ra': 'ra', 'dec': 'dec', 'campaign': 'campaign', 'channel': 'channel'}), '(ra=ra, dec=dec, campaign=campaign, channel=channel)\n', (1497, 1549), False, 'from MCPM.cpmfitsource import CpmFitSource\n'), ((1834, 1893), 'scipy.optimize.minimize', 'minimize', (['fun_3', 'start_3'], {'args': 'args', 'tol': 'tol', 'method': 'method'}), '(fun_3, start_3, args=args, tol=tol, method=method)\n', (1842, 1893), False, 'from scipy.optimize import minimize\n'), ((2244, 2255), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2253, 2255), True, 'import matplotlib.pyplot as plt\n'), ((2465, 2475), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2473, 2475), True, 'import matplotlib.pyplot as plt\n'), ((2484, 2495), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2493, 2495), True, 'import matplotlib.pyplot as plt\n'), ((2993, 3050), 'scipy.optimize.minimize', 'minimize', (['fun_3', 'out.x'], {'args': 'args', 'tol': 'tol', 'method': 'method'}), '(fun_3, out.x, args=args, tol=tol, method=method)\n', (3001, 3050), False, 'from scipy.optimize import minimize\n'), ((3783, 3840), 'scipy.optimize.minimize', 'minimize', (['fun_4', 'start'], {'args': 'args', 'tol': 'tol', 'method': 'method'}), '(fun_4, start, args=args, tol=tol, method=method)\n', (3791, 3840), False, 'from scipy.optimize import minimize\n'), ((4244, 4333), 'matplotlib.pyplot.plot', 'plt.plot', (['cpm_source.pixel_time[mask]', '(cpm_source.residuals[mask] + model[mask])', '"""."""'], {}), "(cpm_source.pixel_time[mask], cpm_source.residuals[mask] + model[\n mask], '.')\n", (4252, 4333), True, 'import matplotlib.pyplot as plt\n'), ((4335, 4345), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4343, 4345), True, 'import matplotlib.pyplot as plt\n'), ((2101, 2134), 'numpy.sum', 'np.sum', (['cpm_source.residuals_mask'], {}), '(cpm_source.residuals_mask)\n', (2107, 2134), True, 'import numpy as np\n'), ((3476, 3486), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3484, 3486), True, 'import matplotlib.pyplot as plt\n'), ((3499, 3510), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3508, 3510), True, 'import matplotlib.pyplot as plt\n'), ((3633, 3643), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3641, 3643), True, 'import matplotlib.pyplot as plt\n'), ((3656, 3667), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3665, 3667), True, 'import matplotlib.pyplot as plt\n'), ((2421, 2455), 'numpy.abs', 'np.abs', (['cpm_source.residuals[mask]'], {}), '(cpm_source.residuals[mask])\n', (2427, 2455), True, 'import numpy as np\n'), ((2614, 2648), 'numpy.abs', 'np.abs', (['cpm_source.residuals[mask]'], {}), '(cpm_source.residuals[mask])\n', (2620, 2648), True, 'import numpy as np\n'), ((2886, 2919), 'numpy.sum', 'np.sum', (['cpm_source.residuals_mask'], {}), '(cpm_source.residuals_mask)\n', (2892, 2919), True, 'import numpy as np\n'), ((3252, 3285), 'numpy.sum', 'np.sum', (['cpm_source.residuals_mask'], {}), '(cpm_source.residuals_mask)\n', (3258, 3285), True, 'import numpy as np\n')]
|
# This script generates the scoring and schema files
# necessary to operationalize your model
from azureml.api.schema.dataTypes import DataTypes
from azureml.api.schema.sampleDefinition import SampleDefinition
from azureml.api.realtime.services import generate_schema
import json
import numpy as np
import os
import base64, io
from PIL import Image
from azure.storage.blob import BlockBlobService
# Prepare the web service definition by authoring
# init() and run() functions. Test the functions
# before deploying the web service.
model = None
def init():
# Get the path to the model asset
# local_path = get_local_path('mymodel.model.link')
# Load model using appropriate library and function
global model
global labels
# model = model_load_function(local_path)
model_name = 'yourmodel.h5'
from keras.models import load_model
model = load_model(model_name)
labels = {0: 'iscloud', 1: 'ismine', 2: 'isnone'}
def run(input_array):
base64ImgString = input_array[0]
pil_img = base64ToPilImg(base64ImgString)
image_np = load_image_into_numpy_array(pil_img)
image_np_expanded = np.expand_dims(image_np, axis=0)
x = image_np
x = np.expand_dims(x, axis=0)
y = model.predict(x)
result = '{"class": ' + json.dumps(labels[np.argmax(y[0])]) + ' , "score": ' + json.dumps(float(np.max(y[0]))) + '}' #
resultString = '{"output":' + result + '}'
return resultString
def generate_api_schema():
import os
print("create schema")
sample_input = "sample data text"
inputs = {"input_df": SampleDefinition(DataTypes.STANDARD, sample_input)}
os.makedirs('outputs', exist_ok=True)
print(generate_schema(inputs=inputs, filepath="outputs/schema.json", run_func=run))
def base64ToPilImg(base64ImgString):
if base64ImgString.startswith('b\''):
base64ImgString = base64ImgString[2:-1]
base64Img = base64ImgString.encode('utf-8')
decoded_img = base64.b64decode(base64Img)
img_buffer = io.BytesIO(decoded_img)
pil_img = Image.open(img_buffer).convert('RGB')
return pil_img
def pilImgToBase64(pilImg):
pilImg = pilImg.convert('RGB') #not sure this is necessary
imgio = io.BytesIO()
pilImg.save(imgio, 'PNG')
imgio.seek(0)
dataimg = base64.b64encode(imgio.read())
return dataimg.decode('utf-8')
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
# Implement test code to run in IDE or Azure ML Workbench
if __name__ == '__main__':
from azureml.api.schema.dataTypes import DataTypes
from azureml.api.schema.sampleDefinition import SampleDefinition
from azureml.api.realtime.services import generate_schema
# Import the logger only for Workbench runs
#from azureml.logging import get_azureml_logger
#logger = get_azureml_logger()
init()
pilImg = Image.open("yourimage.jpg")
base64ImgString = pilImgToBase64(pilImg)
np_imgstring = np.array([base64ImgString], dtype=np.unicode)
inputs = {"input_array": SampleDefinition(DataTypes.NUMPY, np_imgstring)}
resultString = run(np_imgstring)
print("resultString = " + str(resultString))
    # Generate the schema
generate_schema(run_func=run, inputs=inputs, filepath='service_schema.json')
print("Schema generated.")
|
[
"PIL.Image.open",
"keras.models.load_model",
"os.makedirs",
"azureml.api.realtime.services.generate_schema",
"io.BytesIO",
"base64.b64decode",
"numpy.argmax",
"numpy.max",
"numpy.array",
"numpy.expand_dims",
"azureml.api.schema.sampleDefinition.SampleDefinition"
] |
[((907, 929), 'keras.models.load_model', 'load_model', (['model_name'], {}), '(model_name)\n', (917, 929), False, 'from keras.models import load_model\n'), ((1173, 1205), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (1187, 1205), True, 'import numpy as np\n'), ((1233, 1258), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1247, 1258), True, 'import numpy as np\n'), ((1678, 1715), 'os.makedirs', 'os.makedirs', (['"""outputs"""'], {'exist_ok': '(True)'}), "('outputs', exist_ok=True)\n", (1689, 1715), False, 'import os\n'), ((2008, 2035), 'base64.b64decode', 'base64.b64decode', (['base64Img'], {}), '(base64Img)\n', (2024, 2035), False, 'import base64, io\n'), ((2055, 2078), 'io.BytesIO', 'io.BytesIO', (['decoded_img'], {}), '(decoded_img)\n', (2065, 2078), False, 'import base64, io\n'), ((2260, 2272), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2270, 2272), False, 'import base64, io\n'), ((3029, 3056), 'PIL.Image.open', 'Image.open', (['"""yourimage.jpg"""'], {}), "('yourimage.jpg')\n", (3039, 3056), False, 'from PIL import Image\n'), ((3123, 3168), 'numpy.array', 'np.array', (['[base64ImgString]'], {'dtype': 'np.unicode'}), '([base64ImgString], dtype=np.unicode)\n', (3131, 3168), True, 'import numpy as np\n'), ((3371, 3447), 'azureml.api.realtime.services.generate_schema', 'generate_schema', ([], {'run_func': 'run', 'inputs': 'inputs', 'filepath': '"""service_schema.json"""'}), "(run_func=run, inputs=inputs, filepath='service_schema.json')\n", (3386, 3447), False, 'from azureml.api.realtime.services import generate_schema\n'), ((1621, 1671), 'azureml.api.schema.sampleDefinition.SampleDefinition', 'SampleDefinition', (['DataTypes.STANDARD', 'sample_input'], {}), '(DataTypes.STANDARD, sample_input)\n', (1637, 1671), False, 'from azureml.api.schema.sampleDefinition import SampleDefinition\n'), ((1727, 1803), 'azureml.api.realtime.services.generate_schema', 'generate_schema', ([], {'inputs': 'inputs', 'filepath': '"""outputs/schema.json"""', 'run_func': 'run'}), "(inputs=inputs, filepath='outputs/schema.json', run_func=run)\n", (1742, 1803), False, 'from azureml.api.realtime.services import generate_schema\n'), ((3199, 3246), 'azureml.api.schema.sampleDefinition.SampleDefinition', 'SampleDefinition', (['DataTypes.NUMPY', 'np_imgstring'], {}), '(DataTypes.NUMPY, np_imgstring)\n', (3215, 3246), False, 'from azureml.api.schema.sampleDefinition import SampleDefinition\n'), ((2094, 2116), 'PIL.Image.open', 'Image.open', (['img_buffer'], {}), '(img_buffer)\n', (2104, 2116), False, 'from PIL import Image\n'), ((1386, 1398), 'numpy.max', 'np.max', (['y[0]'], {}), '(y[0])\n', (1392, 1398), True, 'import numpy as np\n'), ((1332, 1347), 'numpy.argmax', 'np.argmax', (['y[0]'], {}), '(y[0])\n', (1341, 1347), True, 'import numpy as np\n')]
|
from io import FileIO
import numpy as np
import pandas as pd
from pathlib import Path
from typing import Union, List, Iterable
from .common import open_file, coerce_matrix
def read_mdf(file: Union[str, FileIO, Path], raw: bool = False, tall: bool = False
) -> Union[np.ndarray, pd.DataFrame, pd.Series]:
"""Reads Emme's official matrix "binary serialization" format, created using ``inro.emme.matrix.MatrixData.save()``.
There is no official extension for this type of file; '.mdf' is recommended. '.emxd' is also sometimes encountered.
Args:
file (Union[str, FileIO, Path]): The file to read.
raw (bool, optional): Defaults to ``False``. If ``True``, returns an unlabelled ndarray. Otherwise, a DataFrame
will be returned.
tall (bool, optional): Defaults to ``False``. If ``True``, a 1D data structure will be returned. If
``raw=False``, a Series will be returned, otherwise a 1D ndarray.
Returns:
numpy.ndarray, pandas.DataFrame, or pandas.Series: The matrix stored in the file.
"""
with open_file(file, mode='rb') as file_handler:
magic, version, dtype_index, ndim = np.fromfile(file_handler, np.uint32, count=4)
if magic != 0xC4D4F1B2 or version != 1 or not (0 < dtype_index <= 4) or not (0 < ndim <= 2):
raise IOError("Unexpected file header: magic number: %X, version: %d, data type: %d, dimensions: %d."
% (magic, version, dtype_index, ndim))
shape = np.fromfile(file_handler, np.uint32, count=ndim)
index_list = []
for n_items in shape:
indices = np.fromfile(file_handler, np.int32, n_items)
index_list.append(indices)
dtype = {1: np.float32, 2: np.float64, 3: np.int32, 4: np.uint32}[dtype_index]
flat_length = shape.prod() # Multiply the shape tuple
matrix = np.fromfile(file_handler, dtype, count=flat_length)
if raw and tall:
return matrix
matrix.shape = shape
if raw:
return matrix
if ndim == 1:
return pd.Series(matrix, index=index_list[0])
elif ndim == 2:
matrix = pd.DataFrame(matrix, index=index_list[0], columns=index_list[1])
return matrix.stack() if tall else matrix
raise NotImplementedError() # This should never happen
def to_mdf(matrix: Union[pd.DataFrame, pd.Series], file: Union[str, FileIO, Path]):
"""Writes a matrix to Emme's official "binary serialization" format, which can be loaded in Emme using
``inro.emme.matrix.MatrixData.load()``. There is no official extension for this type of file; '.mdf' is recommended.
Args:
matrix (Union[pandas.DataFrame, panda.Series]): The matrix to write to disk. If a Series is given, it MUST have
a MultiIndex with exactly 2 levels to unstack.
file (Union[str, File, Path]): The path or file handler to write to.
"""
if isinstance(matrix, pd.Series):
row_index = matrix.index.get_level_values(0).unique()
column_index = matrix.index.get_level_values(1).unique()
elif isinstance(matrix, pd.DataFrame):
row_index = matrix.index
column_index = matrix.columns
else:
raise TypeError("Only labelled matrix objects are supported")
with open_file(file, mode='wb') as writer:
data = coerce_matrix(matrix, allow_raw=False)
np.array([0xC4D4F1B2, 1, 1, 2], dtype=np.uint32).tofile(writer) # Header
np.array(data.shape, dtype=np.uint32).tofile(writer) # Shape
np.array(row_index, dtype=np.int32).tofile(writer)
np.array(column_index, dtype=np.int32).tofile(writer)
data.tofile(writer)
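# Round-trip sketch (hypothetical zone labels; assumes coerce_matrix returns values
# consistent with the float32 dtype declared in the header written above):
#   df = pd.DataFrame(np.eye(3, dtype=np.float32),
#                     index=[101, 102, 103], columns=[201, 202, 203])
#   to_mdf(df, "demand.mdf")
#   read_mdf("demand.mdf")              # labelled DataFrame
#   read_mdf("demand.mdf", tall=True)   # stacked into a Series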
def peek_mdf(file: Union[str, FileIO, Path], as_index: bool = True) -> Union[List[List[int]], List[pd.Index]]:
"""Partially opens an MDF file to get the zone system of its rows and its columns.
Args:
file (Union[str, FileIO, Path]): The file to read.
as_index (bool, optional): Defaults to ``True``. Set to ``True`` to return a pandas.Index object rather than
List[int]
Returns:
List[int] or List[pandas.Index]: One item for each dimension. If ``as_index=True``, the items will be pandas.Index objects, otherwise they will be List[int]
"""
with open_file(file, mode='rb') as file_handler:
magic, version, dtype_index, ndim = np.fromfile(file_handler, np.uint32, count=4)
if magic != 0xC4D4F1B2 or version != 1 or not (0 < dtype_index <= 4) or not (0 < ndim <= 2):
raise IOError("Unexpected file header: magic number: %X, version: %d, data type: %d, dimensions: %d."
% (magic, version, dtype_index, ndim))
shape = np.fromfile(file_handler, np.uint32, count=ndim)
index_list = []
for n_items in shape:
indices = np.fromfile(file_handler, np.int32, n_items)
index_list.append(indices)
if not as_index:
return index_list
return [pd.Index(zones) for zones in index_list]
def read_emx(file: Union[str, FileIO, Path], zones: Union[int, Iterable[int], pd.Index] = None,
tall: bool = False) -> Union[np.ndarray, pd.DataFrame, pd.Series]:
"""Reads an "internal" Emme matrix (found in `<Emme Project>/Database/emmemat`); with an '.emx' extension. This data
format does not contain information about zones. Its size is determined by the dimensions of the Emmebank
(``Emmebank.dimensions['centroids']``), regardless of the number of zones actually used in all scenarios.
Args:
file (Union[str, File, Path]): The file to read.
zones (Union[int, Iterable[int], pandas.Index], optional): Defaults to ``None``. An Index or Iterable will be
interpreted as the zone labels for the matrix rows and columns; returning a DataFrame or Series (depending
on ``tall``). If an integer is provided, the returned ndarray will be truncated to this 'number of zones'.
Otherwise, the returned ndarray will be size to the maximum number of zone dimensioned by the Emmebank.
tall (bool, optional): Defaults to ``False``. If True, a 1D data structure will be returned. If ``zone_index``
is provided, a Series will be returned, otherwise a 1D ndarray.
Returns:
numpy.ndarray, pandas.DataFrame, or pandas.Series.
Examples:
For a project with 20 zones:
>>> matrix = read_emx("Database/emmemat/mf1.emx")
>>> print type(matrix), matrix.shape
(numpy.ndarray, (20, 20))
>>> matrix = read_emx("Database/emmemat/mf1.emx", zones=10)
>>> print type(matrix), matrix.shape
(numpy.ndarray, (10, 10))
>>> matrix = read_emx("Database/emmemat/mf1.emx", zones=range(10))
>>> print type(matrix), matrix.shape
<class 'pandas.core.frame.DataFrame'> (10, 10)
>>> matrix = read_emx("Database/emmemat/mf1.emx", zones=range(10), tall=True)
>>> print type(matrix), matrix.shape
<class 'pandas.core.series.Series'> 100
"""
with open_file(file, mode='rb') as reader:
data = np.fromfile(reader, dtype=np.float32)
n = int(len(data) ** 0.5)
assert len(data) == n ** 2
if zones is None and tall:
return data
data.shape = n, n
if isinstance(zones, (int, np.int_)):
data = data[:zones, :zones]
if tall:
data.shape = zones * zones
return data
return data
elif zones is None:
return data
zones = pd.Index(zones)
n = len(zones)
data = data[:n, :n]
matrix = pd.DataFrame(data, index=zones, columns=zones)
return matrix.stack() if tall else matrix
def to_emx(matrix: Union[pd.DataFrame, pd.Series, np.ndarray], file: Union[str, FileIO, Path], emmebank_zones: int):
"""Writes an "internal" Emme matrix (found in `<Emme Project>/Database/emmemat`); with an '.emx' extension. The
number of zones that the Emmebank is dimensioned for must be known in order for the file to be written correctly.
Args:
matrix (Union[pandas.DataFrame, pandas.Series, numpy.ndarray]): The matrix to write to disk. If a Series is
given, it MUST have a MultiIndex with exactly 2 levels to unstack.
file (Union[basestring, File]): The path or file handler to write to.
emmebank_zones (int): The number of zones the target Emmebank is dimensioned for.
"""
assert emmebank_zones > 0
with open_file(file, mode='wb') as writer:
data = coerce_matrix(matrix)
n = data.shape[0]
if n > emmebank_zones:
out = data[:emmebank_zones, :emmebank_zones].astype(np.float32)
else:
out = np.zeros([emmebank_zones, emmebank_zones], dtype=np.float32)
out[:n, :n] = data
out.tofile(writer)
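# Round-trip sketch (hypothetical file name and Emmebank size):
#   to_emx(np.eye(3, dtype=np.float32), "mf1.emx", emmebank_zones=4)
#   read_emx("mf1.emx")                       # 4x4 float32 ndarray
#   read_emx("mf1.emx", zones=[10, 20, 30])   # 3x3 labelled DataFrame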
|
[
"pandas.Series",
"numpy.fromfile",
"pandas.Index",
"numpy.array",
"numpy.zeros",
"pandas.DataFrame"
] |
[((1176, 1221), 'numpy.fromfile', 'np.fromfile', (['file_handler', 'np.uint32'], {'count': '(4)'}), '(file_handler, np.uint32, count=4)\n', (1187, 1221), True, 'import numpy as np\n'), ((1520, 1568), 'numpy.fromfile', 'np.fromfile', (['file_handler', 'np.uint32'], {'count': 'ndim'}), '(file_handler, np.uint32, count=ndim)\n', (1531, 1568), True, 'import numpy as np\n'), ((1898, 1949), 'numpy.fromfile', 'np.fromfile', (['file_handler', 'dtype'], {'count': 'flat_length'}), '(file_handler, dtype, count=flat_length)\n', (1909, 1949), True, 'import numpy as np\n'), ((4433, 4478), 'numpy.fromfile', 'np.fromfile', (['file_handler', 'np.uint32'], {'count': '(4)'}), '(file_handler, np.uint32, count=4)\n', (4444, 4478), True, 'import numpy as np\n'), ((4777, 4825), 'numpy.fromfile', 'np.fromfile', (['file_handler', 'np.uint32'], {'count': 'ndim'}), '(file_handler, np.uint32, count=ndim)\n', (4788, 4825), True, 'import numpy as np\n'), ((7193, 7230), 'numpy.fromfile', 'np.fromfile', (['reader'], {'dtype': 'np.float32'}), '(reader, dtype=np.float32)\n', (7204, 7230), True, 'import numpy as np\n'), ((7661, 7676), 'pandas.Index', 'pd.Index', (['zones'], {}), '(zones)\n', (7669, 7676), True, 'import pandas as pd\n'), ((7746, 7792), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'zones', 'columns': 'zones'}), '(data, index=zones, columns=zones)\n', (7758, 7792), True, 'import pandas as pd\n'), ((1646, 1690), 'numpy.fromfile', 'np.fromfile', (['file_handler', 'np.int32', 'n_items'], {}), '(file_handler, np.int32, n_items)\n', (1657, 1690), True, 'import numpy as np\n'), ((2117, 2155), 'pandas.Series', 'pd.Series', (['matrix'], {'index': 'index_list[0]'}), '(matrix, index=index_list[0])\n', (2126, 2155), True, 'import pandas as pd\n'), ((4903, 4947), 'numpy.fromfile', 'np.fromfile', (['file_handler', 'np.int32', 'n_items'], {}), '(file_handler, np.int32, n_items)\n', (4914, 4947), True, 'import numpy as np\n'), ((5060, 5075), 'pandas.Index', 'pd.Index', (['zones'], {}), '(zones)\n', (5068, 5075), True, 'import pandas as pd\n'), ((8859, 8919), 'numpy.zeros', 'np.zeros', (['[emmebank_zones, emmebank_zones]'], {'dtype': 'np.float32'}), '([emmebank_zones, emmebank_zones], dtype=np.float32)\n', (8867, 8919), True, 'import numpy as np\n'), ((2201, 2265), 'pandas.DataFrame', 'pd.DataFrame', (['matrix'], {'index': 'index_list[0]', 'columns': 'index_list[1]'}), '(matrix, index=index_list[0], columns=index_list[1])\n', (2213, 2265), True, 'import pandas as pd\n'), ((3445, 3493), 'numpy.array', 'np.array', (['[3302289842, 1, 1, 2]'], {'dtype': 'np.uint32'}), '([3302289842, 1, 1, 2], dtype=np.uint32)\n', (3453, 3493), True, 'import numpy as np\n'), ((3527, 3564), 'numpy.array', 'np.array', (['data.shape'], {'dtype': 'np.uint32'}), '(data.shape, dtype=np.uint32)\n', (3535, 3564), True, 'import numpy as np\n'), ((3598, 3633), 'numpy.array', 'np.array', (['row_index'], {'dtype': 'np.int32'}), '(row_index, dtype=np.int32)\n', (3606, 3633), True, 'import numpy as np\n'), ((3657, 3695), 'numpy.array', 'np.array', (['column_index'], {'dtype': 'np.int32'}), '(column_index, dtype=np.int32)\n', (3665, 3695), True, 'import numpy as np\n')]
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from data_lit import Data_Augmentation
import numpy as np
#physical_devices = tf.config.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(physical_devices[0], True)
class Initializer:
def __init__(self, units, train_tokenizer, max_length_train, label_tokenizer, encoder, decoder):
self.data = Data_Augmentation()
self.train_tokenizer = train_tokenizer
        self.max_len = max_length_train
self.units = units
self.label_tokenizer = label_tokenizer
self.enc = encoder
self.dec = decoder
# Remove the <start> and <end> tags from the sentences
def Expand(self, sentence):
return sentence.split("<start>")[-1].split("<end>")[0]
# proceed for real time prediction.
'''
sentence: is the sentence given by the chatbot user
'''
def test(self, sentence):
sentence = self.data.preprocess_sentence(sentence)
whole = [] # collect the " " split sentence words
for i in sentence.split(' '):
            # if a user-input word is not in the training vocabulary, return a fallback message
try:
self.train_tokenizer.word_index[i]
except Exception as e:
return('Please say it clearly')
whole.append(self.train_tokenizer.word_index[i])
sentence = pad_sequences([whole], maxlen=self.max_len, padding='post')
sentence = tf.convert_to_tensor(sentence)
enc_hidden_start = [tf.zeros((1, self.units))] # initial hidden state provide to the encoder
enc_hidden, enc_output = self.enc(sentence, enc_hidden_start)
dec_output = enc_output
dec_input = tf.expand_dims([self.label_tokenizer.word_index['<start>']], 0)
answer = '' # store the answer string
# loop for predict word by word from the decoder
for i in range(1, self.max_len):
pred, dec_output, attention_weight = self.dec(dec_input, dec_output, enc_hidden)
            answer += self.label_tokenizer.index_word[np.argmax(pred[0])] + " "  # append the predicted word (index converted back to a word)
if self.label_tokenizer.index_word[np.argmax(pred[0])] == '<end>':
return self.Expand(answer)
            dec_input = tf.expand_dims([np.argmax(pred[0])], 0)  # the next decoder input is the index of the previously predicted word
return self.Expand(answer)
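# Illustrative wiring (every name below is a placeholder for an artifact produced during
# training: the tokenizers, the max padded length, and the trained encoder/decoder):
#   bot = Initializer(units, train_tokenizer, max_length_train,
#                     label_tokenizer, encoder, decoder)
#   reply = bot.test("how are you")   # decoded answer string (or the fallback message)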
if __name__ == '__main__':
print('oot sssd proc')
|
[
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"data_lit.Data_Augmentation",
"numpy.argmax",
"tensorflow.convert_to_tensor",
"tensorflow.expand_dims",
"tensorflow.zeros"
] |
[((524, 543), 'data_lit.Data_Augmentation', 'Data_Augmentation', ([], {}), '()\n', (541, 543), False, 'from data_lit import Data_Augmentation\n'), ((1576, 1635), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[whole]'], {'maxlen': 'self.max_len', 'padding': '"""post"""'}), "([whole], maxlen=self.max_len, padding='post')\n", (1589, 1635), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((1656, 1686), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['sentence'], {}), '(sentence)\n', (1676, 1686), True, 'import tensorflow as tf\n'), ((1918, 1981), 'tensorflow.expand_dims', 'tf.expand_dims', (["[self.label_tokenizer.word_index['<start>']]", '(0)'], {}), "([self.label_tokenizer.word_index['<start>']], 0)\n", (1932, 1981), True, 'import tensorflow as tf\n'), ((1718, 1743), 'tensorflow.zeros', 'tf.zeros', (['(1, self.units)'], {}), '((1, self.units))\n', (1726, 1743), True, 'import tensorflow as tf\n'), ((2284, 2302), 'numpy.argmax', 'np.argmax', (['pred[0]'], {}), '(pred[0])\n', (2293, 2302), True, 'import numpy as np\n'), ((2414, 2432), 'numpy.argmax', 'np.argmax', (['pred[0]'], {}), '(pred[0])\n', (2423, 2432), True, 'import numpy as np\n'), ((2533, 2551), 'numpy.argmax', 'np.argmax', (['pred[0]'], {}), '(pred[0])\n', (2542, 2551), True, 'import numpy as np\n')]
|
__author__ = "<NAME>"
__copyright__ = "Copyright 2020"
__version__ = "1.0.1"
__maintainer__ = "Rabaa"
__email__ = "<EMAIL>"
import numpy as np
import sys
## Class: TestParticle
# Functions: Default Constructor, DataDissection, IdentifyResonance, PrintData
class TestParticle:
def __init__(self): # Attributes defined
self.Resonant = False
self.ResonanceType = 'n:n'
self.Name = 'N/A'
self.ResonanceCenter = -999
self.ResonanceAmplitude = -999
        self.AverageSMA = -999 # Average semi-major axis
self.AverageEccentricity = -999
self.AverageInclination = -999
self.Kozai = False
self.SMAamplitude = -999
self.SMACenter = -999
self.Index = -1
############################################ FUNCTIONS #################################################
############################################ DATA DISSECTION #################################################
# Expects: typeOfData, IndexCount
    # Will do: Alter the Resonance & Kozai attributes of the class, given the right orbital elements
def DataDissection(self, typeOfData, IndexCount):
self.Index = IndexCount
TestParticleSample = sys.argv[1] # User to choose a test sample using terminal
with open('tp' + TestParticleSample + ".out") as f: # Counting number of lines
for line, l in enumerate(f):
pass
NumberOfLines = line
# Taking the test point's data from the .out file sequentially
TestParticleTime, Index, SemiMajorAxis, Eccentricity, Inclination, Omega, omega, AngularPosition, LongitudeTP = np.genfromtxt(
'tp' + TestParticleSample + ".out", unpack=True)
Longitude = np.genfromtxt(
"LN.out", usecols= 8, unpack=True)
NumberOfLines = (NumberOfLines / (max(Index)+1)) -1 # Dividing the total number of lines by number of test particles, to get steps of one test particle.
# Matching the orbitals with the index we need
TestParticleTime = TestParticleTime[Index == IndexCount]
SemiMajorAxis = SemiMajorAxis[Index == IndexCount]
Eccentricity = Eccentricity[Index == IndexCount]
Inclination = Inclination[Index == IndexCount]
Omega = Omega[Index == IndexCount]
omega = omega[Index == IndexCount]
AngularPosition = AngularPosition[Index == IndexCount]
# Calculating Lambda, Pomega
Lambda = (Omega + omega + AngularPosition) % 360 # The Lambda for test particles
Pomega = (Omega + omega) % 360 # The longitude if pericenter in degrees
# Flags "Specific ones"
IsItResonant = False # Is it in resonance?
ResonanceAmplitude = -999 # The Resonance Amplitude
ResonanceCenter = -999 # The Resonance Center
ResonanceName = -999 # The Resonance name "Ration"
IsItKozai = False # Is it Kozai resonance?
SMAAmplitude = -999 # SemiMajor amplitude
SMACenter = -999 # SemiMajor center
# Flags "General ones"
IsIt = False # Resonance / Kozai ?
Amplitude = -999 # Phi / SMA
Center = -999 # Phi / SMA
Name = -999 # Name of the test particle
# General flags will be used in the coming loop, Specific flags will then be set at the end, to distinguish Kozai / Resonance
# list of resonances to check: pp and qq for pp:qq resonance
pp = [2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 7, 7, 7, 7, 8, 8, 9, 9, 9, 10]
qq = [1, 1, 2, 1, 3, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 3, 1, 2, 4, 1]
for jj in np.arange(0, len(pp)): # First Loop
ResSemiMajorAxis = 30.1 * (float(pp[jj]) / float(qq[jj])) ** (
2. / 3.) # Kepler's Third Law to calculate semimajor axis of the resonance
# Searching within 2 AUs from the resonance center
if IsIt == 0 and (ResSemiMajorAxis + 2) > np.average(SemiMajorAxis) > (ResSemiMajorAxis - 2):
phi = (float(pp[jj]) * Lambda - float(qq[jj]) * Longitude - (float(pp[jj]) - float(qq[jj])) * Pomega) % 360
AngleRange = np.arange(0, 360, 15) # Array of angles 15 degrees increment each step
Window = int(0)
Loop = 0
if typeOfData == 0:
# Dividing the timeline to 10 separate windows Detecting resonance on smaller scales
WindowStep = int(NumberOfLines / 10)
IsItArray = np.zeros(int(len(
phi) / WindowStep)) # Array of 10 binary elements to check for resonance each step '10%' set to zero
CenterArray = np.zeros(int(len(
phi) / WindowStep)) # Array of 10 binary elements to check the res angle each step '10%' set to zero
while Window + WindowStep < len(phi):
# Average of the semi-major axis from Current Window -> Next Window
WindowAverage = np.average(SemiMajorAxis[Window:Window + WindowStep])
if (ResSemiMajorAxis + 2) > WindowAverage > (
ResSemiMajorAxis - 2): # Within 2 AUs of Window Average
WindowPhi = phi[Window:Window + WindowStep] # Phi of next window
AnglePresent = np.zeros(len(AngleRange)) + 1
for step in np.arange(0, len(
AngleRange) - 1): # find out where the res angle doesn't go for 15 degrees, proxy for AnglePresent
if len(WindowPhi[
(WindowPhi > AngleRange[step]) * (WindowPhi < (AngleRange[step + 1]))]) == 0:
AnglePresent[step] = 0
IsItArray[Loop] = np.average(AnglePresent) * 180.
CenterArray[Loop] = np.average(
AnglePresent[AnglePresent != 0] * AngleRange[AnglePresent != 0])
else:
IsItArray[Loop] = 180.
Window += WindowStep # Increment Window
Loop += 1 # Increment Loop
if len(IsItArray[
IsItArray < 180.]) > 8: # If 8 out of 10 Windows classified as Resonant
IsIt = True
Amplitude = np.average(IsItArray)
Center = np.average(CenterArray)
Name = str(pp[jj]) + ':' + str(qq[jj])
MaxCenter = max(CenterArray)
MinCenter = min(CenterArray)
if (MaxCenter - MinCenter) > 210: # If the centers are too large in difference, it is not resonant
IsIt = False
Amplitude = -999
Center = -999
break
else:
Amplitude = -999
Center = -999
else:
# If checking for Kozai, we only want one window
WindowStep = int(NumberOfLines)
IsItArray = np.zeros(int(len(
omega) / WindowStep)) # For Kozai we check SMA
CenterArray = np.zeros(int(len(
omega) / WindowStep))
while Window + WindowStep < len(SemiMajorAxis):
# WindowSMA = SemiMajorAxis[Window:Window + WindowStep] # SMA of next window
AnglePresent = np.zeros(len(AngleRange)) + 1
for step in np.arange(0, len(
AngleRange) - 1): # find out where the res angle doesn't go for 15 degrees, proxy for AnglePresent
if len(omega[
(omega > AngleRange[step]) * (omega < (AngleRange[step + 1]))]) == 0:
AnglePresent[step] = 0
IsItArray[Loop] = np.average(AnglePresent) * 180.
CenterArray[Loop] = np.average(
AnglePresent[AnglePresent != 0] * AngleRange[AnglePresent != 0])
Window += WindowStep # Increment Window
Loop += 1 # Increment Loop
if len(IsItArray[
IsItArray < 180.]) == 1: # If the Window classified as Kozai
IsIt = True
Amplitude = np.average(IsItArray)
Center = np.average(CenterArray)
Name = str(pp[jj]) + ':' + str(qq[jj])
else:
Amplitude = -999
Center = -999
if typeOfData == 0: # Type 0 means we are looking if it was Resonant
IsItResonant = IsIt
ResonanceAmplitude = Amplitude
ResonanceCenter = Center
ResonanceName = Name
self.Resonant = IsItResonant
self.ResonanceAmplitude = ResonanceAmplitude
self.ResonanceCenter = ResonanceCenter
self.ResonanceType = ResonanceName
else: # Else 1 means we are looking if it was Kozai
IsItKozai = IsIt
SMAAmplitude = Amplitude
SMACenter = Center
self.Kozai = IsItKozai
self.SMAamplitude = SMAAmplitude
self.SMACenter = SMACenter
# End Else
self.Name = TestParticleSample
self.AverageEccentricity = np.average(Eccentricity)
self.AverageInclination = np.average(Inclination)
self.AverageSMA = np.average(SemiMajorAxis)
return
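    # Worked example of the resonance-location formula used above (Kepler's third law
    # scaled to Neptune at ~30.1 AU): for the 3:2 resonance,
    #     a_res = 30.1 * (3 / 2) ** (2. / 3.) ~= 39.4 AU,
    # so particles whose average semi-major axis lies within 2 AU of ~39.4 AU are
    # checked for 3:2 libration.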
############################################ IDENTIFY RESONANCE ##############################################
# Expects: IndexCount
# Will do: First call to function DataDissection to check if resonant, if resonant, will do second call to check for Kozai
def IdentifyResonance(self, IndexCount):
type = 0 # Indicated that the variable Resonant is what we want from DataDissection function
self.DataDissection(type, IndexCount)
if self.Resonant == True:
type = 1 # Indicated that the variable Kozai is what we want from DataDissection function
self.DataDissection(type, IndexCount)
############################################## PRINT DATA ##############################################
# Expects: IndexCount
# Will do: Print Data Into a '.out' file Names tp + 'number you entered' + .out
def PrintData(self, IndexCount ):
TestParticleSample = sys.argv[1]
TestParticleTime, Index, SemiMajorAxis, Eccentricity, Inclination, Omega, omega, AngularPosition, Longitude = np.genfromtxt(
"tp" + TestParticleSample + ".out", unpack=True)
        TextFile.write((
            str(self.Index) + " " + str(SemiMajorAxis[IndexCount]) + " " +
            str(Eccentricity[IndexCount]) + " " + str(Inclination[IndexCount]) + " " +
            str(Omega[IndexCount]) + " " + str(omega[IndexCount]) + " " +
            str(AngularPosition[IndexCount]) + " " + str(self.Name) + " " +
            str(self.AverageSMA) + " " + str(self.AverageEccentricity) + " " +
            str(self.AverageInclination) + " " + str(self.ResonanceCenter) + " " +
            str(self.ResonanceAmplitude) + " " + str(self.SMACenter) + " " +
            str(self.SMAamplitude) + " " + '\n'))
# Main function
if __name__ == '__main__':
    TestParticleSample = sys.argv[1]  # user-supplied number identifying which tp<N>.out file to read
Index = np.genfromtxt('tp' + TestParticleSample + ".out", usecols=1, unpack=True)
    NumberOfTPs = max(Index)  # assumes the file can contain more than one test particle, each with its own block of rows
TextFile = open("TestParticleResonance"+ TestParticleSample +".out", "a+")
TextFile.write("# SMA0 Ecc0 Inc0 Node0 ArgPeri0 MeanAnom0 Name AverageSMA AverageEcc AverageInc LibrationCenter LibrationAmp KozaiCenter KozaiAmp" + '\n')
IndexCount = 0
for IndexCount in range(0, int(NumberOfTPs)+1 ):
Tp = TestParticle() # Initialise the test particle
Tp.IdentifyResonance(IndexCount) # Identify its resonant / kozai status
Tp.PrintData(IndexCount) # print the results
print(TestParticleSample) # ensure it is done
|
[
"numpy.genfromtxt",
"numpy.arange",
"numpy.average"
] |
[((11684, 11757), 'numpy.genfromtxt', 'np.genfromtxt', (["('tp' + TestParticleSample + '.out')"], {'usecols': '(1)', 'unpack': '(True)'}), "('tp' + TestParticleSample + '.out', usecols=1, unpack=True)\n", (11697, 11757), True, 'import numpy as np\n'), ((1668, 1730), 'numpy.genfromtxt', 'np.genfromtxt', (["('tp' + TestParticleSample + '.out')"], {'unpack': '(True)'}), "('tp' + TestParticleSample + '.out', unpack=True)\n", (1681, 1730), True, 'import numpy as np\n'), ((1765, 1812), 'numpy.genfromtxt', 'np.genfromtxt', (['"""LN.out"""'], {'usecols': '(8)', 'unpack': '(True)'}), "('LN.out', usecols=8, unpack=True)\n", (1778, 1812), True, 'import numpy as np\n'), ((9699, 9723), 'numpy.average', 'np.average', (['Eccentricity'], {}), '(Eccentricity)\n', (9709, 9723), True, 'import numpy as np\n'), ((9758, 9781), 'numpy.average', 'np.average', (['Inclination'], {}), '(Inclination)\n', (9768, 9781), True, 'import numpy as np\n'), ((9808, 9833), 'numpy.average', 'np.average', (['SemiMajorAxis'], {}), '(SemiMajorAxis)\n', (9818, 9833), True, 'import numpy as np\n'), ((10931, 10993), 'numpy.genfromtxt', 'np.genfromtxt', (["('tp' + TestParticleSample + '.out')"], {'unpack': '(True)'}), "('tp' + TestParticleSample + '.out', unpack=True)\n", (10944, 10993), True, 'import numpy as np\n'), ((4152, 4173), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(15)'], {}), '(0, 360, 15)\n', (4161, 4173), True, 'import numpy as np\n'), ((3947, 3972), 'numpy.average', 'np.average', (['SemiMajorAxis'], {}), '(SemiMajorAxis)\n', (3957, 3972), True, 'import numpy as np\n'), ((5028, 5081), 'numpy.average', 'np.average', (['SemiMajorAxis[Window:Window + WindowStep]'], {}), '(SemiMajorAxis[Window:Window + WindowStep])\n', (5038, 5081), True, 'import numpy as np\n'), ((6479, 6500), 'numpy.average', 'np.average', (['IsItArray'], {}), '(IsItArray)\n', (6489, 6500), True, 'import numpy as np\n'), ((6534, 6557), 'numpy.average', 'np.average', (['CenterArray'], {}), '(CenterArray)\n', (6544, 6557), True, 'import numpy as np\n'), ((8230, 8305), 'numpy.average', 'np.average', (['(AnglePresent[AnglePresent != 0] * AngleRange[AnglePresent != 0])'], {}), '(AnglePresent[AnglePresent != 0] * AngleRange[AnglePresent != 0])\n', (8240, 8305), True, 'import numpy as np\n'), ((8656, 8677), 'numpy.average', 'np.average', (['IsItArray'], {}), '(IsItArray)\n', (8666, 8677), True, 'import numpy as np\n'), ((8711, 8734), 'numpy.average', 'np.average', (['CenterArray'], {}), '(CenterArray)\n', (8721, 8734), True, 'import numpy as np\n'), ((5958, 6033), 'numpy.average', 'np.average', (['(AnglePresent[AnglePresent != 0] * AngleRange[AnglePresent != 0])'], {}), '(AnglePresent[AnglePresent != 0] * AngleRange[AnglePresent != 0])\n', (5968, 6033), True, 'import numpy as np\n'), ((8154, 8178), 'numpy.average', 'np.average', (['AnglePresent'], {}), '(AnglePresent)\n', (8164, 8178), True, 'import numpy as np\n'), ((5878, 5902), 'numpy.average', 'np.average', (['AnglePresent'], {}), '(AnglePresent)\n', (5888, 5902), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
def draw_bbox(image, x1, y1, x2, y2, caption, color=(203, 232, 0)):
    b = np.array([x1, y1, x2, y2]).astype(int)  # cast once so every drawing call gets integer pixel coordinates
    cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color=color, thickness=5)
if caption:
cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 0), 2)
cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 255, 255), 1)
return True
def annotate(image, img_anno, transform_fnc=None, draw_org=False, color=(203, 232, 0)):
img = image.copy()
for obj in img_anno:
x1, y1, x2, y2, label = obj
if draw_org:
draw_bbox(img, x1, y1, x2, y2, "", (255, 255, 255))
if transform_fnc:
x1, y1 = transform_fnc(x1, y1)
x2, y2 = transform_fnc(x2, y2)
draw_bbox(img, x1, y1, x2, y2, label, color)
return img
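# Minimal usage sketch (file names and box values below are assumptions, not from this module):
#   img = cv2.imread("frame.jpg")
#   boxes = [(10, 20, 110, 220, "person")]           # (x1, y1, x2, y2, label)
#   out = annotate(img, boxes, draw_org=True)
#   cv2.imwrite("frame_annotated.jpg", out)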
|
[
"cv2.rectangle",
"numpy.array",
"cv2.putText"
] |
[((150, 216), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x1, y1)', '(x2, y2)'], {'color': 'color', 'thickness': '(5)'}), '(image, (x1, y1), (x2, y2), color=color, thickness=5)\n', (163, 216), False, 'import cv2\n'), ((242, 335), 'cv2.putText', 'cv2.putText', (['image', 'caption', '(b[0], b[1] - 10)', 'cv2.FONT_HERSHEY_PLAIN', '(1.5)', '(0, 0, 0)', '(2)'], {}), '(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1.5,\n (0, 0, 0), 2)\n', (253, 335), False, 'import cv2\n'), ((340, 439), 'cv2.putText', 'cv2.putText', (['image', 'caption', '(b[0], b[1] - 10)', 'cv2.FONT_HERSHEY_PLAIN', '(1.5)', '(255, 255, 255)', '(1)'], {}), '(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1.5,\n (255, 255, 255), 1)\n', (351, 439), False, 'import cv2\n'), ((107, 133), 'numpy.array', 'np.array', (['[x1, y1, x2, y2]'], {}), '([x1, y1, x2, y2])\n', (115, 133), True, 'import numpy as np\n')]
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from queue import Queue, Full, Empty
import sys  # needed for the sys.stdout.flush() calls below
import threading
import numpy as np
import torch
from datetime import datetime
from time import sleep
from collections import deque, Counter, defaultdict, OrderedDict
from .utils import *
__all__ = ["MemoryReceiver"]
def _initialize_batch_entry(batchsize, v, use_cuda=True):
if isinstance(v, np.ndarray):
shape = v.shape
if v.dtype == 'int32' or v.dtype == 'int64':
entry = torch.LongTensor(batchsize, *shape)
else:
# entry = np.zeros((batchsize, ) + shape, dtype=v.dtype)
entry = torch.FloatTensor(batchsize, *shape)
elif isinstance(v, torch.FloatTensor):
shape = v.size()
entry = torch.FloatTensor(batchsize, *shape)
elif isinstance(v, list):
entry = np.zeros((batchsize, len(v)), dtype=type(v[0]))
elif isinstance(v, float):
entry = torch.FloatTensor(batchsize)
elif isinstance(v, int):
entry = torch.LongTensor(batchsize)
elif isinstance(v, str) or isinstance(v, bytes):
entry = [None] * batchsize
else:
entry = np.zeros((batchsize), dtype=type(v))
# Make it pinned memory
if use_cuda and (isinstance(entry, torch.FloatTensor) or isinstance(entry, torch.LongTensor)):
entry = entry.pin_memory()
return entry
def _initialize_batch_cpu(batch_cpu, batch_gpu, k, v, batchsize, use_cuda=True):
if k not in batch_cpu:
entry = _initialize_batch_entry(batchsize, v, use_cuda=use_cuda)
batch_cpu[k] = entry
else:
entry = batch_cpu[k]
if isinstance(entry, np.ndarray):
shape = entry.shape
elif isinstance(entry, list):
shape = (len(entry),)
else:
shape = entry.size()
if shape[0] < batchsize:
# Batch size becomes larger, re-initialize.
entry = _initialize_batch_entry(batchsize, v, use_cuda=use_cuda)
batch_cpu[k] = entry
if k in batch_gpu: del batch_gpu[k]
return entry, shape
def _cpu2gpu(batch_cpu, batch_gpu, allow_incomplete_batch=False):
for batch_cpu_t, batch_gpu_t in zip(batch_cpu, batch_gpu):
batchsize = batch_cpu_t["_batchsize"]
batch_gpu_t["_batchsize"] = batchsize
for k in batch_cpu_t.keys():
if isinstance(batch_cpu_t[k], (torch.FloatTensor, torch.LongTensor)):
if allow_incomplete_batch:
if len(batch_cpu_t[k].size()) == 1:
batch_gpu_t[k] = batch_cpu_t[k][:batchsize].cuda(non_blocking=True)
else:
batch_gpu_t[k] = batch_cpu_t[k][:batchsize, :].cuda(non_blocking=True)
else:
if isinstance(batch_cpu_t[k], torch.FloatTensor):
if k not in batch_gpu_t:
batch_gpu_t[k] = torch.cuda.FloatTensor(batch_cpu_t[k].size())
batch_gpu_t[k].copy_(batch_cpu_t[k], non_blocking=True)
elif isinstance(batch_cpu_t[k], torch.LongTensor):
if k not in batch_gpu_t:
batch_gpu_t[k] = torch.cuda.LongTensor(batch_cpu_t[k].size())
batch_gpu_t[k].copy_(batch_cpu_t[k], non_blocking=True)
else:
batch_gpu_t[k] = batch_cpu_t[k]
def _make_batch(batch, q, use_cuda=True, allow_incomplete_batch=False):
''' Lots of hacks in this function, need to fix in the future.'''
if "cpu" not in batch:
batch.update({ "cpu" : [], "gpu" : [] })
# For input q:
# len(q) == T
# len(q[t]) == batchsize
# q[t][batch_id] is a dict, e.g., q[t][batch_id] = { "s" : np.array, "a" : int, "r" : float }
# For output:
# batch_cpu is a list of dict, e.g., batch_cpu[t] = { "s" : FloatTensor(batchsize, channel, w, h), "a" : FloatTensor(batchsize) }
T = len(q)
batchsize = len(q[0])
# Time span of the batch.
if len(batch["cpu"]) != T:
batch["cpu"] = [dict() for i in range(T)]
batch["gpu"] = [dict() for i in range(T)]
batch_cpu = batch["cpu"]
batch_gpu = batch["gpu"]
for q_t, batch_cpu_t, batch_gpu_t in zip(q, batch_cpu, batch_gpu):
batch_cpu_t["_batchsize"] = batchsize
for k, v in q_t[0].items():
entry, shape = _initialize_batch_cpu(batch_cpu_t, batch_gpu_t, k, v, batchsize, use_cuda=use_cuda)
if len(shape) == 1:
for i in range(batchsize):
entry[i] = q_t[i][k]
else:
# TODO: Remove this once np.array to torch assignment has been implemented.
if isinstance(q_t[0][k], np.ndarray):
for i in range(batchsize):
entry[i, :] = torch.from_numpy(q_t[i][k])
else:
for i in range(batchsize):
entry[i, :] = q_t[i][k]
# Put things on cuda.
if use_cuda:
_cpu2gpu(batch_cpu, batch_gpu, allow_incomplete_batch=allow_incomplete_batch)
class Pool:
def __init__(self, num_pool):
# Open a thread to assemble the batch.
self.num_pool = num_pool
self.pool = [ dict() for i in range(self.num_pool) ]
self.empty_entries = Queue()
for i in range(num_pool):
self.pool[i]["_idx"] = i
self.empty_entries.put(i)
def reserve(self):
idx = self.empty_entries.get()
return self.pool[idx]
def release(self, batch):
self.empty_entries.put(batch["_idx"])
class SeqStats:
def __init__(self, name="seq", seq_limits=None):
# Stats.
self.stats_seq = Counter()
self.clear_stats()
self.name = name
if seq_limits is None:
self.limits = [1, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600, 1800, 2000, 2500, 3000, 4000, 5000, float("inf")]
else:
self.limits = seq_limits
if not np.isinf(self.limits[-1]):
self.limits.append(float("inf"))
def feed(self, seqs):
for seq_num in seqs:
bin_idx = None
for i, limit in enumerate(self.limits[1:]):
if int(seq_num) < limit:
bin_idx = i
break
if seq_num.item() > self.max_seq:
self.max_seq = seq_num
if seq_num.item() < self.min_seq:
self.min_seq = seq_num
name = "[" + str(self.limits[bin_idx]) + ", " + str(self.limits[bin_idx + 1]) + ")"
self.stats_seq[name] += 1
def print_stats(self, reset=False):
total_counts = sum(self.stats_seq.values())
if total_counts > 0:
print("Distribution of %s [min = %d / max = %d / #count = %d]:" % (self.name, self.min_seq, self.max_seq, total_counts))
s = ""
for r in sorted(self.stats_seq.keys(), key=lambda x : float(x.split(",")[0][1:])):
s += "%s: %d [%.2lf%%]\n" % (r, self.stats_seq[r], 100.0 * self.stats_seq[r] / total_counts)
print(s)
else:
print("Distribution of %s [#count = %d]:" % (self.name, total_counts))
if reset: self.clear_stats()
def clear_stats(self):
self.stats_seq.clear()
self.max_seq = 0
self.min_seq = float('inf')
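# Minimal usage sketch for SeqStats (assumes torch tensors, since feed() calls .item()):
#   stats = SeqStats(name="seq")
#   stats.feed(torch.LongTensor([3, 250, 4999]))      # lands in bins [1, 100), [200, 300), [4000, 5000)
#   stats.print_stats()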
class Timer:
def __init__(self):
self.reset()
def __call__(self, name):
self.curr_name = name
return self
def __enter__(self):
self.before[self.curr_name] = datetime.now()
def __exit__(self, t, value, traceback):
after = datetime.now()
elapsed = (after - self.before[self.curr_name]).total_seconds() * 1000
self.records[self.curr_name][0] += elapsed
self.records[self.curr_name][1] += 1
def summary(self):
rets = []
for name, record in self.records.items():
cumtime, count = record
aver_time = float(cumtime) / count
rets.append("[%s] %.3f ms [%d]" % (name, aver_time, count))
return rets
def reset(self):
self.records = defaultdict(lambda : [0, 0])
self.before = { }
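# Minimal usage sketch for Timer (the section name is illustrative):
#   timer = Timer()
#   with timer("decode"):
#       pass                                           # timed section
#   print(timer.summary())                             # e.g. ['[decode] 0.012 ms [1]']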
class BatchStats:
def __init__(self, seq_limits=None):
# Stats.
self.stats_agent = Counter()
self.seq_stats = SeqStats(seq_limits=seq_limits)
def add_stats(self, batch):
for agent_name in batch["_agent_name"]:
self.stats_agent[agent_name] += 1
self.seq_stats.feed(batch["_seq"])
def print_stats(self):
sum_counter = sum(self.stats_agent.values())
num_agents = len(self.stats_agent)
avg_counter = sum_counter / num_agents
print("Agent Stats: %.3f[%d/%d]" % (avg_counter, sum_counter, num_agents))
print(self.stats_agent.most_common(20))
self.seq_stats.print_stats()
def clear_stats(self):
self.stats_agent.clear()
self.seq_stats.clear_stats()
def ZMQDecoder(receive_data):
sender_name, m = receive_data
if sender_name is None:
if m is None:
# Done with the loop
return "exit"
else:
# No package for now
# send existing data if there is any.
return "nopackage"
try:
m = loads(m.buffer)
except:
# If there is anything wrong with the decoding, return "nopackage"
return "nopackage"
sender_name = sender_name.bytes
for data in m:
data["_sender"] = sender_name
return m
class MemoryReceiver:
def __init__(self, name, ch, batch_assembler, batch_queue,
prompt=None, decoder=ZMQDecoder, allow_incomplete_batch=False,
seq_limits=None, replier=None):
self.name = name
self.ch = ch
self.batch_assembler = batch_assembler
self.loop_count = 0
self.done_flag = None
self.use_cuda = torch.cuda.is_available()
self.batch_queue = batch_queue
self.prompt = prompt
self.allow_incomplete_batch = allow_incomplete_batch
# BatchStats:
self.batch_stats = BatchStats(seq_limits=seq_limits)
self.decoder = decoder
self.timer = Timer()
# XXX Not a good design. Need to refactor
self.replier = replier
# Open a thread to assemble the batch.
# TODO: For some reason, single threaded version is substantially
# faster than two-threaded version, which is supposed to hide
# the latency when receiving the data and build the batch.
self.pool = Pool(2)
threading.Thread(target=self._receive).start()
def on_data(self, m):
qs = self.batch_assembler.feed(m)
if qs is not None:
# print("MemoryReceiver[%s] Receive batch!" % self.name)
if self.prompt is not None:
queue_size = self.batch_queue.qsize()
print(self.prompt["on_draw_batch"] + str(queue_size), end="")
sys.stdout.flush()
self._make_and_send_batch(qs)
def on_incomplete_batch(self):
''' Incomplete batch '''
if self.batch_assembler.sample_count() == 0 or not self.allow_incomplete_batch: return
qs = self.batch_assembler.get_batch(incomplete=True)
if qs is not None:
self._make_and_send_batch(qs)
def _make_and_send_batch(self, qs):
batch = self.pool.reserve()
if self.prompt is not None:
print(self.prompt["on_make_batch"], end="")
sys.stdout.flush()
_make_batch(batch, qs, use_cuda=self.use_cuda, allow_incomplete_batch=self.allow_incomplete_batch)
self.batch_stats.add_stats(batch["cpu"][0])
queue_put(self.batch_queue, (self, batch), done_flag=self.done_flag, fail_comment="BatchConnector.on_data.queue_put failed, retrying")
def _preprocess(self, raw_data):
        ''' Return a list that contains all the data to be fed to the assembler.
        If it returns [], no data were received.
        If it returns None, the caller should exit.
        '''
if self.decoder:
with self.timer("decode"):
m = self.decoder(raw_data)
if isinstance(m, str):
# No package for now, send existing data if there is any.
if m == 'nopackage': return []
else: return None
else:
if raw_data is None: return []
else: m = raw_data
# Send to multiple threads for batch.
ret = []
with self.timer("collect"):
for data in m:
data["_key"] = self._get_key(data)
if not "_sender" in data:
data["_sender"] = data["_key"]
ret.append(data)
return ret
def _get_key(self, data):
return "%s-%d-%d" % (data["_agent_name"], data["_game_counter"], data["_seq"])
def _receive(self):
check_interval = 200
counter = 0
while True:
with self.timer("receive"):
raw_data = self.ch.Receive()
m = self._preprocess(raw_data)
if m is None: break
if len(m) == 0:
self.on_incomplete_batch()
continue
for data in m:
self.loop_count += 1
self.on_data(data)
counter += 1
if counter % check_interval == 0:
# print("MemoryReceiver: %s, #data = %d" % (", ".join(self.timer.summary()), len(m)))
if check_done_flag(self.done_flag): break
print("Exit from MemoryReceiver._receive")
def print_stats(self):
queue_size = self.batch_queue.qsize()
print("Queue size: %d" % queue_size)
self.batch_stats.print_stats()
self.batch_stats.clear_stats()
def Step(self, batch, reply):
# Send reply back and release
if self.replier is not None:
self.replier.reply(batch["cpu"][0], reply)
self.pool.release(batch)
if self.prompt is not None:
print(self.prompt["on_release_batch"], end="")
sys.stdout.flush()
|
[
"torch.LongTensor",
"torch.from_numpy",
"collections.Counter",
"datetime.datetime.now",
"torch.cuda.is_available",
"collections.defaultdict",
"threading.Thread",
"queue.Queue",
"numpy.isinf",
"torch.FloatTensor"
] |
[((5438, 5445), 'queue.Queue', 'Queue', ([], {}), '()\n', (5443, 5445), False, 'from queue import Queue, Full, Empty\n'), ((5838, 5847), 'collections.Counter', 'Counter', ([], {}), '()\n', (5845, 5847), False, 'from collections import deque, Counter, defaultdict, OrderedDict\n'), ((7729, 7743), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7741, 7743), False, 'from datetime import datetime\n'), ((7806, 7820), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7818, 7820), False, 'from datetime import datetime\n'), ((8308, 8336), 'collections.defaultdict', 'defaultdict', (['(lambda : [0, 0])'], {}), '(lambda : [0, 0])\n', (8319, 8336), False, 'from collections import deque, Counter, defaultdict, OrderedDict\n'), ((8468, 8477), 'collections.Counter', 'Counter', ([], {}), '()\n', (8475, 8477), False, 'from collections import deque, Counter, defaultdict, OrderedDict\n'), ((10097, 10122), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10120, 10122), False, 'import torch\n'), ((648, 683), 'torch.LongTensor', 'torch.LongTensor', (['batchsize', '*shape'], {}), '(batchsize, *shape)\n', (664, 683), False, 'import torch\n'), ((787, 823), 'torch.FloatTensor', 'torch.FloatTensor', (['batchsize', '*shape'], {}), '(batchsize, *shape)\n', (804, 823), False, 'import torch\n'), ((908, 944), 'torch.FloatTensor', 'torch.FloatTensor', (['batchsize', '*shape'], {}), '(batchsize, *shape)\n', (925, 944), False, 'import torch\n'), ((6151, 6176), 'numpy.isinf', 'np.isinf', (['self.limits[-1]'], {}), '(self.limits[-1])\n', (6159, 6176), True, 'import numpy as np\n'), ((10786, 10824), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._receive'}), '(target=self._receive)\n', (10802, 10824), False, 'import threading\n'), ((1086, 1114), 'torch.FloatTensor', 'torch.FloatTensor', (['batchsize'], {}), '(batchsize)\n', (1103, 1114), False, 'import torch\n'), ((1160, 1187), 'torch.LongTensor', 'torch.LongTensor', (['batchsize'], {}), '(batchsize)\n', (1176, 1187), False, 'import torch\n'), ((4945, 4972), 'torch.from_numpy', 'torch.from_numpy', (['q_t[i][k]'], {}), '(q_t[i][k])\n', (4961, 4972), False, 'import torch\n')]
|
from __future__ import print_function
import itertools
import numpy as np
import numba.unittest_support as unittest
from numba import types, jit, typeof
from .support import MemoryLeakMixin, TestCase, tag
def getitem_usecase(a, b):
return a[b]
def setitem_usecase(a, idx, b):
a[idx] = b
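# Minimal sketch of the pattern these tests exercise (array and index values are illustrative):
#   a = np.arange(12).reshape(3, 4)
#   idx = np.int16([0, 2])
#   jit(nopython=True)(getitem_usecase)(a, idx)        # advanced indexing returns a copy
#   jit(nopython=True)(setitem_usecase)(a, idx, a[idx])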
class TestFancyIndexing(MemoryLeakMixin, TestCase):
def generate_advanced_indices(self, N, many=True):
choices = [np.int16([0, N - 1, -2])]
if many:
choices += [np.uint16([0, 1, N - 1]),
np.bool_([0, 1, 1, 0])]
return choices
def generate_basic_index_tuples(self, N, maxdim, many=True):
"""
Generate basic index tuples with 0 to *maxdim* items.
"""
# Note integers can be considered advanced indices in certain
# cases, so we avoid them here.
# See "Combining advanced and basic indexing"
# in http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
if many:
choices = [slice(None, None, None),
slice(1, N - 1, None),
slice(0, None, 2),
slice(N - 1, None, -2),
slice(-N + 1, -1, None),
slice(-1, -N, -2),
]
else:
choices = [slice(0, N - 1, None),
slice(-1, -N, -2)]
for ndim in range(maxdim + 1):
for tup in itertools.product(choices, repeat=ndim):
yield tup
def generate_advanced_index_tuples(self, N, maxdim, many=True):
"""
Generate advanced index tuples by generating basic index tuples
and adding a single advanced index item.
"""
# (Note Numba doesn't support advanced indices with more than
# one advanced index array at the moment)
choices = list(self.generate_advanced_indices(N, many=many))
for i in range(maxdim + 1):
for tup in self.generate_basic_index_tuples(N, maxdim - 1, many):
for adv in choices:
yield tup[:i] + (adv,) + tup[i:]
def generate_advanced_index_tuples_with_ellipsis(self, N, maxdim, many=True):
"""
Same as generate_advanced_index_tuples(), but also insert an
ellipsis at various points.
"""
for tup in self.generate_advanced_index_tuples(N, maxdim, many):
for i in range(len(tup) + 1):
yield tup[:i] + (Ellipsis,) + tup[i:]
def check_getitem_indices(self, arr, indices):
pyfunc = getitem_usecase
cfunc = jit(nopython=True)(pyfunc)
orig = arr.copy()
orig_base = arr.base or arr
for index in indices:
expected = pyfunc(arr, index)
# Sanity check: if a copy wasn't made, this wasn't advanced
# but basic indexing, and shouldn't be tested here.
assert expected.base is not orig_base
got = cfunc(arr, index)
# Note Numba may not return the same array strides and
# contiguity as Numpy
self.assertEqual(got.shape, expected.shape)
self.assertEqual(got.dtype, expected.dtype)
np.testing.assert_equal(got, expected)
# Check a copy was *really* returned by Numba
if got.size:
got.fill(42)
np.testing.assert_equal(arr, orig)
def test_getitem_tuple(self):
# Test many variations of advanced indexing with a tuple index
N = 4
ndim = 3
arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32)
indices = self.generate_advanced_index_tuples(N, ndim)
self.check_getitem_indices(arr, indices)
def test_getitem_tuple_and_ellipsis(self):
# Same, but also insert an ellipsis at a random point
N = 4
ndim = 3
arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32)
indices = self.generate_advanced_index_tuples_with_ellipsis(N, ndim,
many=False)
self.check_getitem_indices(arr, indices)
@tag('important')
def test_getitem_array(self):
# Test advanced indexing with a single array index
N = 4
ndim = 3
arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32)
indices = self.generate_advanced_indices(N)
self.check_getitem_indices(arr, indices)
def check_setitem_indices(self, arr, indices):
pyfunc = setitem_usecase
cfunc = jit(nopython=True)(pyfunc)
for index in indices:
src = arr[index]
expected = np.zeros_like(arr)
got = np.zeros_like(arr)
pyfunc(expected, index, src)
cfunc(got, index, src)
# Note Numba may not return the same array strides and
# contiguity as Numpy
self.assertEqual(got.shape, expected.shape)
self.assertEqual(got.dtype, expected.dtype)
np.testing.assert_equal(got, expected)
def test_setitem_tuple(self):
# Test many variations of advanced indexing with a tuple index
N = 4
ndim = 3
arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32)
indices = self.generate_advanced_index_tuples(N, ndim)
self.check_setitem_indices(arr, indices)
def test_setitem_tuple_and_ellipsis(self):
# Same, but also insert an ellipsis at a random point
N = 4
ndim = 3
arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32)
indices = self.generate_advanced_index_tuples_with_ellipsis(N, ndim,
many=False)
self.check_setitem_indices(arr, indices)
def test_setitem_array(self):
# Test advanced indexing with a single array index
N = 4
ndim = 3
arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32) + 10
indices = self.generate_advanced_indices(N)
self.check_setitem_indices(arr, indices)
if __name__ == '__main__':
unittest.main()
|
[
"numba.unittest_support.main",
"numpy.testing.assert_equal",
"numpy.int16",
"itertools.product",
"numba.jit",
"numpy.bool_",
"numpy.uint16",
"numpy.zeros_like",
"numpy.arange"
] |
[((6184, 6199), 'numba.unittest_support.main', 'unittest.main', ([], {}), '()\n', (6197, 6199), True, 'import numba.unittest_support as unittest\n'), ((430, 454), 'numpy.int16', 'np.int16', (['[0, N - 1, -2]'], {}), '([0, N - 1, -2])\n', (438, 454), True, 'import numpy as np\n'), ((1465, 1504), 'itertools.product', 'itertools.product', (['choices'], {'repeat': 'ndim'}), '(choices, repeat=ndim)\n', (1482, 1504), False, 'import itertools\n'), ((2621, 2639), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2624, 2639), False, 'from numba import types, jit, typeof\n'), ((3230, 3268), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['got', 'expected'], {}), '(got, expected)\n', (3253, 3268), True, 'import numpy as np\n'), ((4598, 4616), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (4601, 4616), False, 'from numba import types, jit, typeof\n'), ((4708, 4726), 'numpy.zeros_like', 'np.zeros_like', (['arr'], {}), '(arr)\n', (4721, 4726), True, 'import numpy as np\n'), ((4745, 4763), 'numpy.zeros_like', 'np.zeros_like', (['arr'], {}), '(arr)\n', (4758, 4763), True, 'import numpy as np\n'), ((5065, 5103), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['got', 'expected'], {}), '(got, expected)\n', (5088, 5103), True, 'import numpy as np\n'), ((497, 521), 'numpy.uint16', 'np.uint16', (['[0, 1, N - 1]'], {}), '([0, 1, N - 1])\n', (506, 521), True, 'import numpy as np\n'), ((547, 569), 'numpy.bool_', 'np.bool_', (['[0, 1, 1, 0]'], {}), '([0, 1, 1, 0])\n', (555, 569), True, 'import numpy as np\n'), ((3397, 3431), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['arr', 'orig'], {}), '(arr, orig)\n', (3420, 3431), True, 'import numpy as np\n'), ((3583, 3603), 'numpy.arange', 'np.arange', (['(N ** ndim)'], {}), '(N ** ndim)\n', (3592, 3603), True, 'import numpy as np\n'), ((3910, 3930), 'numpy.arange', 'np.arange', (['(N ** ndim)'], {}), '(N ** ndim)\n', (3919, 3930), True, 'import numpy as np\n'), ((4337, 4357), 'numpy.arange', 'np.arange', (['(N ** ndim)'], {}), '(N ** ndim)\n', (4346, 4357), True, 'import numpy as np\n'), ((5255, 5275), 'numpy.arange', 'np.arange', (['(N ** ndim)'], {}), '(N ** ndim)\n', (5264, 5275), True, 'import numpy as np\n'), ((5581, 5601), 'numpy.arange', 'np.arange', (['(N ** ndim)'], {}), '(N ** ndim)\n', (5590, 5601), True, 'import numpy as np\n'), ((5986, 6006), 'numpy.arange', 'np.arange', (['(N ** ndim)'], {}), '(N ** ndim)\n', (5995, 6006), True, 'import numpy as np\n')]
|
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Inference for onset conditioned model.
A histogram summary will be written for every example processed, and the
resulting MIDI and pianoroll images will also be written for every example.
The final summary value is the mean score for all examples.
"""
import collections
import functools
import os
import time
import imageio
from magenta.models.onsets_frames_transcription import constants
from magenta.models.onsets_frames_transcription import data
from magenta.models.onsets_frames_transcription import infer_util
from magenta.models.onsets_frames_transcription import train_util
from note_seq import midi_io
from note_seq import sequences_lib
from note_seq.protobuf import music_pb2
import numpy as np
import six
import tensorflow.compat.v1 as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('master', '',
'Name of the TensorFlow runtime to use.')
tf.app.flags.DEFINE_string('config', 'onsets_frames',
'Name of the config to use.')
tf.app.flags.DEFINE_string('model_dir', None, 'Path to look for checkpoints.')
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'Filename of the checkpoint to use. If not specified, will use the latest '
'checkpoint')
tf.app.flags.DEFINE_string('examples_path', None,
'Path to test examples TFRecord.')
tf.app.flags.DEFINE_string(
'output_dir', '~/tmp/onsets_frames/infer',
'Path to store output midi files and summary events.')
tf.app.flags.DEFINE_string(
'hparams', '',
'A comma-separated list of `name=value` hyperparameter values.')
tf.app.flags.DEFINE_boolean(
'shuffle_examples', False, 'Whether to shuffle examples.')
tf.app.flags.DEFINE_string(
'log', 'INFO',
'The threshold for what messages will be logged: '
'DEBUG, INFO, WARN, ERROR, or FATAL.')
tf.app.flags.DEFINE_boolean('preprocess_examples', False,
'Whether or not to run preprocessing on examples.')
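# Example of how these flags are typically supplied when the wrapper script that calls run()
# below is launched (script name and paths are placeholders, not taken from this module):
#   python onsets_frames_transcription_infer.py \
#       --model_dir=/path/to/train_dir \
#       --examples_path=/path/to/test_examples.tfrecord \
#       --output_dir=/tmp/onsets_frames/infer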
def model_inference(model_fn,
model_dir,
checkpoint_path,
data_fn,
hparams,
examples_path,
output_dir,
summary_writer,
master,
preprocess_examples,
shuffle_examples):
"""Runs inference for the given examples."""
tf.logging.info('model_dir=%s', model_dir)
tf.logging.info('checkpoint_path=%s', checkpoint_path)
tf.logging.info('examples_path=%s', examples_path)
tf.logging.info('output_dir=%s', output_dir)
estimator = train_util.create_estimator(
model_fn, model_dir, hparams, master=master)
transcription_data = functools.partial(
data_fn, examples=examples_path, preprocess_examples=preprocess_examples,
is_training=False, shuffle_examples=shuffle_examples,
skip_n_initial_records=0)
input_fn = infer_util.labels_to_features_wrapper(transcription_data)
start_time = time.time()
infer_times = []
num_frames = []
file_num = 0
all_metrics = collections.defaultdict(list)
for predictions in estimator.predict(
input_fn, checkpoint_path=checkpoint_path, yield_single_examples=False):
# Remove batch dimension for convenience.
for k in predictions.keys():
if predictions[k].shape[0] != 1:
raise ValueError(
'All predictions must have batch size 1, but shape of '
            '{} was: {}'.format(k, predictions[k].shape[0]))
predictions[k] = predictions[k][0]
end_time = time.time()
infer_time = end_time - start_time
infer_times.append(infer_time)
num_frames.append(predictions['frame_predictions'].shape[0])
tf.logging.info(
'Infer time %f, frames %d, frames/sec %f, running average %f',
infer_time, num_frames[-1], num_frames[-1] / infer_time,
np.sum(num_frames) / np.sum(infer_times))
tf.logging.info('Scoring sequence %s', predictions['sequence_ids'])
sequence_prediction = music_pb2.NoteSequence.FromString(
predictions['sequence_predictions'])
sequence_label = music_pb2.NoteSequence.FromString(
predictions['sequence_labels'])
# Make filenames UNIX-friendly.
filename_chars = six.ensure_text(predictions['sequence_ids'], 'utf-8')
filename_chars = [c if c.isalnum() else '_' for c in filename_chars]
filename_safe = ''.join(filename_chars).rstrip()
filename_safe = '{:04d}_{}'.format(file_num, filename_safe[:200])
file_num += 1
output_file = os.path.join(output_dir, filename_safe + '.mid')
tf.logging.info('Writing inferred midi file to %s', output_file)
midi_io.sequence_proto_to_midi_file(sequence_prediction, output_file)
label_output_file = os.path.join(output_dir, filename_safe + '_label.mid')
tf.logging.info('Writing label midi file to %s', label_output_file)
midi_io.sequence_proto_to_midi_file(sequence_label, label_output_file)
# Also write a pianoroll showing acoustic model output vs labels.
pianoroll_output_file = os.path.join(
output_dir, filename_safe + '_pianoroll.png')
tf.logging.info('Writing acoustic logit/label file to %s',
pianoroll_output_file)
# Calculate frames based on the sequence. Includes any postprocessing done
# to turn raw onsets/frames predictions into the final sequence.
# TODO(fjord): This work is duplicated in metrics.py.
sequence_frame_predictions = sequences_lib.sequence_to_pianoroll(
sequence_prediction,
frames_per_second=data.hparams_frames_per_second(hparams),
min_pitch=constants.MIN_MIDI_PITCH,
max_pitch=constants.MAX_MIDI_PITCH).active
with tf.gfile.GFile(pianoroll_output_file, mode='w') as f:
imageio.imwrite(
f,
infer_util.posterior_pianoroll_image(
predictions['onset_probs'],
predictions['onset_labels'],
predictions['frame_probs'],
predictions['frame_labels'],
sequence_frame_predictions),
format='png')
# Update histogram and current scalar for metrics.
with tf.Graph().as_default(), tf.Session().as_default():
for k, v in predictions.items():
if not k.startswith('metrics/'):
continue
all_metrics[k].extend(v)
histogram_name = k + '_histogram'
metric_summary = tf.summary.histogram(histogram_name, all_metrics[k])
summary_writer.add_summary(metric_summary.eval(), global_step=file_num)
scalar_name = k
metric_summary = tf.summary.scalar(scalar_name, np.mean(all_metrics[k]))
summary_writer.add_summary(metric_summary.eval(), global_step=file_num)
summary_writer.flush()
start_time = time.time()
# Write final mean values for all metrics.
with tf.Graph().as_default(), tf.Session().as_default():
for k, v in all_metrics.items():
final_scalar_name = 'final/' + k
metric_summary = tf.summary.scalar(
final_scalar_name, np.mean(all_metrics[k]))
summary_writer.add_summary(metric_summary.eval())
summary_writer.flush()
def run(config_map, data_fn):
"""Run the infer script."""
output_dir = os.path.expanduser(FLAGS.output_dir)
config = config_map[FLAGS.config]
hparams = config.hparams
hparams.parse(FLAGS.hparams)
# Batch size should always be 1 for inference.
hparams.batch_size = 1
tf.logging.info(hparams)
tf.gfile.MakeDirs(output_dir)
summary_writer = tf.summary.FileWriter(logdir=output_dir)
with tf.Session():
run_config = '\n\n'.join([
'model_dir: ' + FLAGS.model_dir,
'checkpoint_path: ' + str(FLAGS.checkpoint_path),
'examples_path: ' + FLAGS.examples_path,
str(hparams),
])
run_config_summary = tf.summary.text(
'run_config',
tf.constant(run_config, name='run_config'),
collections=[])
summary_writer.add_summary(run_config_summary.eval())
model_inference(
model_fn=config.model_fn,
model_dir=FLAGS.model_dir,
checkpoint_path=FLAGS.checkpoint_path,
data_fn=data_fn,
hparams=hparams,
examples_path=FLAGS.examples_path,
output_dir=output_dir,
summary_writer=summary_writer,
preprocess_examples=FLAGS.preprocess_examples,
master=FLAGS.master,
shuffle_examples=FLAGS.shuffle_examples)
|
[
"magenta.models.onsets_frames_transcription.infer_util.labels_to_features_wrapper",
"tensorflow.compat.v1.gfile.GFile",
"magenta.models.onsets_frames_transcription.data.hparams_frames_per_second",
"tensorflow.compat.v1.Session",
"numpy.mean",
"note_seq.protobuf.music_pb2.NoteSequence.FromString",
"six.ensure_text",
"os.path.expanduser",
"tensorflow.compat.v1.gfile.MakeDirs",
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.compat.v1.app.flags.DEFINE_string",
"tensorflow.compat.v1.summary.histogram",
"note_seq.midi_io.sequence_proto_to_midi_file",
"tensorflow.compat.v1.constant",
"magenta.models.onsets_frames_transcription.train_util.create_estimator",
"time.time",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.app.flags.DEFINE_boolean",
"os.path.join",
"magenta.models.onsets_frames_transcription.infer_util.posterior_pianoroll_image",
"numpy.sum",
"functools.partial",
"collections.defaultdict"
] |
[((1389, 1475), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""master"""', '""""""', '"""Name of the TensorFlow runtime to use."""'], {}), "('master', '',\n 'Name of the TensorFlow runtime to use.')\n", (1415, 1475), True, 'import tensorflow.compat.v1 as tf\n'), ((1499, 1586), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""config"""', '"""onsets_frames"""', '"""Name of the config to use."""'], {}), "('config', 'onsets_frames',\n 'Name of the config to use.')\n", (1525, 1586), True, 'import tensorflow.compat.v1 as tf\n'), ((1610, 1688), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""model_dir"""', 'None', '"""Path to look for checkpoints."""'], {}), "('model_dir', None, 'Path to look for checkpoints.')\n", (1636, 1688), True, 'import tensorflow.compat.v1 as tf\n'), ((1689, 1836), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint_path"""', 'None', '"""Filename of the checkpoint to use. If not specified, will use the latest checkpoint"""'], {}), "('checkpoint_path', None,\n 'Filename of the checkpoint to use. If not specified, will use the latest checkpoint'\n )\n", (1715, 1836), True, 'import tensorflow.compat.v1 as tf\n'), ((1844, 1932), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""examples_path"""', 'None', '"""Path to test examples TFRecord."""'], {}), "('examples_path', None,\n 'Path to test examples TFRecord.')\n", (1870, 1932), True, 'import tensorflow.compat.v1 as tf\n'), ((1956, 2084), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""output_dir"""', '"""~/tmp/onsets_frames/infer"""', '"""Path to store output midi files and summary events."""'], {}), "('output_dir', '~/tmp/onsets_frames/infer',\n 'Path to store output midi files and summary events.')\n", (1982, 2084), True, 'import tensorflow.compat.v1 as tf\n'), ((2090, 2200), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""hparams"""', '""""""', '"""A comma-separated list of `name=value` hyperparameter values."""'], {}), "('hparams', '',\n 'A comma-separated list of `name=value` hyperparameter values.')\n", (2116, 2200), True, 'import tensorflow.compat.v1 as tf\n'), ((2206, 2296), 'tensorflow.compat.v1.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""shuffle_examples"""', '(False)', '"""Whether to shuffle examples."""'], {}), "('shuffle_examples', False,\n 'Whether to shuffle examples.')\n", (2233, 2296), True, 'import tensorflow.compat.v1 as tf\n'), ((2298, 2435), 'tensorflow.compat.v1.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""log"""', '"""INFO"""', '"""The threshold for what messages will be logged: DEBUG, INFO, WARN, ERROR, or FATAL."""'], {}), "('log', 'INFO',\n 'The threshold for what messages will be logged: DEBUG, INFO, WARN, ERROR, or FATAL.'\n )\n", (2324, 2435), True, 'import tensorflow.compat.v1 as tf\n'), ((2443, 2556), 'tensorflow.compat.v1.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""preprocess_examples"""', '(False)', '"""Whether or not to run preprocessing on examples."""'], {}), "('preprocess_examples', False,\n 'Whether or not to run preprocessing on examples.')\n", (2470, 2556), True, 'import tensorflow.compat.v1 as tf\n'), ((2999, 3041), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""model_dir=%s"""', 'model_dir'], {}), "('model_dir=%s', model_dir)\n", (3014, 3041), True, 'import 
tensorflow.compat.v1 as tf\n'), ((3044, 3098), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""checkpoint_path=%s"""', 'checkpoint_path'], {}), "('checkpoint_path=%s', checkpoint_path)\n", (3059, 3098), True, 'import tensorflow.compat.v1 as tf\n'), ((3101, 3151), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""examples_path=%s"""', 'examples_path'], {}), "('examples_path=%s', examples_path)\n", (3116, 3151), True, 'import tensorflow.compat.v1 as tf\n'), ((3154, 3198), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""output_dir=%s"""', 'output_dir'], {}), "('output_dir=%s', output_dir)\n", (3169, 3198), True, 'import tensorflow.compat.v1 as tf\n'), ((3214, 3286), 'magenta.models.onsets_frames_transcription.train_util.create_estimator', 'train_util.create_estimator', (['model_fn', 'model_dir', 'hparams'], {'master': 'master'}), '(model_fn, model_dir, hparams, master=master)\n', (3241, 3286), False, 'from magenta.models.onsets_frames_transcription import train_util\n'), ((3318, 3499), 'functools.partial', 'functools.partial', (['data_fn'], {'examples': 'examples_path', 'preprocess_examples': 'preprocess_examples', 'is_training': '(False)', 'shuffle_examples': 'shuffle_examples', 'skip_n_initial_records': '(0)'}), '(data_fn, examples=examples_path, preprocess_examples=\n preprocess_examples, is_training=False, shuffle_examples=\n shuffle_examples, skip_n_initial_records=0)\n', (3335, 3499), False, 'import functools\n'), ((3523, 3580), 'magenta.models.onsets_frames_transcription.infer_util.labels_to_features_wrapper', 'infer_util.labels_to_features_wrapper', (['transcription_data'], {}), '(transcription_data)\n', (3560, 3580), False, 'from magenta.models.onsets_frames_transcription import infer_util\n'), ((3597, 3608), 'time.time', 'time.time', ([], {}), '()\n', (3606, 3608), False, 'import time\n'), ((3679, 3708), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (3702, 3708), False, 'import collections\n'), ((7813, 7849), 'os.path.expanduser', 'os.path.expanduser', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (7831, 7849), False, 'import os\n'), ((8023, 8047), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['hparams'], {}), '(hparams)\n', (8038, 8047), True, 'import tensorflow.compat.v1 as tf\n'), ((8051, 8080), 'tensorflow.compat.v1.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['output_dir'], {}), '(output_dir)\n', (8068, 8080), True, 'import tensorflow.compat.v1 as tf\n'), ((8101, 8141), 'tensorflow.compat.v1.summary.FileWriter', 'tf.summary.FileWriter', ([], {'logdir': 'output_dir'}), '(logdir=output_dir)\n', (8122, 8141), True, 'import tensorflow.compat.v1 as tf\n'), ((4162, 4173), 'time.time', 'time.time', ([], {}), '()\n', (4171, 4173), False, 'import time\n'), ((4525, 4592), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Scoring sequence %s"""', "predictions['sequence_ids']"], {}), "('Scoring sequence %s', predictions['sequence_ids'])\n", (4540, 4592), True, 'import tensorflow.compat.v1 as tf\n'), ((4620, 4690), 'note_seq.protobuf.music_pb2.NoteSequence.FromString', 'music_pb2.NoteSequence.FromString', (["predictions['sequence_predictions']"], {}), "(predictions['sequence_predictions'])\n", (4653, 4690), False, 'from note_seq.protobuf import music_pb2\n'), ((4721, 4786), 'note_seq.protobuf.music_pb2.NoteSequence.FromString', 'music_pb2.NoteSequence.FromString', (["predictions['sequence_labels']"], {}), "(predictions['sequence_labels'])\n", (4754, 4786), False, 'from note_seq.protobuf 
import music_pb2\n'), ((4854, 4907), 'six.ensure_text', 'six.ensure_text', (["predictions['sequence_ids']", '"""utf-8"""'], {}), "(predictions['sequence_ids'], 'utf-8')\n", (4869, 4907), False, 'import six\n'), ((5140, 5188), 'os.path.join', 'os.path.join', (['output_dir', "(filename_safe + '.mid')"], {}), "(output_dir, filename_safe + '.mid')\n", (5152, 5188), False, 'import os\n'), ((5193, 5257), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Writing inferred midi file to %s"""', 'output_file'], {}), "('Writing inferred midi file to %s', output_file)\n", (5208, 5257), True, 'import tensorflow.compat.v1 as tf\n'), ((5262, 5331), 'note_seq.midi_io.sequence_proto_to_midi_file', 'midi_io.sequence_proto_to_midi_file', (['sequence_prediction', 'output_file'], {}), '(sequence_prediction, output_file)\n', (5297, 5331), False, 'from note_seq import midi_io\n'), ((5357, 5411), 'os.path.join', 'os.path.join', (['output_dir', "(filename_safe + '_label.mid')"], {}), "(output_dir, filename_safe + '_label.mid')\n", (5369, 5411), False, 'import os\n'), ((5416, 5483), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Writing label midi file to %s"""', 'label_output_file'], {}), "('Writing label midi file to %s', label_output_file)\n", (5431, 5483), True, 'import tensorflow.compat.v1 as tf\n'), ((5488, 5558), 'note_seq.midi_io.sequence_proto_to_midi_file', 'midi_io.sequence_proto_to_midi_file', (['sequence_label', 'label_output_file'], {}), '(sequence_label, label_output_file)\n', (5523, 5558), False, 'from note_seq import midi_io\n'), ((5658, 5716), 'os.path.join', 'os.path.join', (['output_dir', "(filename_safe + '_pianoroll.png')"], {}), "(output_dir, filename_safe + '_pianoroll.png')\n", (5670, 5716), False, 'import os\n'), ((5730, 5815), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Writing acoustic logit/label file to %s"""', 'pianoroll_output_file'], {}), "('Writing acoustic logit/label file to %s',\n pianoroll_output_file)\n", (5745, 5815), True, 'import tensorflow.compat.v1 as tf\n'), ((7364, 7375), 'time.time', 'time.time', ([], {}), '()\n', (7373, 7375), False, 'import time\n'), ((8150, 8162), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (8160, 8162), True, 'import tensorflow.compat.v1 as tf\n'), ((6308, 6355), 'tensorflow.compat.v1.gfile.GFile', 'tf.gfile.GFile', (['pianoroll_output_file'], {'mode': '"""w"""'}), "(pianoroll_output_file, mode='w')\n", (6322, 6355), True, 'import tensorflow.compat.v1 as tf\n'), ((8444, 8486), 'tensorflow.compat.v1.constant', 'tf.constant', (['run_config'], {'name': '"""run_config"""'}), "(run_config, name='run_config')\n", (8455, 8486), True, 'import tensorflow.compat.v1 as tf\n'), ((4478, 4496), 'numpy.sum', 'np.sum', (['num_frames'], {}), '(num_frames)\n', (4484, 4496), True, 'import numpy as np\n'), ((4499, 4518), 'numpy.sum', 'np.sum', (['infer_times'], {}), '(infer_times)\n', (4505, 4518), True, 'import numpy as np\n'), ((6408, 6595), 'magenta.models.onsets_frames_transcription.infer_util.posterior_pianoroll_image', 'infer_util.posterior_pianoroll_image', (["predictions['onset_probs']", "predictions['onset_labels']", "predictions['frame_probs']", "predictions['frame_labels']", 'sequence_frame_predictions'], {}), "(predictions['onset_probs'],\n predictions['onset_labels'], predictions['frame_probs'], predictions[\n 'frame_labels'], sequence_frame_predictions)\n", (6444, 6595), False, 'from magenta.models.onsets_frames_transcription import infer_util\n'), ((6999, 7051), 
'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['histogram_name', 'all_metrics[k]'], {}), '(histogram_name, all_metrics[k])\n', (7019, 7051), True, 'import tensorflow.compat.v1 as tf\n'), ((7429, 7439), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (7437, 7439), True, 'import tensorflow.compat.v1 as tf\n'), ((7454, 7466), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (7464, 7466), True, 'import tensorflow.compat.v1 as tf\n'), ((7628, 7651), 'numpy.mean', 'np.mean', (['all_metrics[k]'], {}), '(all_metrics[k])\n', (7635, 7651), True, 'import numpy as np\n'), ((6163, 6202), 'magenta.models.onsets_frames_transcription.data.hparams_frames_per_second', 'data.hparams_frames_per_second', (['hparams'], {}), '(hparams)\n', (6193, 6202), False, 'from magenta.models.onsets_frames_transcription import data\n'), ((6748, 6758), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (6756, 6758), True, 'import tensorflow.compat.v1 as tf\n'), ((6773, 6785), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (6783, 6785), True, 'import tensorflow.compat.v1 as tf\n'), ((7212, 7235), 'numpy.mean', 'np.mean', (['all_metrics[k]'], {}), '(all_metrics[k])\n', (7219, 7235), True, 'import numpy as np\n')]
|
# utils.py
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import os
import warnings
def extract_tls_info(s):
tls_key_list = ['C', 'ST', 'L', 'O', 'OU', 'CN', 'emailAddress', 'unknown', 'serialNumber']
s = s.split(',')
s = [x.split('/') for x in s]
s = sum(s, [])
res = {}
for x in s:
if '=' not in x:
continue
x = x.split('=')
key = x[0].strip(' ')
value = x[1].strip(' ')
if key in tls_key_list:
res[key] = value
return res
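# Illustrative call (the DN string below is made up, not taken from the dataset):
#   extract_tls_info("C=US, ST=California/O=Example Corp/CN=example.com")
#   -> {'C': 'US', 'ST': 'California', 'O': 'Example Corp', 'CN': 'example.com'}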
def process_oneHot_by_cnt(dataset, key, threshold=0, vcnt=None):
col = dataset[key]
if vcnt is None:
vcnt = col.value_counts()
if threshold>0:
if isinstance(threshold, int):
vcnt = vcnt[vcnt>=threshold]
else:
if isinstance(threshold, float):
threshold = len(dataset)*threshold
vcnt = vcnt[vcnt>=threshold]
else:
UserWarning("In function process_oneHot_by_cnt `threshold` should be of int or float type. ")
return dataset, None
dtype = np.uint8
for vkey in vcnt.index:
vkey_col = np.array(col==vkey, dtype=dtype)
dataset[key+str(vkey)] = vkey_col
return dataset, vcnt
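# Illustrative call (column name and threshold are assumptions): keep only tlsVersion values
# seen at least 50 times and add one 0/1 indicator column per kept value; a float threshold
# such as 0.01 is treated as a fraction of len(dataset).
#   dataset, vcnt = process_oneHot_by_cnt(dataset, 'tlsVersion', threshold=50)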
def process_srcAddress(dataset):
    '''process_srcAddress: count how many times each row's srcAddress appears in the dataset
    (the divide-by-constant normalization is currently disabled) and add it to the data.
    Args:
        dataset: original dataset
    Returns:
        dataset: dataset with an added srcAddress_count column
    '''
addr_list = list(dataset['srcAddress'].values)
count_list = [0]*len(addr_list)
for i in range(len(addr_list)):
count_list[i] = addr_list.count(addr_list[i])
count_list = [x for x in count_list]
dataset['srcAddress_count'] = count_list
return dataset
def process_destAddress(dataset):
    '''process_destAddress: count how many times each row's destAddress appears in the dataset
    (the divide-by-constant normalization is currently disabled) and add it to the data.
    Args:
        dataset: original dataset
    Returns:
        dataset: dataset with an added destAddress_count column
    '''
addr_list = list(dataset['destAddress'].values)
count_list = [0]*len(addr_list)
for i in range(len(addr_list)):
count_list[i] = addr_list.count(addr_list[i])
dataset['destAddress_count'] = count_list
return dataset
def process_port(dataset, col_name):
    '''process_port: normalize the port number (the scaling code is commented out, so this is currently a no-op).
    Args:
        dataset: original dataset
    Returns:
        dataset: dataset with unchanged port values
    '''
# MAX = dataset[col_name].values.max()
# for idx in range(dataset.shape[0]):
# dataset.loc[idx, col_name] = dataset.loc[idx, col_name] * 1.0 / (MAX * 2)
# port_list = list(dataset[col_name].values)
# MAX = max(port_list)
# port_list = [x*1.0/(MAX*2) for x in port_list]
# dataset[col_name] = port_list
return dataset
def process_tlsVersion(dataset):
tls_list = list(dataset['tlsVersion'].values)
tls_dic = {'TLS 1.1': 0.0, 'TLS 1.3': 0.014012143858010275, 'TLSv1': 29.69283276450512, 'UNDETERMINED': 0.13636363636363638, 'TLS 1.2': 0.6491481374530754, 'other': 0.0}
tls_list = [tls_dic.get(x, tls_dic['other']) for x in tls_list]
dataset['tlsVersion'] = tls_list
return dataset
def process_tlsSni(dataset):
tlsSni_list = list(dataset['tlsSni'].values)
prefix = [''] * len(tlsSni_list)
postfix = [''] * len(tlsSni_list)
for idx in range(len(tlsSni_list)):
s = tlsSni_list[idx]
if s == "":
continue
        # drop a leading "www." prefix; str.strip('www.') would also eat stray 'w'/'.' characters
        if s.startswith('www.'):
            s = s[4:]
point_idx = s.find('.')
if point_idx < 0:
prefix[idx] = s
else:
prefix[idx] = s[:point_idx]
postfix[idx] = s[point_idx+1:]
onehotencoders = []
# prefix
onehotencoder = OneHotEncoder(categories='auto', sparse=False, dtype=np.int8, handle_unknown='ignore')
prefix_onehot = onehotencoder.fit_transform(np.array(prefix).reshape(-1,1))
dataset = pd.concat([dataset, pd.DataFrame(prefix_onehot)], axis=1)
onehotencoders.append({'tlsSni_prefix':onehotencoder})
# postfix
onehotencoder = OneHotEncoder(categories='auto', sparse=False, dtype=np.int8, handle_unknown='ignore')
postfix_onehot = onehotencoder.fit_transform(np.array(postfix).reshape(-1,1))
dataset = pd.concat([dataset, pd.DataFrame(postfix_onehot)], axis=1)
onehotencoders.append({'tlsSni_postfix':onehotencoder})
# remove tlsSni
# dataset = dataset.drop(['tlsSni'], axis=1)
return dataset, onehotencoders
def process_tlsIssuerDn(dataset):
tls_key_list = ['C', 'ST', 'L', 'O', 'OU', 'CN', 'emailAddress', 'unknown', 'serialNumber']
tlsSubject_list = list(dataset['tlsSubject'].values)
tlsIssuerDn_list = list(dataset['tlsIssuerDn'].values)
similarity = [0]*len(tlsIssuerDn_list)
for idx in range(len(tlsIssuerDn_list)):
subj = tlsSubject_list[idx]
issue = tlsIssuerDn_list[idx]
if subj == issue:
similarity[idx] = 1.0
continue
subj = extract_tls_info(subj)
issue = extract_tls_info(issue)
        MAX = max(len(subj), len(issue), 1)  # guard against division by zero when both DNs parse to empty dicts
same = 0
for key in tls_key_list:
if subj.get(key, None) and subj.get(key, None) == issue.get(key, None):
same += 1
similarity[idx] = same*1.0 / MAX
dataset['tlsSimilarity'] = similarity
return dataset
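# Worked example of the similarity above (made-up DNs): subject "C=US/O=Foo/CN=a.com" vs
# issuer "C=US/O=Foo/CN=ca.foo.com" share C and O, so tlsSimilarity = 2 / max(3, 3) ~ 0.667;
# identical subject and issuer strings score 1.0 (a typical self-signed-certificate signal).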
def process_tlsSni_type(dataset):
tlsSni_list = list(dataset['tlsSni'].values)
postfix_type = [0] * len(tlsSni_list)
postfix_len = [0] * len(tlsSni_list)
prefix_type = [0] * len(tlsSni_list)
point_count = [0] * len(tlsSni_list)
middle_len = [0] * len(tlsSni_list)
total_len = [0] * len(tlsSni_list)
postfix = [""] * len(tlsSni_list)
for idx in range(len(tlsSni_list)):
s = tlsSni_list[idx]
if '.' in s:
res = s.split('.')
postfix[idx] = res[-1]
postfix_len[idx] = len(res[-1])
prefix_type[idx] = ('www' in res[0])*1
point_count[idx] = len(res)
if 'www' in res[0]:
middle_len[idx] = len(res[1])
else:
middle_len[idx] = len(res[0])
total_len[idx] = len(s)
# dic = {'': 4.1722663408674405, 'me': 5.454545454545454, 'local': 0.0, 'link': 40.0, 'website': 38, 'tv': 0.0, 'net': 0.31277150304083406, 'ms': 0.0, '2': 0.0, 'gdn': 44, 'xyz': 18.461538461538463, 'cc': 0.0, 'ga': 14, 'co': 0.0, 'sb': 33, 'cn': 0.0, 'org': 0.0, 'so': 0.0, '174': 0.0, 'ru': 1696.6666666666667, 'io': 0.18433179723502305, 'com': 0.3168887288440763, 'top': 899.9999999999999, 'im': 0.0, '108': 0.0, 'digit': 0.025, 'other': 0.5360824742268041}
# postfix_type = [dic['digit'] if x.isdigit() else dic.get(x, dic['other']) for x in postfix]
dataset['tlsSni_postfix_type'] = postfix
dataset['tlsSni_postfix_len'] = postfix_len
dataset['tlsSni_prefix_type'] = prefix_type
dataset['tlsSni_point_count'] = point_count
dataset['tlsSni_middle_len'] = middle_len
dataset['tlsSni_total_len'] = total_len
return dataset
def process_tlsSubject_len(dataset):
tlsSubject_list = list(dataset['tlsSubject'].values)
tls_key_list = ['C', 'ST', 'L', 'O', 'OU', 'CN', 'emailAddress', 'unknown', 'serialNumber']
tls_info_len = [[0]*len(tlsSubject_list) for _ in range(len(tls_key_list)+3)]
for idx in range(len(tlsSubject_list)):
tmp = extract_tls_info(tlsSubject_list[idx])
for key_idx in range(len(tls_key_list)):
key = tls_key_list[key_idx]
tls_info_len[key_idx][idx] = len(tmp.get(key, ''))
if tls_info_len[key_idx][idx] != 0:
tls_info_len[-2][idx] += 1
if tmp == {}:
tls_info_len[-1][idx] = 1
tls_info_len[-3][idx] = len(tlsSubject_list[idx])
for key_idx in range(len(tls_key_list)):
key = tls_key_list[key_idx]
dataset[key+"_len"] = tls_info_len[key_idx]
dataset['tlsSubject_total_len'] = tls_info_len[-3]
dataset['tlsSubject_type_count'] = tls_info_len[-2]
dataset['tlsSubject_empty'] = tls_info_len[-1]
return dataset
def process_tlsSubject_other(dataset):
tlsSubject_list = list(dataset['tlsSubject'].values)
tls_XX = [0]*len(tlsSubject_list)
tls_star = [0]*len(tlsSubject_list)
tls_default = [0]*len(tlsSubject_list)
tls_some_state = [0]*len(tlsSubject_list)
for idx in range(len(tlsSubject_list)):
tmp = extract_tls_info(tlsSubject_list[idx])
for key, value in tmp.items():
if value=='XX':
tls_XX[idx] = 1
elif value=='*':
tls_star[idx] = 1
elif 'default' in value.lower():
tls_default[idx] = 1
elif 'Some-State' in value:
tls_some_state[idx] = 1
dataset['tls_XX'] = tls_XX
dataset['tls_star'] = tls_star
dataset['tls_default'] = tls_default
dataset['tls_some_state'] = tls_some_state
return dataset
def process_bytes(dataset):
bytesout_list = list(dataset['bytesOut'].values)
bytesin_list = list(dataset['bytesIn'].values)
pktin_list = list(dataset['pktsIn'].values)
pktout_list = list(dataset['pktsOut'].values)
bytesin_rate = [0] * len(bytesout_list)
bytesout_rate = [0] * len(bytesout_list)
for idx in range(len(bytesout_list)):
if pktout_list[idx] > 0:
bytesout_rate[idx] = bytesout_list[idx] / pktout_list[idx]
else:
bytesout_rate[idx] = 1000000
if pktin_list[idx] > 0:
bytesin_rate[idx] = bytesin_list[idx] / pktin_list[idx]
else:
bytesin_rate[idx] = 1000000
dataset['tls_bytesin_rate'] = bytesin_rate
dataset['tls_bytesout_rate'] = bytesout_rate
return dataset
def process_tlsSubject_type(dataset):
tlsSubject_list = list(dataset['tlsSubject'].values)
''' C_type, CN_type '''
# C_dic = {'': 0.3878787878787879, 'XX': 750, '--': 0.0, 'DE': 1.1764705882352942, 'DK': 0.0, 'JP': 0.0, 'CNstore': 0.0, 'CN': 0.017035775127768313, 'US': 0.6221461187214612, 'AU': 1046.0, 'MY': 0.0, 'GB': 540.0, 'other': 18.125000000000007}
# CN_dic = {'': 1.851012390450287, 'CMCC': 0.0, '1': 0.0, 'svn': 0.0, 'me': 20.909090909090907, 'org': 0.1234567901234568, 'localdomain': 0.0, 'link': 8, '*': 82, 'top': 31, 'net': 2.0, 'ms': 0.0, 'DBAPP': 0.0, 'info': 20, 'local': 0.0, 'XX': 17, 'sb': 33, 'sslvpn': 0.0, 'cn': 0.006082725060827251, 'io': 0.10695187165775401, '0': 0.0, 'com': 0.29000969932104753, 'im': 0.0, 'other': 8.768115942028999}
C_list = []
CN_list = []
ST_list = []
L_list = []
O_list = []
emailAddress_list = []
serialNumber_list = []
# tls_key_list = ['C', 'ST', 'L', 'O', 'OU', 'CN', 'emailAddress', 'unknown', 'serialNumber']
# tls_key_list = ['ST', 'L', 'O', 'emailAddress', 'serialNumber']
for idx in range(len(tlsSubject_list)):
tmp = extract_tls_info(tlsSubject_list[idx])
C_list.append(tmp.get('C', ''))
CN_list.append(tmp.get('CN', '').split('.')[-1])
ST_list.append(tmp.get('ST', ''))
L_list.append(tmp.get('L', ''))
O_list.append(tmp.get('O', ''))
emailAddress_list.append(tmp.get('emailAddress', ''))
serialNumber_list.append(tmp.get('serialNumber', ''))
# C_type = [C_dic.get(x, C_dic['other']) for x in C_list]
# CN_type = [CN_dic.get(x, CN_dic['other']) for x in CN_list]
dataset['tls_C_type'] = C_list
dataset['tls_CN_type'] = CN_list
dataset['ST'] = ST_list
dataset['L'] = L_list
dataset['O'] = O_list
dataset['emailAddress'] = emailAddress_list
dataset['serialNumber'] = serialNumber_list
return dataset
def drop_repeat_rows(dataset):
# return dataset.drop_duplicates([x for x in dataset.columns if x!='eventId'], keep='first')
return dataset
def process_port_adjacency(dataset):
idx = range(dataset.shape[0])
dataset['idx'] = idx
ips_label = dataset[['srcAddress', 'destAddress', 'srcPort', 'idx']]
ips_label = ips_label.sort_values(by = ['srcAddress', 'destAddress', 'srcPort'])
ips_label = list(ips_label.values)
port_adjacency = [0] * dataset.shape[0]
for idx in range(dataset.shape[0]):
cur = list(ips_label[idx])
if idx == dataset.shape[0] - 1:
next = ['', '', 0]
else:
next = list(ips_label[idx+1])
if idx == 0:
before = ['', '', 0]
else:
before = list(ips_label[idx-1])
min_ = 1000000
if cur[0] == before[0] and cur[1] == before[1]:
min_ = cur[2] - before[2]
if cur[0] == next[0] and cur[1] == next[1]:
tmp = next[2] - cur[2]
if tmp < min_:
min_ = tmp
if min_ != 1000000:
port_adjacency[cur[-1]] = min_
else:
port_adjacency[cur[-1]] = 350
dataset['srcPort_adjacency'] = port_adjacency
dataset = dataset.drop(['idx'], axis=1)
return dataset
def ExtractTlsSubject(data, onehotencoders=None, handle_key='tlsSubject'):
tls_key_list = ['C', 'ST', 'L', 'O', 'OU', 'CN']
#tls_key_hash_len = [65536, 65536, 65536, 65536, 65536, 65536]
for ncol in tls_key_list:
data[ncol] = ''
for idx, row in enumerate(data.iterrows()):
tlsSubject_str = row[1][handle_key]
if tlsSubject_str=='':
continue
tlsSubject_list = tlsSubject_str.split(',')
tlsSubject_list = [item.split('/') for item in tlsSubject_list]
tlsSubject_list = sum(tlsSubject_list, [])
i=0
        while i < len(tlsSubject_list):
            if '=' not in tlsSubject_list[i]:
                if i == 0:
                    # a leading fragment with no '=' has nothing to merge into: drop it
                    del tlsSubject_list[0]
                    continue
                # otherwise the fragment is part of the previous value that was split on ',' or '/'
                tlsSubject_list[i-1] += tlsSubject_list[i]
                del tlsSubject_list[i]
            else:
                tlsSubject_list[i] = tlsSubject_list[i].strip(' ')
                i += 1
tlsSubject_list = [item.split('=') for item in tlsSubject_list]
try:
for key, value in tlsSubject_list:
if key in tls_key_list:
data.loc[idx, key] = value
except:
pass
if not (onehotencoders is None):
for key in tls_key_list:
x = onehotencoders[key].transform(data[key].to_numpy().reshape(-1,1))
data = pd.concat([data, pd.DataFrame(x)], axis=1)
else:
onehotencoders = {}
for key in tls_key_list:
onehotencoder = OneHotEncoder(categories='auto', sparse=False, dtype=np.int8, handle_unknown='ignore')
x = onehotencoder.fit_transform(data[key].to_numpy().reshape(-1,1))
data = pd.concat([data, pd.DataFrame(x)], axis=1)
onehotencoders[key] = onehotencoder
return data, onehotencoders
def ELFHash(str):
hvalue = 0
for i in str:
hvalue = (hvalue<<4) + ord(i)
if (hvalue & 0xF0000) != 0:
x = hvalue & 0xF0000
hvalue ^= (x >> 10)
hvalue &= ~x
hvalue &= 0xFFFF
return hvalue
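# A quick illustration of the hash above (hedged: example strings are made up,
# not from the original source). ELFHash folds each character into a rolling
# 4-bit-shifted accumulator and keeps only the low 16 bits, so any string maps
# deterministically into range(65536):
#
#     ELFHash('com')   # -> 27229
#     ELFHash('')      # -> 0
#
# A table of this size is what the commented-out tls_key_hash_len values in
# ExtractTlsSubject appear to anticipate.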
def ProcessData(dataset, encoders=None, keyList=None):
keyList = {
# 'srcPort': 16,
'destPort': 1/1100,
'tlsVersion': 0,
'tlsSni_postfix_type': 5/13200,
'tls_C_type': 5/13200,
'tls_CN_type': 5/13200,
'srcAddress': 1.0,
'destAddress': 1.0,
'ST':5/13200,
'L':5/13200,
'O':5/13200,
'emailAddress': 5/13200,
'serialNumber': 5/13200
} if keyList==None else keyList
onhot_list = ['destPort', 'tlsVersion', 'tlsSni_postfix_type', 'tls_C_type', 'tls_CN_type'] + ['ST', 'L', 'O']
# , 'emailAddress', 'serialNumber'
if encoders==None:
dataset = process_tlsSni_type(dataset)
dataset = process_tlsSubject_type(dataset)
encoders = []
for key in onhot_list:
dataset, a_encoder = process_oneHot_by_cnt(dataset, key, keyList[key])
encoders.append(a_encoder)
dataset = process_port_adjacency(dataset)
dataset = process_tlsIssuerDn(dataset)
dataset = process_tlsSubject_len(dataset)
dataset = process_bytes(dataset)
else:
dataset = process_tlsSni_type(dataset)
dataset = process_tlsSubject_type(dataset)
for idx, key in enumerate(onhot_list):
dataset, _ = process_oneHot_by_cnt(dataset, key, keyList[key], vcnt=encoders[idx])
dataset = process_port_adjacency(dataset)
dataset = process_tlsIssuerDn(dataset)
dataset = process_tlsSubject_len(dataset)
dataset = process_bytes(dataset)
return dataset, encoders
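# A minimal usage sketch (hedged: the train/test DataFrames and the split are
# assumptions, not part of the original source). The key point is that the
# encoders fitted on the training data are passed back in unchanged when
# transforming new data:
#
#     train_df, encoders = ProcessData(train_df)             # fit encoders
#     test_df, _ = ProcessData(test_df, encoders=encoders)   # reuse encoders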
|
[
"pandas.DataFrame",
"sklearn.preprocessing.OneHotEncoder",
"numpy.array"
] |
[((3711, 3801), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""', 'sparse': '(False)', 'dtype': 'np.int8', 'handle_unknown': '"""ignore"""'}), "(categories='auto', sparse=False, dtype=np.int8,\n handle_unknown='ignore')\n", (3724, 3801), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((4043, 4133), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""', 'sparse': '(False)', 'dtype': 'np.int8', 'handle_unknown': '"""ignore"""'}), "(categories='auto', sparse=False, dtype=np.int8,\n handle_unknown='ignore')\n", (4056, 4133), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((1220, 1254), 'numpy.array', 'np.array', (['(col == vkey)'], {'dtype': 'dtype'}), '(col == vkey, dtype=dtype)\n', (1228, 1254), True, 'import numpy as np\n'), ((3912, 3939), 'pandas.DataFrame', 'pd.DataFrame', (['prefix_onehot'], {}), '(prefix_onehot)\n', (3924, 3939), True, 'import pandas as pd\n'), ((4246, 4274), 'pandas.DataFrame', 'pd.DataFrame', (['postfix_onehot'], {}), '(postfix_onehot)\n', (4258, 4274), True, 'import pandas as pd\n'), ((14512, 14602), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""', 'sparse': '(False)', 'dtype': 'np.int8', 'handle_unknown': '"""ignore"""'}), "(categories='auto', sparse=False, dtype=np.int8,\n handle_unknown='ignore')\n", (14525, 14602), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((3846, 3862), 'numpy.array', 'np.array', (['prefix'], {}), '(prefix)\n', (3854, 3862), True, 'import numpy as np\n'), ((4179, 4196), 'numpy.array', 'np.array', (['postfix'], {}), '(postfix)\n', (4187, 4196), True, 'import numpy as np\n'), ((14387, 14402), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (14399, 14402), True, 'import pandas as pd\n'), ((14715, 14730), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (14727, 14730), True, 'import pandas as pd\n')]
|
import argparse
import os
import sys
from sys import stdout
import mdtraj as md
import numpy as np
import parmed
import simtk.openmm as mm
import simtk.openmm.app as app
import simtk.unit as unit
from openforcefield.topology import Molecule, Topology
from openmmforcefields.generators import SystemGenerator
from perses.utils.openeye import OEMol_to_omm_ff, createOEMolFromSDF
from simtk.openmm import MonteCarloBarostat, XmlSerializer
from simtk.openmm.app import CheckpointReporter, ForceField, PDBFile
from simtk.openmm.app.pdbreporter import PDBReporter
from simtk.openmm.app.statedatareporter import StateDataReporter
# Read arguments to get ligand
parser = argparse.ArgumentParser()
parser.add_argument(
"-ligand",
help="the docked ligand to be prepared for simulation",
choices=["larotrectinib", "selitrectinib", "repotrectinib"],
type=str,
)
args = parser.parse_args()
chosen_ligand = args.ligand
# Parameters
print("--> Reading parameters")
pressure = 1.0 * unit.bar
temperature = 300 * unit.kelvin
nonbonded_method = app.PME
constraints = app.HBonds
remove_cm_motion = True
collision_rate = 1.0 / unit.picoseconds
timestep = 0.002 * unit.picoseconds
solvent_padding = 10.0 * unit.angstrom
ionic_strength = 150 * unit.millimolar
# Forcefield
protein_forcefield = "amber14/protein.ff14SB.xml"
small_molecule_forcefield = "openff-1.1.0"
solvation_forcefield = "amber14/tip3p.xml"
forcefield = ForceField(protein_forcefield, solvation_forcefield)
# Set steps and frequencies
nsteps = 2500000 # 5 ns
report_freq = 100
chk_freq = 500
traj_freq = 1000 # 2500 frames
# Set the input file names
input_pdb = "6KZD_prepped.pdb"
input_ligands_sdf = "../../structures_from_docking/6KZD_chemgauss_docking.sdf"
# Create output directory
output_prefix = "./output/" + chosen_ligand
os.makedirs(output_prefix, exist_ok=True)
print("--> Directory ", output_prefix, " created ")
# Set file names
integrator_xml_filename = "integrator_2fs.xml"
state_xml_filename = "equilibrated_state_5ns.xml"
state_pdb_filename = "equilibrated_state_5ns.pdb"
system_xml_filename = "equilibrated_system_5ns.xml"
checkpoint_filename = "equilibrated_checkpoint_5ns.chk"
traj_output_filename = "equilibrated_traj_5ns.xtc"
# Define the barostat for the system
barostat = mm.MonteCarloBarostat(pressure, temperature)
# Load and sort ligands
molecules = Molecule.from_file(input_ligands_sdf)
ligand_names = ["larotrectinib", "selitrectinib", "repotrectinib"]
ligand_dict = dict(zip(ligand_names, molecules)) # Create dict for easy access later
# Make the SystemGenerator
system_generator = SystemGenerator(
forcefields=[protein_forcefield, solvation_forcefield],
barostat=barostat,
periodic_forcefield_kwargs={"nonbondedMethod": app.PME},
small_molecule_forcefield=small_molecule_forcefield,
molecules=ligand_dict[chosen_ligand],
)
# Read in the PDB and create an OpenMM topology
pdbfile = app.PDBFile(input_pdb)
protein_topology, protein_positions = pdbfile.topology, pdbfile.positions
# Add ligand to topology - credit to @hannahbrucemacdonald for help here
print("--> Combining protein and ligand topologies")
off_ligand_topology = Topology.from_molecules(ligand_dict[chosen_ligand])
ligand_topology = off_ligand_topology.to_openmm()
ligand_positions = ligand_dict[chosen_ligand].conformers[0]
md_protein_topology = md.Topology.from_openmm(
protein_topology
) # using mdtraj for protein top
md_ligand_topology = md.Topology.from_openmm(
ligand_topology
) # using mdtraj for ligand top
md_complex_topology = md_protein_topology.join(md_ligand_topology) # add them together
complex_topology = md_complex_topology.to_openmm() # now back to openmm
total_atoms = len(protein_positions) + len(ligand_positions)
complex_positions = unit.Quantity(np.zeros([total_atoms, 3]), unit=unit.nanometers)
complex_positions[0 : len(protein_positions)] = protein_positions
for index, atom in enumerate(ligand_positions, len(protein_positions)):
coords = atom / atom.unit
complex_positions[index] = (
coords / 10.0
) * unit.nanometers # since openmm works in nm
# Add hydrogens and solvate the system
modeller = app.Modeller(complex_topology, complex_positions)
print("Adding hydrogens to the system...")
modeller.addHydrogens(system_generator.forcefield)
print("Solvating the system...")
modeller.addSolvent(
forcefield=system_generator.forcefield,
model="tip3p",
ionicStrength=ionic_strength,
padding=solvent_padding,
)
# Create an OpenMM system
print("--> Creating an OpenMM system")
system = system_generator.create_system(modeller.topology)
# Make and serialize integrator - Langevin dynamics
print(
"Serializing integrator to %s"
% os.path.join(output_prefix, integrator_xml_filename)
)
integrator = mm.LangevinIntegrator(
    temperature, collision_rate, timestep  # temperature, friction coefficient, time step
)
with open(os.path.join(output_prefix, integrator_xml_filename), "w") as outfile:
xml = mm.XmlSerializer.serialize(integrator)
outfile.write(xml)
# Define the platform to use; CUDA, OpenCL, CPU, or Reference. Or do not specify
# the platform to use the default (fastest) platform
# platform = mm.Platform.getPlatformByName("OpenCL")
# prop = dict(OpenCLPrecision="mixed") # Use mixed single/double precision
# Create the Simulation object
sim = app.Simulation(modeller.topology, system, integrator) # , platform, prop)
# Set the particle positions
sim.context.setPositions(modeller.positions)
# Minimize the energy
print("--> Minimising energy with docked ligand: " + chosen_ligand)
print(
" initial : %8.3f kcal/mol"
% (
sim.context.getState(getEnergy=True).getPotentialEnergy()
/ unit.kilocalories_per_mole
)
)
sim.minimizeEnergy()
print(
" final : %8.3f kcal/mol"
% (
sim.context.getState(getEnergy=True).getPotentialEnergy()
/ unit.kilocalories_per_mole
)
)
# set starting velocities:
print("--> Generating random starting velocities")
sim.context.setVelocitiesToTemperature(temperature)  # temperature already carries kelvin units
# write limited state information to standard out:
sim.reporters.append(
StateDataReporter(
stdout,
reportInterval=report_freq,
step=True,
time=True,
potentialEnergy=True,
kineticEnergy=True,
temperature=True,
speed=True,
progress=True,
remainingTime=True,
totalSteps=nsteps,
separator="\t",
)
)
# Write to checkpoint files regularly:
sim.reporters.append(
CheckpointReporter(
file=os.path.join(output_prefix, checkpoint_filename), reportInterval=chk_freq
)
)
# Write out the trajectory
sim.reporters.append(
md.reporters.XTCReporter(
file=os.path.join(output_prefix, traj_output_filename), reportInterval=traj_freq
)
)
# Run NPT dynamics
print("--> Running dynamics in the NPT ensemble for the 6KZD:" + chosen_ligand + " complex")
sim.step(nsteps)
# Save and serialize the final state
print("--> Serializing state to %s" % os.path.join(output_prefix, state_xml_filename))
state = sim.context.getState(
getPositions=True, getVelocities=True, getEnergy=True, getForces=True
)
with open(os.path.join(output_prefix, state_xml_filename), "w") as outfile:
xml = mm.XmlSerializer.serialize(state)
outfile.write(xml)
# Save the final state as a PDB
print("--> Saving final state as %s" % os.path.join(output_prefix, state_pdb_filename))
with open(os.path.join(output_prefix, state_pdb_filename), "w") as outfile:
PDBFile.writeFile(
sim.topology,
sim.context.getState(getPositions=True, enforcePeriodicBox=True).getPositions(),
file=outfile,
keepIds=True,
)
# Save and serialize system
print("--> Serializing system to %s" % os.path.join(output_prefix, system_xml_filename))
system.setDefaultPeriodicBoxVectors(*state.getPeriodicBoxVectors())
with open(os.path.join(output_prefix, system_xml_filename), "w") as outfile:
xml = mm.XmlSerializer.serialize(system)
outfile.write(xml)
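# A possible way to resume later from the serialized artifacts written above
# (a sketch, not part of the original script; it reuses the file names defined
# at the top and the XmlSerializer import):
#
#     with open(os.path.join(output_prefix, system_xml_filename)) as f:
#         system = XmlSerializer.deserialize(f.read())
#     with open(os.path.join(output_prefix, integrator_xml_filename)) as f:
#         integrator = XmlSerializer.deserialize(f.read())
#     sim = app.Simulation(modeller.topology, system, integrator)
#     sim.loadState(os.path.join(output_prefix, state_xml_filename))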
|
[
"openforcefield.topology.Molecule.from_file",
"os.makedirs",
"argparse.ArgumentParser",
"openmmforcefields.generators.SystemGenerator",
"simtk.openmm.app.Simulation",
"os.path.join",
"openforcefield.topology.Topology.from_molecules",
"numpy.zeros",
"simtk.openmm.LangevinIntegrator",
"simtk.openmm.app.PDBFile",
"simtk.openmm.app.statedatareporter.StateDataReporter",
"simtk.openmm.MonteCarloBarostat",
"simtk.openmm.XmlSerializer.serialize",
"simtk.openmm.app.Modeller",
"mdtraj.Topology.from_openmm",
"simtk.openmm.app.ForceField"
] |
[((665, 690), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (688, 690), False, 'import argparse\n'), ((1420, 1472), 'simtk.openmm.app.ForceField', 'ForceField', (['protein_forcefield', 'solvation_forcefield'], {}), '(protein_forcefield, solvation_forcefield)\n', (1430, 1472), False, 'from simtk.openmm.app import CheckpointReporter, ForceField, PDBFile\n'), ((1801, 1842), 'os.makedirs', 'os.makedirs', (['output_prefix'], {'exist_ok': '(True)'}), '(output_prefix, exist_ok=True)\n', (1812, 1842), False, 'import os\n'), ((2268, 2312), 'simtk.openmm.MonteCarloBarostat', 'mm.MonteCarloBarostat', (['pressure', 'temperature'], {}), '(pressure, temperature)\n', (2289, 2312), True, 'import simtk.openmm as mm\n'), ((2350, 2387), 'openforcefield.topology.Molecule.from_file', 'Molecule.from_file', (['input_ligands_sdf'], {}), '(input_ligands_sdf)\n', (2368, 2387), False, 'from openforcefield.topology import Molecule, Topology\n'), ((2588, 2840), 'openmmforcefields.generators.SystemGenerator', 'SystemGenerator', ([], {'forcefields': '[protein_forcefield, solvation_forcefield]', 'barostat': 'barostat', 'periodic_forcefield_kwargs': "{'nonbondedMethod': app.PME}", 'small_molecule_forcefield': 'small_molecule_forcefield', 'molecules': 'ligand_dict[chosen_ligand]'}), "(forcefields=[protein_forcefield, solvation_forcefield],\n barostat=barostat, periodic_forcefield_kwargs={'nonbondedMethod': app.\n PME}, small_molecule_forcefield=small_molecule_forcefield, molecules=\n ligand_dict[chosen_ligand])\n", (2603, 2840), False, 'from openmmforcefields.generators import SystemGenerator\n'), ((2909, 2931), 'simtk.openmm.app.PDBFile', 'app.PDBFile', (['input_pdb'], {}), '(input_pdb)\n', (2920, 2931), True, 'import simtk.openmm.app as app\n'), ((3155, 3206), 'openforcefield.topology.Topology.from_molecules', 'Topology.from_molecules', (['ligand_dict[chosen_ligand]'], {}), '(ligand_dict[chosen_ligand])\n', (3178, 3206), False, 'from openforcefield.topology import Molecule, Topology\n'), ((3340, 3381), 'mdtraj.Topology.from_openmm', 'md.Topology.from_openmm', (['protein_topology'], {}), '(protein_topology)\n', (3363, 3381), True, 'import mdtraj as md\n'), ((3441, 3481), 'mdtraj.Topology.from_openmm', 'md.Topology.from_openmm', (['ligand_topology'], {}), '(ligand_topology)\n', (3464, 3481), True, 'import mdtraj as md\n'), ((4152, 4201), 'simtk.openmm.app.Modeller', 'app.Modeller', (['complex_topology', 'complex_positions'], {}), '(complex_topology, complex_positions)\n', (4164, 4201), True, 'import simtk.openmm.app as app\n'), ((4772, 4832), 'simtk.openmm.LangevinIntegrator', 'mm.LangevinIntegrator', (['temperature', 'collision_rate', 'timestep'], {}), '(temperature, collision_rate, timestep)\n', (4793, 4832), True, 'import simtk.openmm as mm\n'), ((5318, 5371), 'simtk.openmm.app.Simulation', 'app.Simulation', (['modeller.topology', 'system', 'integrator'], {}), '(modeller.topology, system, integrator)\n', (5332, 5371), True, 'import simtk.openmm.app as app\n'), ((3776, 3802), 'numpy.zeros', 'np.zeros', (['[total_atoms, 3]'], {}), '([total_atoms, 3])\n', (3784, 3802), True, 'import numpy as np\n'), ((4954, 4992), 'simtk.openmm.XmlSerializer.serialize', 'mm.XmlSerializer.serialize', (['integrator'], {}), '(integrator)\n', (4980, 4992), True, 'import simtk.openmm as mm\n'), ((6119, 6344), 'simtk.openmm.app.statedatareporter.StateDataReporter', 'StateDataReporter', (['stdout'], {'reportInterval': 'report_freq', 'step': '(True)', 'time': '(True)', 'potentialEnergy': '(True)', 'kineticEnergy': 
'(True)', 'temperature': '(True)', 'speed': '(True)', 'progress': '(True)', 'remainingTime': '(True)', 'totalSteps': 'nsteps', 'separator': '"""\t"""'}), "(stdout, reportInterval=report_freq, step=True, time=True,\n potentialEnergy=True, kineticEnergy=True, temperature=True, speed=True,\n progress=True, remainingTime=True, totalSteps=nsteps, separator='\\t')\n", (6136, 6344), False, 'from simtk.openmm.app.statedatareporter import StateDataReporter\n'), ((7247, 7280), 'simtk.openmm.XmlSerializer.serialize', 'mm.XmlSerializer.serialize', (['state'], {}), '(state)\n', (7273, 7280), True, 'import simtk.openmm as mm\n'), ((7958, 7992), 'simtk.openmm.XmlSerializer.serialize', 'mm.XmlSerializer.serialize', (['system'], {}), '(system)\n', (7984, 7992), True, 'import simtk.openmm as mm\n'), ((4704, 4756), 'os.path.join', 'os.path.join', (['output_prefix', 'integrator_xml_filename'], {}), '(output_prefix, integrator_xml_filename)\n', (4716, 4756), False, 'import os\n'), ((4873, 4925), 'os.path.join', 'os.path.join', (['output_prefix', 'integrator_xml_filename'], {}), '(output_prefix, integrator_xml_filename)\n', (4885, 4925), False, 'import os\n'), ((7006, 7053), 'os.path.join', 'os.path.join', (['output_prefix', 'state_xml_filename'], {}), '(output_prefix, state_xml_filename)\n', (7018, 7053), False, 'import os\n'), ((7171, 7218), 'os.path.join', 'os.path.join', (['output_prefix', 'state_xml_filename'], {}), '(output_prefix, state_xml_filename)\n', (7183, 7218), False, 'import os\n'), ((7376, 7423), 'os.path.join', 'os.path.join', (['output_prefix', 'state_pdb_filename'], {}), '(output_prefix, state_pdb_filename)\n', (7388, 7423), False, 'import os\n'), ((7435, 7482), 'os.path.join', 'os.path.join', (['output_prefix', 'state_pdb_filename'], {}), '(output_prefix, state_pdb_filename)\n', (7447, 7482), False, 'import os\n'), ((7753, 7801), 'os.path.join', 'os.path.join', (['output_prefix', 'system_xml_filename'], {}), '(output_prefix, system_xml_filename)\n', (7765, 7801), False, 'import os\n'), ((7881, 7929), 'os.path.join', 'os.path.join', (['output_prefix', 'system_xml_filename'], {}), '(output_prefix, system_xml_filename)\n', (7893, 7929), False, 'import os\n'), ((6541, 6589), 'os.path.join', 'os.path.join', (['output_prefix', 'checkpoint_filename'], {}), '(output_prefix, checkpoint_filename)\n', (6553, 6589), False, 'import os\n'), ((6716, 6765), 'os.path.join', 'os.path.join', (['output_prefix', 'traj_output_filename'], {}), '(output_prefix, traj_output_filename)\n', (6728, 6765), False, 'import os\n')]
|
import h5py as h5
import feather
import pandas as pd
import numpy as np
import os
import time
correlation_files = os.listdir("correlation_folder")
for i in range(0, len(correlation_files)):
print(i)
correlation = pd.read_feather("correlation_folder/"+correlation_files[i])
f = h5.File("h5/"+correlation_files[i].replace(".f","")+".h5", "w")
dset = f.create_dataset("data/correlation", correlation.shape, dtype=np.float16, chunks=(1,correlation.shape[0]))
dset[:,:] = correlation
genemeta = f.create_dataset("meta/genes", data=np.array(list(map(str.upper, correlation.columns)), dtype='S10'), dtype='S10')
f.close()
def load_correlation(gene):
f = h5.File("h5/correlation_2.h5", "r")
genes = np.array(f["meta/genes"]).astype(np.str)
idx = list(genes).index(gene)
cor = np.array(f["data/correlation"][:,idx]).astype(np.float64)
f.close()
return(cor)
start = time.time()
coco = load_correlation("SOX2")
print(time.time() - start)
import h5py as h5
import s3fs
import numpy as np
import time
from multiprocessing import Process
import random
def loadGenesS3():
genes = 0
s3 = s3fs.S3FileSystem(anon=True)
with h5.File(s3.open("s3://mssm-prismx/correlation_0.h5", 'rb'), 'r', lib_version='latest') as f:
genes = np.array(f["meta/genes"]).astype(np.str)
return genes
def load_correlationS3(gene, genes, cormat=0, results=None):
    cor = 0
    s3 = s3fs.S3FileSystem(anon=True)
    with h5.File(s3.open("s3://mssm-prismx/correlation_"+str(cormat)+".h5", 'rb'), 'r', lib_version='latest') as f:
        idx = list(genes).index(gene)
        cor = np.array(f["data/correlation"][idx,:]).astype(np.float64)
    if results is not None:
        results[cormat] = cor
    return cor
genes = loadGenesS3()
start = time.time()
coco = load_correlationS3("MAPK1", genes)
print(time.time() - start)
from multiprocessing.pool import ThreadPool as Pool
import pandas as pd
start = time.time()
pool = Pool(1)
cormats = list(range(0,50))
cormats.append("global")
results = pd.DataFrame(np.zeros(shape=(len(genes), len(cormats))), columns=cormats)
for i in cormats:
pool.apply_async(load_correlationS3, ("P53", genes, i, results))
pool.close()
pool.join()
print(time.time() - start)
start = time.time()
pool = Pool(10)
results = pd.DataFrame(np.zeros(shape=(len(genes), 20)), columns=genes[1000:1020])
for gene in genes[1000:1010]:
results[gene] = pool.apply_async(load_correlationS3, (gene, genes,)).get()
pool.close()
pool.join()
print(time.time() - start)
start = time.time()
for gene in genes[2000:2050]:
    load_correlationS3(gene, genes)
print(time.time() - start)
f = h5.File("h5/correlation_0.h5", "r")
genes = np.array(f["meta/genes"]).astype(np.str)
f.close()
idx = list(genes).index("0610009L18")
print(idx)
list(genes).index('0610009L18')
f = h5.File("h5/correlation_0.h5", "r")
genes = np.array(f["meta/genes"]).astype(np.str)
f.close()
f = h5.File("h5/correlation_0.h5", "w")
dset = f.create_dataset("data/correlation", correlation.shape, dtype=np.float16, chunks=(1,correlation.shape[0]))
dset[:,:] = correlation
genemeta = f.create_dataset("meta/genes", data=np.array(list(map(str.upper, correlation.columns)), dtype='S10'), dtype='S10')
f.close()
import numpy as np
import matplotlib.pyplot as plt
t1 = np.arange(0, 8, 0.001)
s1 = np.sin(t1) + 1.5
s2 = np.sin(t1*6)/5
s3 = s1+s2-3.5
g1 = np.sin(t1+np.pi/2) + 1.5
g2 = np.sin(t1*6)/5
g3 = g1+g2-3.5
plt.plot(t1, s1, label="low frequency")
plt.plot(t1, s2, label="high frequency")
plt.plot(t1, s3, label="combined frequency")
plt.legend()
plt.title("gene A")
#plt.show()
plt.savefig("genea.png")
plt.close()
plt.plot(t1, g1, label="low frequency")
plt.plot(t1, g2, label="high frequency")
plt.plot(t1, g3, label="combined frequency")
plt.legend()
plt.title("gene B")
#plt.show()
plt.savefig("geneb.png")
plt.close()
plt.plot(t1, s3+3.5, label="gene A")
plt.plot(t1, g3+3.5, label="gene B")
plt.legend()
plt.title("full spectrum gene similarity")
#plt.show()
plt.savefig("fullspectrum.png")
plt.close()
plt.plot(t1, s2, label="gene A")
plt.plot(t1, g2, label="gene B")
plt.legend()
plt.title("high frequency spectrum gene similarity")
#plt.show()
plt.savefig("highspectrum.png")
plt.close()
np.corrcoef(s3,g3)
k1 = list(s3[4000:8000])+list(s3[0:4000])
k2 = list(g3[4000:8000])+list(g3[0:4000])
plt.plot(t1, np.array(k1)+3.5, label="gene A")
plt.plot(t1, np.array(k2)+3.5, label="gene B")
plt.legend()
plt.title("shuffled spectrum gene similarity")
#plt.show()
plt.savefig("shufflespectrum.png")
plt.close()
|
[
"pandas.read_feather",
"os.listdir",
"matplotlib.pyplot.savefig",
"numpy.corrcoef",
"s3fs.S3FileSystem",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"h5py.File",
"multiprocessing.pool.ThreadPool",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.sin",
"matplotlib.pyplot.title",
"time.time",
"numpy.arange"
] |
[((103, 135), 'os.listdir', 'os.listdir', (['"""correlation_folder"""'], {}), "('correlation_folder')\n", (113, 135), False, 'import os\n'), ((901, 912), 'time.time', 'time.time', ([], {}), '()\n', (910, 912), False, 'import time\n'), ((1728, 1739), 'time.time', 'time.time', ([], {}), '()\n', (1737, 1739), False, 'import time\n'), ((1894, 1905), 'time.time', 'time.time', ([], {}), '()\n', (1903, 1905), False, 'import time\n'), ((1913, 1920), 'multiprocessing.pool.ThreadPool', 'Pool', (['(1)'], {}), '(1)\n', (1917, 1920), True, 'from multiprocessing.pool import ThreadPool as Pool\n'), ((2209, 2220), 'time.time', 'time.time', ([], {}), '()\n', (2218, 2220), False, 'import time\n'), ((2228, 2236), 'multiprocessing.pool.ThreadPool', 'Pool', (['(10)'], {}), '(10)\n', (2232, 2236), True, 'from multiprocessing.pool import ThreadPool as Pool\n'), ((2491, 2502), 'time.time', 'time.time', ([], {}), '()\n', (2500, 2502), False, 'import time\n'), ((2612, 2647), 'h5py.File', 'h5.File', (['"""h5/correlation_0.h5"""', '"""r"""'], {}), "('h5/correlation_0.h5', 'r')\n", (2619, 2647), True, 'import h5py as h5\n'), ((2796, 2831), 'h5py.File', 'h5.File', (['"""h5/correlation_0.h5"""', '"""r"""'], {}), "('h5/correlation_0.h5', 'r')\n", (2803, 2831), True, 'import h5py as h5\n'), ((2899, 2934), 'h5py.File', 'h5.File', (['"""h5/correlation_0.h5"""', '"""w"""'], {}), "('h5/correlation_0.h5', 'w')\n", (2906, 2934), True, 'import h5py as h5\n'), ((3287, 3309), 'numpy.arange', 'np.arange', (['(0)', '(8)', '(0.001)'], {}), '(0, 8, 0.001)\n', (3296, 3309), True, 'import numpy as np\n'), ((3436, 3475), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 's1'], {'label': '"""low frequency"""'}), "(t1, s1, label='low frequency')\n", (3444, 3475), True, 'import matplotlib.pyplot as plt\n'), ((3476, 3516), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 's2'], {'label': '"""high frequency"""'}), "(t1, s2, label='high frequency')\n", (3484, 3516), True, 'import matplotlib.pyplot as plt\n'), ((3517, 3561), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 's3'], {'label': '"""combined frequency"""'}), "(t1, s3, label='combined frequency')\n", (3525, 3561), True, 'import matplotlib.pyplot as plt\n'), ((3562, 3574), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3572, 3574), True, 'import matplotlib.pyplot as plt\n'), ((3575, 3594), 'matplotlib.pyplot.title', 'plt.title', (['"""gene A"""'], {}), "('gene A')\n", (3584, 3594), True, 'import matplotlib.pyplot as plt\n'), ((3607, 3631), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""genea.png"""'], {}), "('genea.png')\n", (3618, 3631), True, 'import matplotlib.pyplot as plt\n'), ((3632, 3643), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3641, 3643), True, 'import matplotlib.pyplot as plt\n'), ((3646, 3685), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 'g1'], {'label': '"""low frequency"""'}), "(t1, g1, label='low frequency')\n", (3654, 3685), True, 'import matplotlib.pyplot as plt\n'), ((3686, 3726), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 'g2'], {'label': '"""high frequency"""'}), "(t1, g2, label='high frequency')\n", (3694, 3726), True, 'import matplotlib.pyplot as plt\n'), ((3727, 3771), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 'g3'], {'label': '"""combined frequency"""'}), "(t1, g3, label='combined frequency')\n", (3735, 3771), True, 'import matplotlib.pyplot as plt\n'), ((3772, 3784), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3782, 3784), True, 'import matplotlib.pyplot as plt\n'), ((3785, 3804), 'matplotlib.pyplot.title', 
'plt.title', (['"""gene B"""'], {}), "('gene B')\n", (3794, 3804), True, 'import matplotlib.pyplot as plt\n'), ((3817, 3841), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""geneb.png"""'], {}), "('geneb.png')\n", (3828, 3841), True, 'import matplotlib.pyplot as plt\n'), ((3842, 3853), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3851, 3853), True, 'import matplotlib.pyplot as plt\n'), ((3855, 3893), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', '(s3 + 3.5)'], {'label': '"""gene A"""'}), "(t1, s3 + 3.5, label='gene A')\n", (3863, 3893), True, 'import matplotlib.pyplot as plt\n'), ((3892, 3930), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', '(g3 + 3.5)'], {'label': '"""gene B"""'}), "(t1, g3 + 3.5, label='gene B')\n", (3900, 3930), True, 'import matplotlib.pyplot as plt\n'), ((3929, 3941), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3939, 3941), True, 'import matplotlib.pyplot as plt\n'), ((3942, 3984), 'matplotlib.pyplot.title', 'plt.title', (['"""full spectrum gene similarity"""'], {}), "('full spectrum gene similarity')\n", (3951, 3984), True, 'import matplotlib.pyplot as plt\n'), ((3997, 4028), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fullspectrum.png"""'], {}), "('fullspectrum.png')\n", (4008, 4028), True, 'import matplotlib.pyplot as plt\n'), ((4029, 4040), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4038, 4040), True, 'import matplotlib.pyplot as plt\n'), ((4042, 4074), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 's2'], {'label': '"""gene A"""'}), "(t1, s2, label='gene A')\n", (4050, 4074), True, 'import matplotlib.pyplot as plt\n'), ((4075, 4107), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 'g2'], {'label': '"""gene B"""'}), "(t1, g2, label='gene B')\n", (4083, 4107), True, 'import matplotlib.pyplot as plt\n'), ((4108, 4120), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4118, 4120), True, 'import matplotlib.pyplot as plt\n'), ((4121, 4173), 'matplotlib.pyplot.title', 'plt.title', (['"""high frequency spectrum gene similarity"""'], {}), "('high frequency spectrum gene similarity')\n", (4130, 4173), True, 'import matplotlib.pyplot as plt\n'), ((4186, 4217), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""highspectrum.png"""'], {}), "('highspectrum.png')\n", (4197, 4217), True, 'import matplotlib.pyplot as plt\n'), ((4218, 4229), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4227, 4229), True, 'import matplotlib.pyplot as plt\n'), ((4231, 4250), 'numpy.corrcoef', 'np.corrcoef', (['s3', 'g3'], {}), '(s3, g3)\n', (4242, 4250), True, 'import numpy as np\n'), ((4431, 4443), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4441, 4443), True, 'import matplotlib.pyplot as plt\n'), ((4444, 4490), 'matplotlib.pyplot.title', 'plt.title', (['"""shuffled spectrum gene similarity"""'], {}), "('shuffled spectrum gene similarity')\n", (4453, 4490), True, 'import matplotlib.pyplot as plt\n'), ((4503, 4537), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""shufflespectrum.png"""'], {}), "('shufflespectrum.png')\n", (4514, 4537), True, 'import matplotlib.pyplot as plt\n'), ((4538, 4549), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4547, 4549), True, 'import matplotlib.pyplot as plt\n'), ((211, 272), 'pandas.read_feather', 'pd.read_feather', (["('correlation_folder/' + correlation_files[i])"], {}), "('correlation_folder/' + correlation_files[i])\n", (226, 272), True, 'import pandas as pd\n'), ((671, 706), 'h5py.File', 'h5.File', (['"""h5/correlation_2.h5"""', '"""r"""'], {}), 
"('h5/correlation_2.h5', 'r')\n", (678, 706), True, 'import h5py as h5\n'), ((1130, 1158), 's3fs.S3FileSystem', 's3fs.S3FileSystem', ([], {'anon': '(True)'}), '(anon=True)\n', (1147, 1158), False, 'import s3fs\n'), ((1411, 1439), 's3fs.S3FileSystem', 's3fs.S3FileSystem', ([], {'anon': '(True)'}), '(anon=True)\n', (1428, 1439), False, 'import s3fs\n'), ((3316, 3326), 'numpy.sin', 'np.sin', (['t1'], {}), '(t1)\n', (3322, 3326), True, 'import numpy as np\n'), ((3338, 3352), 'numpy.sin', 'np.sin', (['(t1 * 6)'], {}), '(t1 * 6)\n', (3344, 3352), True, 'import numpy as np\n'), ((3374, 3396), 'numpy.sin', 'np.sin', (['(t1 + np.pi / 2)'], {}), '(t1 + np.pi / 2)\n', (3380, 3396), True, 'import numpy as np\n'), ((3404, 3418), 'numpy.sin', 'np.sin', (['(t1 * 6)'], {}), '(t1 * 6)\n', (3410, 3418), True, 'import numpy as np\n'), ((951, 962), 'time.time', 'time.time', ([], {}), '()\n', (960, 962), False, 'import time\n'), ((1788, 1799), 'time.time', 'time.time', ([], {}), '()\n', (1797, 1799), False, 'import time\n'), ((2177, 2188), 'time.time', 'time.time', ([], {}), '()\n', (2186, 2188), False, 'import time\n'), ((2461, 2472), 'time.time', 'time.time', ([], {}), '()\n', (2470, 2472), False, 'import time\n'), ((2585, 2596), 'time.time', 'time.time', ([], {}), '()\n', (2594, 2596), False, 'import time\n'), ((2656, 2681), 'numpy.array', 'np.array', (["f['meta/genes']"], {}), "(f['meta/genes'])\n", (2664, 2681), True, 'import numpy as np\n'), ((2841, 2866), 'numpy.array', 'np.array', (["f['meta/genes']"], {}), "(f['meta/genes'])\n", (2849, 2866), True, 'import numpy as np\n'), ((4350, 4362), 'numpy.array', 'np.array', (['k1'], {}), '(k1)\n', (4358, 4362), True, 'import numpy as np\n'), ((4397, 4409), 'numpy.array', 'np.array', (['k2'], {}), '(k2)\n', (4405, 4409), True, 'import numpy as np\n'), ((719, 744), 'numpy.array', 'np.array', (["f['meta/genes']"], {}), "(f['meta/genes'])\n", (727, 744), True, 'import numpy as np\n'), ((804, 843), 'numpy.array', 'np.array', (["f['data/correlation'][:, idx]"], {}), "(f['data/correlation'][:, idx])\n", (812, 843), True, 'import numpy as np\n'), ((1277, 1302), 'numpy.array', 'np.array', (["f['meta/genes']"], {}), "(f['meta/genes'])\n", (1285, 1302), True, 'import numpy as np\n'), ((1608, 1647), 'numpy.array', 'np.array', (["f['data/correlation'][idx, :]"], {}), "(f['data/correlation'][idx, :])\n", (1616, 1647), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
# Visualization of particles with gravity
# Source: http://enja.org/2010/08/27/adventures-in-opencl-part-2-particles-with-opengl/
import pyopencl as cl # OpenCL - GPU computing interface
mf = cl.mem_flags
from pyopencl.tools import get_gl_sharing_context_properties
from OpenGL.GL import * # OpenGL - GPU rendering interface
from OpenGL.GLU import * # OpenGL tools (mipmaps, NURBS, perspective projection, shapes)
from OpenGL.GLUT import * # OpenGL tool to make a visualization window
from OpenGL.arrays import vbo
import numpy # Number tools
import sys # System tools (path, modules, maxint)
width = 800
height = 600
num_particles = 100000
time_step = .005
mouse_down = False
mouse_old = {'x': 0., 'y': 0.}
rotate = {'x': 0., 'y': 0., 'z': 0.}
translate = {'x': 0., 'y': 0., 'z': 0.}
initial_translate = {'x': 0., 'y': 0., 'z': -2.5}
def glut_window():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(width, height)
glutInitWindowPosition(0, 0)
window = glutCreateWindow("Particle Simulation")
glutDisplayFunc(on_display) # Called by GLUT every frame
glutKeyboardFunc(on_key)
glutMouseFunc(on_click)
glutMotionFunc(on_mouse_move)
    glutTimerFunc(10, on_timer, 10)  # Call on_timer every 10 ms
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., width / float(height), .1, 1000.)
return(window)
def initial_buffers(num_particles):
np_position = numpy.ndarray((num_particles, 4), dtype=numpy.float32)
np_color = numpy.ndarray((num_particles, 4), dtype=numpy.float32)
np_velocity = numpy.ndarray((num_particles, 4), dtype=numpy.float32)
np_position[:,0] = numpy.sin(numpy.arange(0., num_particles) * 2.001 * numpy.pi / num_particles)
np_position[:,0] *= numpy.random.random_sample((num_particles,)) / 3. + .2
np_position[:,1] = numpy.cos(numpy.arange(0., num_particles) * 2.001 * numpy.pi / num_particles)
np_position[:,1] *= numpy.random.random_sample((num_particles,)) / 3. + .2
np_position[:,2] = 0.
np_position[:,3] = 1.
np_color[:,:] = [1.,1.,1.,1.] # White particles
np_velocity[:,0] = np_position[:,0] * 2.
np_velocity[:,1] = np_position[:,1] * 2.
np_velocity[:,2] = 3.
np_velocity[:,3] = numpy.random.random_sample((num_particles, ))
gl_position = vbo.VBO(data=np_position, usage=GL_DYNAMIC_DRAW, target=GL_ARRAY_BUFFER)
gl_position.bind()
gl_color = vbo.VBO(data=np_color, usage=GL_DYNAMIC_DRAW, target=GL_ARRAY_BUFFER)
gl_color.bind()
return (np_position, np_velocity, gl_position, gl_color)
def on_timer(t):
glutTimerFunc(t, on_timer, t)
glutPostRedisplay()
def on_key(*args):
if args[0] == '\033' or args[0] == 'q':
sys.exit()
def on_click(button, state, x, y):
mouse_old['x'] = x
mouse_old['y'] = y
def on_mouse_move(x, y):
rotate['x'] += (y - mouse_old['y']) * .2
rotate['y'] += (x - mouse_old['x']) * .2
mouse_old['x'] = x
mouse_old['y'] = y
def on_display():
"""Render the particles"""
    # Update the particle positions by calling the OpenCL kernel
cl.enqueue_acquire_gl_objects(queue, [cl_gl_position, cl_gl_color])
kernelargs = (cl_gl_position, cl_gl_color, cl_velocity, cl_start_position, cl_start_velocity, numpy.float32(time_step))
program.particle_fountain(queue, (num_particles,), None, *(kernelargs))
cl.enqueue_release_gl_objects(queue, [cl_gl_position, cl_gl_color])
queue.finish()
glFlush()
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
# Handle mouse transformations
glTranslatef(initial_translate['x'], initial_translate['y'], initial_translate['z'])
glRotatef(rotate['x'], 1, 0, 0)
glRotatef(rotate['y'], 0, 1, 0) #we switched around the axis so make this rotate_z
glTranslatef(translate['x'], translate['y'], translate['z'])
# Render the particles
glEnable(GL_POINT_SMOOTH)
glPointSize(2)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# Set up the VBOs
gl_color.bind()
glColorPointer(4, GL_FLOAT, 0, gl_color)
gl_position.bind()
glVertexPointer(4, GL_FLOAT, 0, gl_position)
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
# Draw the VBOs
glDrawArrays(GL_POINTS, 0, num_particles)
glDisableClientState(GL_COLOR_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
glDisable(GL_BLEND)
glutSwapBuffers()
window = glut_window()
(np_position, np_velocity, gl_position, gl_color) = initial_buffers(num_particles)
platform = cl.get_platforms()[0]
context = cl.Context(properties=[(cl.context_properties.PLATFORM, platform)] + get_gl_sharing_context_properties())
queue = cl.CommandQueue(context)
cl_velocity = cl.Buffer(context, mf.COPY_HOST_PTR, hostbuf=np_velocity)
cl_start_position = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=np_position)
cl_start_velocity = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=np_velocity)
cl_gl_position = cl.GLBuffer(context, mf.READ_WRITE, int(gl_position.buffers[0]))
cl_gl_color = cl.GLBuffer(context, mf.READ_WRITE, int(gl_color.buffers[0]))
kernel = """__kernel void particle_fountain(__global float4* position,
__global float4* color,
__global float4* velocity,
__global float4* start_position,
__global float4* start_velocity,
float time_step)
{
unsigned int i = get_global_id(0);
float4 p = position[i];
float4 v = velocity[i];
float life = velocity[i].w;
life -= time_step;
if (life <= 0.f)
{
p = start_position[i];
v = start_velocity[i];
life = 1.0f;
}
v.z -= 9.8f*time_step;
p.x += v.x*time_step;
p.y += v.y*time_step;
p.z += v.z*time_step;
v.w = life;
position[i] = p;
velocity[i] = v;
color[i].w = life; /* Fade points as life decreases */
}"""
program = cl.Program(context, kernel).build()
glutMainLoop()
|
[
"pyopencl.Buffer",
"pyopencl.Program",
"pyopencl.enqueue_release_gl_objects",
"numpy.random.random_sample",
"pyopencl.get_platforms",
"numpy.float32",
"pyopencl.CommandQueue",
"pyopencl.tools.get_gl_sharing_context_properties",
"numpy.ndarray",
"pyopencl.enqueue_acquire_gl_objects",
"sys.exit",
"OpenGL.arrays.vbo.VBO",
"numpy.arange"
] |
[((4872, 4896), 'pyopencl.CommandQueue', 'cl.CommandQueue', (['context'], {}), '(context)\n', (4887, 4896), True, 'import pyopencl as cl\n'), ((4912, 4969), 'pyopencl.Buffer', 'cl.Buffer', (['context', 'mf.COPY_HOST_PTR'], {'hostbuf': 'np_velocity'}), '(context, mf.COPY_HOST_PTR, hostbuf=np_velocity)\n', (4921, 4969), True, 'import pyopencl as cl\n'), ((4990, 5062), 'pyopencl.Buffer', 'cl.Buffer', (['context', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 'np_position'}), '(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=np_position)\n', (4999, 5062), True, 'import pyopencl as cl\n'), ((5083, 5155), 'pyopencl.Buffer', 'cl.Buffer', (['context', '(mf.READ_ONLY | mf.COPY_HOST_PTR)'], {'hostbuf': 'np_velocity'}), '(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=np_velocity)\n', (5092, 5155), True, 'import pyopencl as cl\n'), ((1544, 1598), 'numpy.ndarray', 'numpy.ndarray', (['(num_particles, 4)'], {'dtype': 'numpy.float32'}), '((num_particles, 4), dtype=numpy.float32)\n', (1557, 1598), False, 'import numpy\n'), ((1614, 1668), 'numpy.ndarray', 'numpy.ndarray', (['(num_particles, 4)'], {'dtype': 'numpy.float32'}), '((num_particles, 4), dtype=numpy.float32)\n', (1627, 1668), False, 'import numpy\n'), ((1687, 1741), 'numpy.ndarray', 'numpy.ndarray', (['(num_particles, 4)'], {'dtype': 'numpy.float32'}), '((num_particles, 4), dtype=numpy.float32)\n', (1700, 1741), False, 'import numpy\n'), ((2350, 2394), 'numpy.random.random_sample', 'numpy.random.random_sample', (['(num_particles,)'], {}), '((num_particles,))\n', (2376, 2394), False, 'import numpy\n'), ((2419, 2491), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', ([], {'data': 'np_position', 'usage': 'GL_DYNAMIC_DRAW', 'target': 'GL_ARRAY_BUFFER'}), '(data=np_position, usage=GL_DYNAMIC_DRAW, target=GL_ARRAY_BUFFER)\n', (2426, 2491), False, 'from OpenGL.arrays import vbo\n'), ((2530, 2599), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', ([], {'data': 'np_color', 'usage': 'GL_DYNAMIC_DRAW', 'target': 'GL_ARRAY_BUFFER'}), '(data=np_color, usage=GL_DYNAMIC_DRAW, target=GL_ARRAY_BUFFER)\n', (2537, 2599), False, 'from OpenGL.arrays import vbo\n'), ((3212, 3279), 'pyopencl.enqueue_acquire_gl_objects', 'cl.enqueue_acquire_gl_objects', (['queue', '[cl_gl_position, cl_gl_color]'], {}), '(queue, [cl_gl_position, cl_gl_color])\n', (3241, 3279), True, 'import pyopencl as cl\n'), ((3484, 3551), 'pyopencl.enqueue_release_gl_objects', 'cl.enqueue_release_gl_objects', (['queue', '[cl_gl_position, cl_gl_color]'], {}), '(queue, [cl_gl_position, cl_gl_color])\n', (3513, 3551), True, 'import pyopencl as cl\n'), ((4724, 4742), 'pyopencl.get_platforms', 'cl.get_platforms', ([], {}), '()\n', (4740, 4742), True, 'import pyopencl as cl\n'), ((2830, 2840), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2838, 2840), False, 'import sys\n'), ((3378, 3402), 'numpy.float32', 'numpy.float32', (['time_step'], {}), '(time_step)\n', (3391, 3402), False, 'import numpy\n'), ((6258, 6285), 'pyopencl.Program', 'cl.Program', (['context', 'kernel'], {}), '(context, kernel)\n', (6268, 6285), True, 'import pyopencl as cl\n'), ((1869, 1913), 'numpy.random.random_sample', 'numpy.random.random_sample', (['(num_particles,)'], {}), '((num_particles,))\n', (1895, 1913), False, 'import numpy\n'), ((2050, 2094), 'numpy.random.random_sample', 'numpy.random.random_sample', (['(num_particles,)'], {}), '((num_particles,))\n', (2076, 2094), False, 'import numpy\n'), ((4825, 4860), 'pyopencl.tools.get_gl_sharing_context_properties', 'get_gl_sharing_context_properties', ([], {}), '()\n', (4858, 4860), False, 'from 
pyopencl.tools import get_gl_sharing_context_properties\n'), ((1776, 1808), 'numpy.arange', 'numpy.arange', (['(0.0)', 'num_particles'], {}), '(0.0, num_particles)\n', (1788, 1808), False, 'import numpy\n'), ((1957, 1989), 'numpy.arange', 'numpy.arange', (['(0.0)', 'num_particles'], {}), '(0.0, num_particles)\n', (1969, 1989), False, 'import numpy\n')]
|
"""Module containing models representing patients and their data.
The Model layer is responsible for the 'business logic' part of the software.
Patients' data is held in an inflammation table (2D array) where each row contains
inflammation data for a single patient taken over a number of days
and each column represents a single day across all patients.
"""
import numpy as np
def load_csv(filename):
"""Load a Numpy array from a CSV
:param filename: Filename of CSV to load
"""
return np.loadtxt(fname=filename, delimiter=',')
def daily_mean(data):
"""Calculate the daily mean of a 2D inflammation data array.
:param: 2D array of data
:returns: vector of arithmetic means of data"""
return np.mean(data, axis=0)
def daily_max(data):
"""Calculate the daily max of a 2D inflammation data array.
:param: 2D array of data
:returns: vector of maximum of data"""
return np.max(data, axis=0)
def daily_min(data):
"""Calculate the daily min of a 2D inflammation data array.
:param: 2D array of data
:returns: vector of min of data"""
return np.min(data, axis=0)
def patient_normalise(data):
"""
Normalise patient data from a 2D inflammation data array.
NaN values are ignored, and normalised to 0.
Negative values are rounded to 0.
"""
if np.any(data < 0):
raise ValueError('Inflammation values should not be negative')
max_val = np.max(data, axis=1)
with np.errstate(invalid='ignore', divide='ignore'):
normalised = data / max_val[:, np.newaxis]
normalised[np.isnan(normalised)] = 0
normalised[normalised < 0] = 0
return normalised
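# A minimal worked example (hedged: the array is made up for illustration):
#
#     >>> patient_normalise(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
#     array([[0.33333333, 0.66666667, 1.        ],
#            [0.66666667, 0.83333333, 1.        ]])
#
# Each patient's row is divided by that row's maximum, so every row peaks at 1.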
class Observation:
def __init__(self, day, value):
self.day = day
self.value = value
def __str__(self):
        return str(self.value)
class Person:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __eq__(self, other):
return self.name == other.name
class Patient(Person):
"""A patient in an inflammation study."""
def __init__(self, name, observations=None):
super().__init__(name)
self.observations = []
if observations is not None:
self.observations = observations
def add_observation(self, value, day=None):
if day is None:
try:
day = self.observations[-1].day + 1
except IndexError:
day = 0
        new_observation = Observation(day, value)
self.observations.append(new_observation)
return new_observation
@property
def last_observation(self):
return self.observations[-1]
# TODO(lesson-design) Implement data persistence
# TODO(lesson-design) Add Doctor class
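# Example usage of the classes above (a sketch, not part of the original module):
#
#     alice = Patient('Alice')
#     alice.add_observation(3)       # first record: day defaults to 0
#     alice.add_observation(5)       # next record: day auto-increments to 1
#     alice.last_observation.value   # -> 5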
|
[
"numpy.mean",
"numpy.any",
"numpy.max",
"numpy.errstate",
"numpy.isnan",
"numpy.min",
"numpy.loadtxt"
] |
[((511, 552), 'numpy.loadtxt', 'np.loadtxt', ([], {'fname': 'filename', 'delimiter': '""","""'}), "(fname=filename, delimiter=',')\n", (521, 552), True, 'import numpy as np\n'), ((735, 756), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (742, 756), True, 'import numpy as np\n'), ((928, 948), 'numpy.max', 'np.max', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (934, 948), True, 'import numpy as np\n'), ((1116, 1136), 'numpy.min', 'np.min', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1122, 1136), True, 'import numpy as np\n'), ((1342, 1358), 'numpy.any', 'np.any', (['(data < 0)'], {}), '(data < 0)\n', (1348, 1358), True, 'import numpy as np\n'), ((1445, 1465), 'numpy.max', 'np.max', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (1451, 1465), True, 'import numpy as np\n'), ((1475, 1521), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""'}), "(invalid='ignore', divide='ignore')\n", (1486, 1521), True, 'import numpy as np\n'), ((1589, 1609), 'numpy.isnan', 'np.isnan', (['normalised'], {}), '(normalised)\n', (1597, 1609), True, 'import numpy as np\n')]
|
import numpy as np
# We create a rank 1 ndarray
x = np.array([1,2,3,4])
# We print x
print()
print('x = ', x)
# We apply different mathematical functions to all elements of x
print()
print('EXP(x) =', np.exp(x))
print()
print('SQRT(x) =',np.sqrt(x))
print()
print('POW(x,2) =',np.power(x,2)) # We raise all elements to the power of 2
|
[
"numpy.exp",
"numpy.array",
"numpy.sqrt",
"numpy.power"
] |
[((53, 75), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (61, 75), True, 'import numpy as np\n'), ((204, 213), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (210, 213), True, 'import numpy as np\n'), ((241, 251), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (248, 251), True, 'import numpy as np\n'), ((280, 294), 'numpy.power', 'np.power', (['x', '(2)'], {}), '(x, 2)\n', (288, 294), True, 'import numpy as np\n')]
|
from numpy.testing.utils import assert_equal, assert_raises
from nose import with_setup
from nose.plugins.attrib import attr
from brian2 import *
from brian2.devices.device import restore_device
@attr('standalone-compatible')
@with_setup(teardown=restore_device)
def test_poissoninput():
# Test extreme cases and do a very basic test of an intermediate case, we
# don't want tests to be stochastic
G = NeuronGroup(10, '''x : volt
y : volt
y2 : volt
z : volt
z2 : volt
w : 1''')
G.w = 0.5
never_update = PoissonInput(G, 'x', 100, 0*Hz, weight=1*volt)
always_update = PoissonInput(G, 'y', 50, 1/defaultclock.dt, weight=2*volt)
always_update2 = PoissonInput(G, 'y2', 50, 1/defaultclock.dt, weight='1*volt + 1*volt')
sometimes_update = PoissonInput(G, 'z', 10000, 50*Hz, weight=0.5*volt)
sometimes_update2 = PoissonInput(G, 'z2', 10000, 50*Hz, weight='w*volt')
mon = StateMonitor(G, ['x', 'y', 'y2', 'z', 'z2'], record=True, when='end')
run(1*ms)
assert_equal(0, mon.x[:])
assert_equal(np.tile((1+np.arange(mon.y[:].shape[1]))*50*2*volt, (10, 1)),
mon.y[:])
assert_equal(np.tile((1+np.arange(mon.y[:].shape[1]))*50*2*volt, (10, 1)),
mon.y2[:])
assert all(np.var(mon.z[:], axis=1) > 0) # variability over time
assert all(np.var(mon.z[:], axis=0) > 0) # variability over neurons
assert all(np.var(mon.z2[:], axis=1) > 0) # variability over time
assert all(np.var(mon.z2[:], axis=0) > 0) # variability over neurons
@attr('codegen-independent')
def test_poissoninput_errors():
# Targeting non-existing variable
G = NeuronGroup(10, '''x : volt
y : 1''')
assert_raises(KeyError, lambda: PoissonInput(G, 'z', 100, 100*Hz, weight=1.0))
# Incorrect units
assert_raises(DimensionMismatchError,
lambda: PoissonInput(G, 'x', 100, 100*Hz, weight=1.0))
assert_raises(DimensionMismatchError,
lambda: PoissonInput(G, 'y', 100, 100*Hz, weight=1.0*volt))
# dt change
old_dt = defaultclock.dt
inp = PoissonInput(G, 'x', 100, 100*Hz, weight=1*volt)
defaultclock.dt = 2 * old_dt
net = Network(collect())
assert_raises(NotImplementedError, lambda: net.run(0*ms))
defaultclock.dt = old_dt
if __name__ == '__main__':
# test_poissoninput()
# restore_device()
test_poissoninput_errors()
|
[
"nose.with_setup",
"numpy.testing.utils.assert_equal",
"nose.plugins.attrib.attr"
] |
[((198, 227), 'nose.plugins.attrib.attr', 'attr', (['"""standalone-compatible"""'], {}), "('standalone-compatible')\n", (202, 227), False, 'from nose.plugins.attrib import attr\n'), ((229, 264), 'nose.with_setup', 'with_setup', ([], {'teardown': 'restore_device'}), '(teardown=restore_device)\n', (239, 264), False, 'from nose import with_setup\n'), ((1661, 1688), 'nose.plugins.attrib.attr', 'attr', (['"""codegen-independent"""'], {}), "('codegen-independent')\n", (1665, 1688), False, 'from nose.plugins.attrib import attr\n'), ((1131, 1156), 'numpy.testing.utils.assert_equal', 'assert_equal', (['(0)', 'mon.x[:]'], {}), '(0, mon.x[:])\n', (1143, 1156), False, 'from numpy.testing.utils import assert_equal, assert_raises\n')]
|
from tespy.connections import connection
from tespy.components import source, sink, pipe
from tespy.networks import network
import numpy as np
from matplotlib import pyplot as plt
nw = network(['water'], p_unit='bar', T_unit='C', h_unit='kJ / kg')
# %% components
pi = pipe('pipe')
si = sink('sink')
so = source('source')
# %% connections
a = connection(so, 'out1', pi, 'in1')
b = connection(pi, 'out1', si, 'in1')
nw.add_conns(a, b)
# %% connection parameters
a.set_attr(h=40, fluid={'water': 1}, p=1, m=10)
# %% component parameters
pi.set_attr(ks=1e-4, L=100, D='var', Q=0)
# %% solve
nw.set_attr(iterinfo=False)
# specify different pressure ratios for the pipe, calculate the diameter required
for pr in np.linspace(0.95, 0.999, 10):
pi.set_attr(pr=pr)
nw.solve(mode='design')
print('Pressure ratio: ' + str(round(pr, 3)) + ', diameter: ' + str(round(pi.D.val * 1000, 0)))
|
[
"tespy.components.source",
"tespy.components.sink",
"numpy.linspace",
"tespy.components.pipe",
"tespy.networks.network",
"tespy.connections.connection"
] |
[((187, 249), 'tespy.networks.network', 'network', (["['water']"], {'p_unit': '"""bar"""', 'T_unit': '"""C"""', 'h_unit': '"""kJ / kg"""'}), "(['water'], p_unit='bar', T_unit='C', h_unit='kJ / kg')\n", (194, 249), False, 'from tespy.networks import network\n'), ((272, 284), 'tespy.components.pipe', 'pipe', (['"""pipe"""'], {}), "('pipe')\n", (276, 284), False, 'from tespy.components import source, sink, pipe\n'), ((290, 302), 'tespy.components.sink', 'sink', (['"""sink"""'], {}), "('sink')\n", (294, 302), False, 'from tespy.components import source, sink, pipe\n'), ((308, 324), 'tespy.components.source', 'source', (['"""source"""'], {}), "('source')\n", (314, 324), False, 'from tespy.components import source, sink, pipe\n'), ((348, 381), 'tespy.connections.connection', 'connection', (['so', '"""out1"""', 'pi', '"""in1"""'], {}), "(so, 'out1', pi, 'in1')\n", (358, 381), False, 'from tespy.connections import connection\n'), ((386, 419), 'tespy.connections.connection', 'connection', (['pi', '"""out1"""', 'si', '"""in1"""'], {}), "(pi, 'out1', si, 'in1')\n", (396, 419), False, 'from tespy.connections import connection\n'), ((722, 750), 'numpy.linspace', 'np.linspace', (['(0.95)', '(0.999)', '(10)'], {}), '(0.95, 0.999, 10)\n', (733, 750), True, 'import numpy as np\n')]
|
import random
import torch
import numpy as np
from torch_geometric.utils import degree, to_undirected
def negative_sampling(edge_index, num_nodes=None, num_neg_samples=None,
force_undirected=False):
num_neg_samples = num_neg_samples or edge_index.size(1)
# Handle '|V|^2 - |E| < |E|' case for G = (V, E).
num_neg_samples = min(num_neg_samples,
num_nodes * num_nodes - edge_index.size(1))
rng = range(num_nodes**2)
# idx = N * i + j
idx = (edge_index[0] * num_nodes + edge_index[1]).to('cpu')
perm = torch.tensor(random.sample(rng, num_neg_samples))
    # mark sampled indices that collide with existing (positive) edges as True
mask = torch.from_numpy(np.isin(perm, idx)).to(torch.bool)
rest = mask.nonzero().view(-1)
while rest.numel() > 0: # pragma: no cover
tmp = torch.tensor(random.sample(rng, rest.size(0)))
mask = torch.from_numpy(np.isin(tmp, idx)).to(torch.bool)
perm[rest] = tmp
rest = rest[mask.nonzero().view(-1)]
    row = perm // num_nodes  # integer division keeps the dtype consistent with col
col = perm % num_nodes
neg_edge_index = torch.stack([row, col], dim=0).long()
return neg_edge_index.to(edge_index.device)
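# Example usage (a sketch, not from the original source): draw as many negative
# edges as there are positive ones for a small 4-node graph.
#
#     edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]])
#     neg_edge_index = negative_sampling(edge_index, num_nodes=4)
#     neg_edge_index.shape   # -> torch.Size([2, 3])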
|
[
"random.sample",
"torch.stack",
"numpy.isin"
] |
[((597, 632), 'random.sample', 'random.sample', (['rng', 'num_neg_samples'], {}), '(rng, num_neg_samples)\n', (610, 632), False, 'import random\n'), ((1079, 1109), 'torch.stack', 'torch.stack', (['[row, col]'], {'dim': '(0)'}), '([row, col], dim=0)\n', (1090, 1109), False, 'import torch\n'), ((686, 704), 'numpy.isin', 'np.isin', (['perm', 'idx'], {}), '(perm, idx)\n', (693, 704), True, 'import numpy as np\n'), ((898, 915), 'numpy.isin', 'np.isin', (['tmp', 'idx'], {}), '(tmp, idx)\n', (905, 915), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os,time,datetime,sys
import numpy as np
import dgcnn
import tensorflow as tf
def round_decimals(val,digits):
factor = float(np.power(10,digits))
return int(val * factor+0.5) / factor
def iteration_from_filename(file_name):
return int((file_name.split('-'))[-1])
def iotest(flags):
# IO configuration
io = dgcnn.io_factory(flags)
io.initialize()
num_entries = io.num_entries()
ctr = 0
while ctr < num_entries:
idx,data,label,weight=io.next()
msg = str(ctr) + '/' + str(num_entries) + ' ... ' + str(idx) + ' ' + str(data[0].shape)
if label:
msg += str(label[0].shape)
if weight:
msg += str(weight[0].shape)
print(msg)
ctr += len(data)
io.finalize()
class Handlers:
sess = None
data_io = None
csv_logger = None
weight_io = None
train_logger = None
iteration = 0
def train(flags):
flags.TRAIN = True
handlers = prepare(flags)
train_loop(flags,handlers)
def inference(flags):
flags.TRAIN = False
handlers = prepare(flags)
inference_loop(flags,handlers)
def prepare(flags):
handlers = Handlers()
# assert
if flags.BATCH_SIZE % (flags.MINIBATCH_SIZE * len(flags.GPUS)):
    msg = '--batch_size (%d) must be a multiple of --gpus (%d) * --minibatch_size (%d)\n'
    msg = msg % (flags.BATCH_SIZE,len(flags.GPUS),flags.MINIBATCH_SIZE)
sys.stderr.write(msg)
sys.exit(1)
# IO configuration
handlers.data_io = dgcnn.io_factory(flags)
handlers.data_io.initialize()
_,train_data,_,_ = handlers.data_io.next()
# Trainer configuration
flags.NUM_CHANNEL = handlers.data_io.num_channels()
handlers.trainer = dgcnn.trainval(flags)
handlers.trainer.initialize()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
handlers.sess = tf.Session(config=config)
init = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
handlers.sess.run(init)
handlers.weight_io = tf.train.Saver(max_to_keep=flags.CHECKPOINT_NUM,
keep_checkpoint_every_n_hours=flags.CHECKPOINT_HOUR)
if flags.WEIGHT_PREFIX:
save_dir = flags.WEIGHT_PREFIX[0:flags.WEIGHT_PREFIX.rfind('/')]
if save_dir and not os.path.isdir(save_dir): os.makedirs(save_dir)
handlers.iteration = 0
loaded_iteration = 0
if flags.MODEL_PATH:
handlers.weight_io.restore(handlers.sess, flags.MODEL_PATH)
loaded_iteration = iteration_from_filename(flags.MODEL_PATH)
if flags.TRAIN: handlers.iteration = loaded_iteration+1
if flags.LOG_DIR:
if not os.path.exists(flags.LOG_DIR): os.mkdir(flags.LOG_DIR)
handlers.train_logger = tf.summary.FileWriter(flags.LOG_DIR)
handlers.train_logger.add_graph(handlers.sess.graph)
logname = '%s/train_log-%07d.csv' % (flags.LOG_DIR,loaded_iteration)
if not flags.TRAIN:
logname = '%s/inference_log-%07d.csv' % (flags.LOG_DIR,loaded_iteration)
handlers.csv_logger = open(logname,'w')
return handlers
def train_loop(flags,handlers):
handlers.csv_logger.write('iter,epoch')
handlers.csv_logger.write(',titer,ttrain,tio,tsave,tsummary')
handlers.csv_logger.write(',tsumiter,tsumtrain,tsumio,tsumsave,tsumsummary')
handlers.csv_logger.write(',loss,accuracy\n')
tsum = 0.
tsum_train = 0.
tsum_io = 0.
tsum_save = 0.
tsum_summary = 0.
while handlers.iteration < flags.ITERATION:
tstamp_iteration = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
tstart_iteration = time.time()
report_step = flags.REPORT_STEP and ((handlers.iteration+1) % flags.REPORT_STEP == 0)
summary_step = flags.SUMMARY_STEP and handlers.train_logger and ((handlers.iteration+1) % flags.SUMMARY_STEP == 0)
checkpt_step = flags.CHECKPOINT_STEP and flags.WEIGHT_PREFIX and ((handlers.iteration+1) % flags.CHECKPOINT_STEP == 0)
tstart = time.time()
idx,data,label,weight = handlers.data_io.next()
tspent_io = time.time() - tstart
tsum_io += tspent_io
current_idx = 0
loss_v = []
accuracy_v = []
handlers.trainer.zero_gradients(handlers.sess)
    # Accumulate gradients
tspent_train = 0.
tspent_summary = 0.
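    # The batch is split into per-GPU minibatches of size MINIBATCH_SIZE; gradients
    # are accumulated over these minibatches and applied once per iteration below.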
while current_idx < flags.BATCH_SIZE:
tstart = time.time()
data_v = []
label_v = []
weight_v = None
if weight is not None: weight_v = []
for _ in flags.GPUS:
start = current_idx
end = current_idx + flags.MINIBATCH_SIZE
data_v.append(data[start:end])
label_v.append(label[start:end])
if weight is not None:
weight_v.append(weight[start:end])
current_idx = end
# compute gradients
make_summary = summary_step and (current_idx == flags.BATCH_SIZE)
res = handlers.trainer.accum_gradient(handlers.sess,data_v,label_v,weight_v,summary=make_summary)
accuracy_v.append(res[1])
loss_v.append(res[2])
tspent_train = tspent_train + (time.time() - tstart)
# log summary
if make_summary:
tstart = time.time()
handlers.train_logger.add_summary(res[3],handlers.iteration)
tspent_summary = time.time() - tstart
# Apply gradients
tstart = time.time()
handlers.trainer.apply_gradient(handlers.sess)
tspent_train = tspent_train + (time.time() - tstart)
tsum_train += tspent_train
tsum_summary += tspent_summary
# Compute loss/accuracy
loss = np.mean(loss_v)
accuracy = np.mean(accuracy_v)
epoch = handlers.iteration * float(flags.BATCH_SIZE) / handlers.data_io.num_entries()
# Save snapshot
tspent_save = 0.
if checkpt_step:
tstart = time.time()
ssf_path = handlers.weight_io.save(handlers.sess,flags.WEIGHT_PREFIX,global_step=handlers.iteration)
tspent_save = time.time() - tstart
print('saved @',ssf_path)
# Report (logger)
if handlers.csv_logger:
tspent_iteration = time.time() - tstart_iteration
tsum += tspent_iteration
csv_data = '%d,%g,' % (handlers.iteration,epoch)
csv_data += '%g,%g,%g,%g,%g,' % (tspent_iteration,tspent_train,tspent_io,tspent_save,tspent_summary)
csv_data += '%g,%g,%g,%g,%g,' % (tsum,tsum_train,tsum_io,tsum_save,tsum_summary)
csv_data += '%g,%g\n' % (loss,accuracy)
handlers.csv_logger.write(csv_data)
# Report (stdout)
if report_step:
loss = round_decimals(loss,4)
accuracy = round_decimals(accuracy,4)
tfrac = round_decimals(tspent_train/tspent_iteration*100.,2)
epoch = round_decimals(epoch,2)
mem = handlers.sess.run(tf.contrib.memory_stats.MaxBytesInUse())
msg = 'Iteration %d (epoch %g) @ %s ... train time fraction %g%% max mem. %g ... loss %g accuracy %g'
msg = msg % (handlers.iteration,epoch,tstamp_iteration,tfrac,mem,loss,accuracy)
print(msg)
sys.stdout.flush()
if handlers.csv_logger: handlers.csv_logger.flush()
if handlers.train_logger: handlers.train_logger.flush()
# Increment iteration counter
handlers.iteration +=1
handlers.train_logger.close()
handlers.csv_logger.close()
handlers.data_io.finalize()
def inference_loop(flags,handlers):
handlers.csv_logger.write('iter,epoch')
handlers.csv_logger.write(',titer,tinference,tio')
handlers.csv_logger.write(',tsumiter,tsuminference,tsumio')
handlers.csv_logger.write(',loss,accuracy\n')
tsum = 0.
tsum_io = 0.
tsum_inference = 0.
while handlers.iteration < flags.ITERATION:
tstamp_iteration = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
tstart_iteration = time.time()
report_step = flags.REPORT_STEP and ((handlers.iteration+1) % flags.REPORT_STEP == 0)
tstart = time.time()
idx,data,label,weight = handlers.data_io.next()
tspent_io = time.time() - tstart
tsum_io += tspent_io
current_idx = 0
softmax_vv = []
loss_v = []
accuracy_v = []
# Run inference
tspent_inference = 0.
tstart = time.time()
while current_idx < flags.BATCH_SIZE:
data_v = []
label_v = None
weight_v = None
if label is not None: label_v = []
if weight is not None: weight_v = []
for _ in flags.GPUS:
start = current_idx
end = current_idx + flags.MINIBATCH_SIZE
data_v.append(data[start:end])
if label is not None:
label_v.append(label[start:end])
if weight is not None:
weight_v.append(weight[start:end])
current_idx = end
# compute gradients
res = handlers.trainer.inference(handlers.sess,data_v,label_v,weight_v)
if flags.LABEL_KEY:
softmax_vv = softmax_vv + res[0:-2]
accuracy_v.append(res[-2])
loss_v.append(res[-1])
else:
softmax_vv = softmax_vv + res
tspent_inference = tspent_inference + (time.time() - tstart)
tsum_inference += tspent_inference
# Store output if requested
if flags.OUTPUT_FILE:
idx_ctr = 0
for softmax_v in softmax_vv:
for softmax in softmax_v:
handlers.data_io.store(idx[idx_ctr],softmax)
idx_ctr += 1
# Compute loss/accuracy
loss,accuracy=[-1,-1]
if flags.LABEL_KEY:
loss = np.mean(loss_v)
accuracy = np.mean(accuracy_v)
epoch = handlers.iteration * float(flags.BATCH_SIZE) / handlers.data_io.num_entries()
# Report (logger)
if handlers.csv_logger:
tspent_iteration = time.time() - tstart_iteration
tsum += tspent_iteration
csv_data = '%d,%g,' % (handlers.iteration,epoch)
csv_data += '%g,%g,%g,' % (tspent_iteration,tspent_inference,tspent_io)
csv_data += '%g,%g,%g,' % (tsum,tsum_inference,tsum_io)
csv_data += '%g,%g\n' % (loss,accuracy)
handlers.csv_logger.write(csv_data)
# Report (stdout)
if report_step:
loss = round_decimals(loss,4)
accuracy = round_decimals(accuracy,4)
tfrac = round_decimals(tspent_inference/tspent_iteration*100.,2)
epoch = round_decimals(epoch,2)
mem = handlers.sess.run(tf.contrib.memory_stats.MaxBytesInUse())
msg = 'Iteration %d (epoch %g) @ %s ... inference time fraction %g%% max mem. %g ... loss %g accuracy %g'
msg = msg % (handlers.iteration,epoch,tstamp_iteration,tfrac,mem,loss,accuracy)
print(msg)
sys.stdout.flush()
if handlers.csv_logger: handlers.csv_logger.flush()
# Increment iteration counter
handlers.iteration +=1
handlers.csv_logger.close()
handlers.data_io.finalize()
|
[
"tensorflow.local_variables_initializer",
"dgcnn.trainval",
"sys.exit",
"numpy.mean",
"os.path.exists",
"tensorflow.Session",
"os.path.isdir",
"os.mkdir",
"tensorflow.ConfigProto",
"sys.stdout.flush",
"dgcnn.io_factory",
"sys.stderr.write",
"tensorflow.summary.FileWriter",
"time.time",
"tensorflow.contrib.memory_stats.MaxBytesInUse",
"os.makedirs",
"numpy.power",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer"
] |
[((434, 457), 'dgcnn.io_factory', 'dgcnn.io_factory', (['flags'], {}), '(flags)\n', (450, 457), False, 'import dgcnn\n'), ((1547, 1570), 'dgcnn.io_factory', 'dgcnn.io_factory', (['flags'], {}), '(flags)\n', (1563, 1570), False, 'import dgcnn\n'), ((1750, 1771), 'dgcnn.trainval', 'dgcnn.trainval', (['flags'], {}), '(flags)\n', (1764, 1771), False, 'import dgcnn\n'), ((1837, 1853), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1851, 1853), True, 'import tensorflow as tf\n'), ((1950, 1975), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1960, 1975), True, 'import tensorflow as tf\n'), ((2131, 2236), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'flags.CHECKPOINT_NUM', 'keep_checkpoint_every_n_hours': 'flags.CHECKPOINT_HOUR'}), '(max_to_keep=flags.CHECKPOINT_NUM,\n keep_checkpoint_every_n_hours=flags.CHECKPOINT_HOUR)\n', (2145, 2236), True, 'import tensorflow as tf\n'), ((243, 263), 'numpy.power', 'np.power', (['(10)', 'digits'], {}), '(10, digits)\n', (251, 263), True, 'import numpy as np\n'), ((1466, 1487), 'sys.stderr.write', 'sys.stderr.write', (['msg'], {}), '(msg)\n', (1482, 1487), False, 'import os, time, datetime, sys\n'), ((1492, 1503), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1500, 1503), False, 'import os, time, datetime, sys\n'), ((1994, 2027), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2025, 2027), True, 'import tensorflow as tf\n'), ((2047, 2079), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (2077, 2079), True, 'import tensorflow as tf\n'), ((2815, 2851), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['flags.LOG_DIR'], {}), '(flags.LOG_DIR)\n', (2836, 2851), True, 'import tensorflow as tf\n'), ((3686, 3697), 'time.time', 'time.time', ([], {}), '()\n', (3695, 3697), False, 'import os, time, datetime, sys\n'), ((4050, 4061), 'time.time', 'time.time', ([], {}), '()\n', (4059, 4061), False, 'import os, time, datetime, sys\n'), ((5370, 5381), 'time.time', 'time.time', ([], {}), '()\n', (5379, 5381), False, 'import os, time, datetime, sys\n'), ((5601, 5616), 'numpy.mean', 'np.mean', (['loss_v'], {}), '(loss_v)\n', (5608, 5616), True, 'import numpy as np\n'), ((5632, 5651), 'numpy.mean', 'np.mean', (['accuracy_v'], {}), '(accuracy_v)\n', (5639, 5651), True, 'import numpy as np\n'), ((7770, 7781), 'time.time', 'time.time', ([], {}), '()\n', (7779, 7781), False, 'import os, time, datetime, sys\n'), ((7892, 7903), 'time.time', 'time.time', ([], {}), '()\n', (7901, 7903), False, 'import os, time, datetime, sys\n'), ((8164, 8175), 'time.time', 'time.time', ([], {}), '()\n', (8173, 8175), False, 'import os, time, datetime, sys\n'), ((2415, 2436), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (2426, 2436), False, 'import os, time, datetime, sys\n'), ((2732, 2761), 'os.path.exists', 'os.path.exists', (['flags.LOG_DIR'], {}), '(flags.LOG_DIR)\n', (2746, 2761), False, 'import os, time, datetime, sys\n'), ((2763, 2786), 'os.mkdir', 'os.mkdir', (['flags.LOG_DIR'], {}), '(flags.LOG_DIR)\n', (2771, 2786), False, 'import os, time, datetime, sys\n'), ((4130, 4141), 'time.time', 'time.time', ([], {}), '()\n', (4139, 4141), False, 'import os, time, datetime, sys\n'), ((4422, 4433), 'time.time', 'time.time', ([], {}), '()\n', (4431, 4433), False, 'import os, time, datetime, sys\n'), ((5819, 5830), 'time.time', 'time.time', ([], {}), '()\n', (5828, 5830), False, 'import os, time, datetime, 
sys\n'), ((7001, 7019), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7017, 7019), False, 'import os, time, datetime, sys\n'), ((7972, 7983), 'time.time', 'time.time', ([], {}), '()\n', (7981, 7983), False, 'import os, time, datetime, sys\n'), ((9404, 9419), 'numpy.mean', 'np.mean', (['loss_v'], {}), '(loss_v)\n', (9411, 9419), True, 'import numpy as np\n'), ((9437, 9456), 'numpy.mean', 'np.mean', (['accuracy_v'], {}), '(accuracy_v)\n', (9444, 9456), True, 'import numpy as np\n'), ((10491, 10509), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10507, 10509), False, 'import os, time, datetime, sys\n'), ((2390, 2413), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (2403, 2413), False, 'import os, time, datetime, sys\n'), ((5208, 5219), 'time.time', 'time.time', ([], {}), '()\n', (5217, 5219), False, 'import os, time, datetime, sys\n'), ((5468, 5479), 'time.time', 'time.time', ([], {}), '()\n', (5477, 5479), False, 'import os, time, datetime, sys\n'), ((5958, 5969), 'time.time', 'time.time', ([], {}), '()\n', (5967, 5969), False, 'import os, time, datetime, sys\n'), ((6086, 6097), 'time.time', 'time.time', ([], {}), '()\n', (6095, 6097), False, 'import os, time, datetime, sys\n'), ((6743, 6782), 'tensorflow.contrib.memory_stats.MaxBytesInUse', 'tf.contrib.memory_stats.MaxBytesInUse', ([], {}), '()\n', (6780, 6782), True, 'import tensorflow as tf\n'), ((9022, 9033), 'time.time', 'time.time', ([], {}), '()\n', (9031, 9033), False, 'import os, time, datetime, sys\n'), ((9622, 9633), 'time.time', 'time.time', ([], {}), '()\n', (9631, 9633), False, 'import os, time, datetime, sys\n'), ((10229, 10268), 'tensorflow.contrib.memory_stats.MaxBytesInUse', 'tf.contrib.memory_stats.MaxBytesInUse', ([], {}), '()\n', (10266, 10268), True, 'import tensorflow as tf\n'), ((3620, 3631), 'time.time', 'time.time', ([], {}), '()\n', (3629, 3631), False, 'import os, time, datetime, sys\n'), ((5126, 5137), 'time.time', 'time.time', ([], {}), '()\n', (5135, 5137), False, 'import os, time, datetime, sys\n'), ((5314, 5325), 'time.time', 'time.time', ([], {}), '()\n', (5323, 5325), False, 'import os, time, datetime, sys\n'), ((7704, 7715), 'time.time', 'time.time', ([], {}), '()\n', (7713, 7715), False, 'import os, time, datetime, sys\n')]
|
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
Some hacky functions
'''
import os, sys
import imp
import tempfile
import shutil
import functools
import itertools
import math
import ctypes
import numpy
import h5py
from pyscf.lib import param
c_double_p = ctypes.POINTER(ctypes.c_double)
c_int_p = ctypes.POINTER(ctypes.c_int)
c_null_ptr = ctypes.POINTER(ctypes.c_void_p)
def load_library(libname):
# numpy 1.6 has bug in ctypeslib.load_library, see numpy/distutils/misc_util.py
if '1.6' in numpy.__version__:
if (sys.platform.startswith('linux') or
sys.platform.startswith('gnukfreebsd')):
so_ext = '.so'
elif sys.platform.startswith('darwin'):
so_ext = '.dylib'
elif sys.platform.startswith('win'):
so_ext = '.dll'
else:
raise OSError('Unknown platform')
libname_so = libname + so_ext
return ctypes.CDLL(os.path.join(os.path.dirname(__file__), libname_so))
else:
_loaderpath = os.path.dirname(__file__)
return numpy.ctypeslib.load_library(libname, _loaderpath)
#Fixme, the standard resource module gives wrong number when objects are released
#see http://fa.bianp.net/blog/2013/different-ways-to-get-memory-consumption-or-lessons-learned-from-memory_profiler/#fn:1
#or use slow functions as memory_profiler._get_memory did
CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
PAGESIZE = os.sysconf("SC_PAGE_SIZE")
def current_memory():
#import resource
#return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
if sys.platform.startswith('linux'):
with open("/proc/%s/statm" % os.getpid()) as f:
vms, rss = [int(x)*PAGESIZE for x in f.readline().split()[:2]]
return rss/1e6, vms/1e6
else:
return 0, 0
def num_threads():
if 'OMP_NUM_THREADS' in os.environ:
return int(os.environ['OMP_NUM_THREADS'])
else:
import multiprocessing
return multiprocessing.cpu_count()
def c_int_arr(m):
npm = numpy.array(m).flatten('C')
arr = (ctypes.c_int * npm.size)(*npm)
# cannot return LP_c_double class,
#Xreturn npm.ctypes.data_as(c_int_p), which destructs npm before return
return arr
def f_int_arr(m):
npm = numpy.array(m).flatten('F')
arr = (ctypes.c_int * npm.size)(*npm)
return arr
def c_double_arr(m):
npm = numpy.array(m).flatten('C')
arr = (ctypes.c_double * npm.size)(*npm)
return arr
def f_double_arr(m):
npm = numpy.array(m).flatten('F')
arr = (ctypes.c_double * npm.size)(*npm)
return arr
def member(test, x, lst):
for l in lst:
if test(x, l):
return True
return False
def remove_dup(test, lst, from_end=False):
if test is None:
return set(lst)
else:
if from_end:
lst = list(reversed(lst))
seen = []
for l in lst:
if not member(test, l, seen):
seen.append(l)
return seen
def remove_if(test, lst):
return [x for x in lst if not test(x)]
def find_if(test, lst):
for l in lst:
if test(l):
return l
raise ValueError('No element of the given list matches the test condition.')
def arg_first_match(test, lst):
for i,x in enumerate(lst):
if test(x):
return i
raise ValueError('No element of the given list matches the test condition.')
def _balanced_partition(cum, ntasks):
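    # Split the cumulative-cost array `cum` into ntasks segments of roughly equal
    # cost and return the displacement indices at the segment boundaries.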
segsize = float(cum[-1]) / ntasks
bounds = numpy.arange(ntasks+1) * segsize
displs = abs(bounds[:,None] - cum).argmin(axis=1)
return displs
def _blocksize_partition(cum, blocksize):
n = len(cum) - 1
displs = [0]
p0 = 0
for i in range(1, n):
if cum[i+1]-cum[p0] > blocksize:
displs.append(i)
p0 = i
displs.append(n)
return displs
def flatten(lst):
'''flatten nested lists
x[0] + x[1] + x[2] + ...
Examples:
>>> flatten([[0, 2], [1], [[9, 8, 7]]])
[0, 2, 1, [9, 8, 7]]
'''
return list(itertools.chain.from_iterable(lst))
def prange(start, end, step):
for i in range(start, end, step):
yield i, min(i+step, end)
def prange_tril(start, stop, blocksize):
'''for p0, p1 in prange_tril: p1*(p1+1)/2-p0*(p0+1)/2 < blocksize'''
idx = numpy.arange(start, stop+1)
cum_costs = idx*(idx+1)//2 - start*(start+1)//2
displs = [x+start for x in _blocksize_partition(cum_costs, blocksize)]
return zip(displs[:-1], displs[1:])
class ctypes_stdout(object):
'''make c-printf output to string, but keep python print in /dev/pts/1.
Note it cannot correctly handle c-printf with GCC, don't know why.
Usage:
with ctypes_stdout() as stdout:
...
print(stdout.read())'''
def __enter__(self):
sys.stdout.flush()
self._contents = None
self.old_stdout_fileno = sys.stdout.fileno()
self.bak_stdout_fd = os.dup(self.old_stdout_fileno)
self.bak_stdout = sys.stdout
self.fd, self.ftmp = tempfile.mkstemp(dir='/dev/shm')
os.dup2(self.fd, self.old_stdout_fileno)
sys.stdout = os.fdopen(self.bak_stdout_fd, 'w')
return self
def __exit__(self, type, value, traceback):
sys.stdout.flush()
os.fsync(self.fd)
self._contents = open(self.ftmp, 'r').read()
os.dup2(self.bak_stdout_fd, self.old_stdout_fileno)
sys.stdout = self.bak_stdout # self.bak_stdout_fd is closed
#os.close(self.fd) is closed when os.fdopen is closed
os.remove(self.ftmp)
def read(self):
if self._contents:
return self._contents
else:
sys.stdout.flush()
#f = os.fdopen(self.fd, 'r') # need to rewind(0) before reading
#f.seek(0)
return open(self.ftmp, 'r').read()
class capture_stdout(object):
'''redirect all stdout (c printf & python print) into a string
Usage:
with capture_stdout() as stdout:
...
print(stdout.read())
'''
def __enter__(self):
sys.stdout.flush()
self._contents = None
self.old_stdout_fileno = sys.stdout.fileno()
self.bak_stdout_fd = os.dup(self.old_stdout_fileno)
self.fd, self.ftmp = tempfile.mkstemp(dir='/dev/shm')
os.dup2(self.fd, self.old_stdout_fileno)
return self
def __exit__(self, type, value, traceback):
sys.stdout.flush()
self._contents = open(self.ftmp, 'r').read()
os.dup2(self.bak_stdout_fd, self.old_stdout_fileno)
os.close(self.bak_stdout_fd)
#os.close(self.fd) will be closed when os.fdopen is closed
os.remove(self.ftmp)
def read(self):
if self._contents:
return self._contents
else:
sys.stdout.flush()
#f = os.fdopen(self.fd, 'r') # need to rewind(0) before reading
#f.seek(0)
return open(self.ftmp, 'r').read()
class quite_run(object):
'''output nothing
Examples
--------
with quite_run():
...
'''
def __enter__(self):
sys.stdout.flush()
self.dirnow = os.getcwd()
self.tmpdir = tempfile.mkdtemp(dir='/dev/shm')
os.chdir(self.tmpdir)
self.old_stdout_fileno = sys.stdout.fileno()
self.bak_stdout_fd = os.dup(self.old_stdout_fileno)
self.fnull = open(os.devnull, 'wb')
os.dup2(self.fnull.fileno(), self.old_stdout_fileno)
def __exit__(self, type, value, traceback):
sys.stdout.flush()
os.dup2(self.bak_stdout_fd, self.old_stdout_fileno)
self.fnull.close()
shutil.rmtree(self.tmpdir)
os.chdir(self.dirnow)
# from pygeocoder
# this decorator lets me use methods as both static and instance methods
# In contrast to classmethod, when obj.function() is called, the first
# argument is obj in omnimethod rather than obj.__class__ in classmethod
class omnimethod(object):
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
return functools.partial(self.func, instance)
class StreamObject(object):
'''For most methods, there are three stream functions to pipe computing stream:
1 ``.set_`` function to update object attributes, eg
``mf = scf.RHF(mol).set(conv_tol=1e-5)`` is identical to proceed in two steps
``mf = scf.RHF(mol); mf.conv_tol=1e-5``
    2 ``.run`` function to execute the kernel function (the function arguments
are passed to kernel function). If keyword arguments is given, it will first
call ``.set`` function to update object attributes then execute the kernel
function. Eg
``mf = scf.RHF(mol).run(dm_init, conv_tol=1e-5)`` is identical to three steps
``mf = scf.RHF(mol); mf.conv_tol=1e-5; mf.kernel(dm_init)``
3 ``.apply`` function to apply the given function/class to the current object
(function arguments and keyword arguments are passed to the given function).
Eg
``mol.apply(scf.RHF).run().apply(mcscf.CASSCF, 6, 4, frozen=4)`` is identical to
``mf = scf.RHF(mol); mf.kernel(); mcscf.CASSCF(mf, 6, 4, frozen=4)``
'''
verbose = 0
stdout = sys.stdout
_keys = set(['verbose', 'stdout'])
def run(self, *args, **kwargs):
'''Call the kernel function of current object. `args` will be passed
to kernel function. `kwargs` will be used to update the attributes of
current object.
'''
self.set(**kwargs)
self.kernel(*args)
return self
def set(self, **kwargs):
'''Update the attributes of the current object.
'''
#if hasattr(self, '_keys'):
# for k,v in kwargs.items():
# setattr(self, k, v)
# if k not in self._keys:
# sys.stderr.write('Warning: %s does not have attribute %s\n'
# % (self.__class__, k))
#else:
for k,v in kwargs.items():
setattr(self, k, v)
return self
def apply(self, fn, *args, **kwargs):
'''Apply the fn to rest arguments: return fn(*args, **kwargs)
'''
return fn(self, *args, **kwargs)
# def _format_args(self, args, kwargs, kernel_kw_lst):
# args1 = [kwargs.pop(k, v) for k, v in kernel_kw_lst]
# return args + args1[len(args):], kwargs
def check_sanity(self):
'''Check misinput of class attributes, check whether a class method is
overwritten. It does not check the attributes which are prefixed with
"_".
'''
if (self.verbose > 0 and # logger.QUIET
hasattr(self, '_keys')):
check_sanity(self, self._keys, self.stdout)
return self
_warn_once_registry = {}
def check_sanity(obj, keysref, stdout=sys.stdout):
'''Check misinput of class attributes, check whether a class method is
overwritten. It does not check the attributes which are prefixed with
"_".
'''
objkeys = [x for x in obj.__dict__ if not x.startswith('_')]
keysub = set(objkeys) - set(keysref)
if keysub:
class_attr = set(dir(obj.__class__))
keyin = keysub.intersection(class_attr)
if keyin:
msg = ('Overwrite attributes %s of %s\n' %
(' '.join(keyin), obj.__class__))
if msg not in _warn_once_registry:
_warn_once_registry[msg] = 1
sys.stderr.write(msg)
if stdout is not sys.stdout:
stdout.write(msg)
keydiff = keysub - class_attr
if keydiff:
msg = ('%s does not have attributes %s\n' %
(obj.__class__, ' '.join(keydiff)))
if msg not in _warn_once_registry:
_warn_once_registry[msg] = 1
sys.stderr.write(msg)
if stdout is not sys.stdout:
stdout.write(msg)
return obj
def with_doc(doc):
'''Use this decorator to add doc string for function
@with_doc(doc)
def fn:
...
makes
fn.__doc__ = doc
'''
def make_fn(fn):
fn.__doc__ = doc
return fn
return make_fn
def overwrite_mro(obj, mro):
'''A hacky function to overwrite the __mro__ attribute'''
class HackMRO(type):
pass
# Overwrite type.mro function so that Temp class can use the given mro
HackMRO.mro = lambda self: mro
if sys.version_info < (3,):
class Temp(obj.__class__):
__metaclass__ = HackMRO
else:
#class Temp(obj.__class__, metaclass=HackMRO):
# pass
raise NotImplementedError()
obj = Temp()
# Delete mro function otherwise all subclass of Temp are not able to
# resolve the right mro
    del HackMRO.mro
return obj
def izip(*args):
'''python2 izip == python3 zip'''
if sys.version_info < (3,):
return itertools.izip(*args)
else:
return zip(*args)
from threading import Thread
from multiprocessing import Queue, Process
class ProcessWithReturnValue(Process):
def __init__(self, group=None, target=None, name=None, args=(),
kwargs=None):
self._q = Queue()
def qwrap(*args, **kwargs):
self._q.put(target(*args, **kwargs))
Process.__init__(self, group, qwrap, name, args, kwargs)
def join(self):
Process.join(self)
return self._q.get()
get = join
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None, args=(),
kwargs=None):
self._q = Queue()
def qwrap(*args, **kwargs):
self._q.put(target(*args, **kwargs))
Thread.__init__(self, group, qwrap, name, args, kwargs)
def join(self):
Thread.join(self)
return self._q.get()
get = join
def background_thread(func, *args, **kwargs):
'''applying function in background'''
thread = ThreadWithReturnValue(target=func, args=args, kwargs=kwargs)
thread.start()
return thread
def background_process(func, *args, **kwargs):
'''applying function in background'''
thread = ProcessWithReturnValue(target=func, args=args, kwargs=kwargs)
thread.start()
return thread
bg = background = bg_thread = background_thread
bp = bg_process = background_process
class H5TmpFile(h5py.File):
def __init__(self, filename=None, *args, **kwargs):
if filename is None:
tmpfile = tempfile.NamedTemporaryFile(dir=param.TMPDIR)
filename = tmpfile.name
h5py.File.__init__(self, filename, *args, **kwargs)
def __del__(self):
self.close()
def finger(a):
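    # cheap deterministic fingerprint of an array: dot product with cos(0..a.size)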
return numpy.dot(numpy.cos(numpy.arange(a.size)), a.ravel())
def ndpointer(*args, **kwargs):
base = numpy.ctypeslib.ndpointer(*args, **kwargs)
@classmethod
def from_param(cls, obj):
if obj is None:
return obj
return base.from_param(obj)
return type(base.__name__, (base,), {'from_param': from_param})
class call_in_background(object):
    '''Asynchronously execute the given function
Usage:
with call_in_background(fun) as async_fun:
async_fun(a, b) # == fun(a, b)
do_something_else()
with call_in_background(fun1, fun2) as (afun1, afun2):
afun2(a, b)
do_something_else()
afun2(a, b)
do_something_else()
afun1(a, b)
do_something_else()
'''
def __init__(self, *fns):
self.fns = fns
self.handler = None
def __enter__(self):
if imp.lock_held():
# Some modules like nosetests, coverage etc
# python -m unittest test_xxx.py or nosetests test_xxx.py
# hang when Python multi-threading was used in the import stage due to (Python
# import lock) bug in the threading module. See also
# https://github.com/paramiko/paramiko/issues/104
# https://docs.python.org/2/library/threading.html#importing-in-threaded-code
            # Disable the asynchronous mode for safe importing
def def_async_fn(fn):
return fn
else:
def def_async_fn(fn):
def async_fn(*args, **kwargs):
if self.handler is not None:
self.handler.join()
self.handler = Thread(target=fn, args=args, kwargs=kwargs)
self.handler.start()
return self.handler
return async_fn
if len(self.fns) == 1:
return def_async_fn(self.fns[0])
else:
return [def_async_fn(fn) for fn in self.fns]
def __exit__(self, type, value, traceback):
if self.handler is not None:
self.handler.join()
if __name__ == '__main__':
for i,j in prange_tril(0, 90, 300):
print(i, j, j*(j+1)//2-i*(i+1)//2)
|
[
"sys.platform.startswith",
"multiprocessing.cpu_count",
"os.fsync",
"numpy.array",
"imp.lock_held",
"itertools.izip",
"numpy.ctypeslib.load_library",
"threading.Thread.join",
"numpy.arange",
"os.remove",
"threading.Thread.__init__",
"os.dup",
"itertools.chain.from_iterable",
"numpy.ctypeslib.ndpointer",
"os.getpid",
"tempfile.NamedTemporaryFile",
"sys.stdout.flush",
"h5py.File.__init__",
"os.close",
"os.path.dirname",
"sys.stderr.write",
"os.sysconf",
"tempfile.mkdtemp",
"os.fdopen",
"multiprocessing.Queue",
"sys.stdout.fileno",
"tempfile.mkstemp",
"multiprocessing.Process.join",
"os.dup2",
"ctypes.POINTER",
"multiprocessing.Process.__init__",
"os.getcwd",
"os.chdir",
"functools.partial",
"shutil.rmtree",
"threading.Thread"
] |
[((267, 298), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_double'], {}), '(ctypes.c_double)\n', (281, 298), False, 'import ctypes\n'), ((309, 337), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_int'], {}), '(ctypes.c_int)\n', (323, 337), False, 'import ctypes\n'), ((351, 382), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_void_p'], {}), '(ctypes.c_void_p)\n', (365, 382), False, 'import ctypes\n'), ((1383, 1407), 'os.sysconf', 'os.sysconf', (['"""SC_CLK_TCK"""'], {}), "('SC_CLK_TCK')\n", (1393, 1407), False, 'import os, sys\n'), ((1419, 1445), 'os.sysconf', 'os.sysconf', (['"""SC_PAGE_SIZE"""'], {}), "('SC_PAGE_SIZE')\n", (1429, 1445), False, 'import os, sys\n'), ((1566, 1598), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (1589, 1598), False, 'import os, sys\n'), ((4284, 4313), 'numpy.arange', 'numpy.arange', (['start', '(stop + 1)'], {}), '(start, stop + 1)\n', (4296, 4313), False, 'import numpy\n'), ((14759, 14801), 'numpy.ctypeslib.ndpointer', 'numpy.ctypeslib.ndpointer', (['*args'], {}), '(*args, **kwargs)\n', (14784, 14801), False, 'import numpy\n'), ((1015, 1040), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1030, 1040), False, 'import os, sys\n'), ((1056, 1106), 'numpy.ctypeslib.load_library', 'numpy.ctypeslib.load_library', (['libname', '_loaderpath'], {}), '(libname, _loaderpath)\n', (1084, 1106), False, 'import numpy\n'), ((1963, 1990), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1988, 1990), False, 'import multiprocessing\n'), ((3484, 3508), 'numpy.arange', 'numpy.arange', (['(ntasks + 1)'], {}), '(ntasks + 1)\n', (3496, 3508), False, 'import numpy\n'), ((4020, 4054), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['lst'], {}), '(lst)\n', (4049, 4054), False, 'import itertools\n'), ((4789, 4807), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4805, 4807), False, 'import os, sys\n'), ((4871, 4890), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (4888, 4890), False, 'import os, sys\n'), ((4920, 4950), 'os.dup', 'os.dup', (['self.old_stdout_fileno'], {}), '(self.old_stdout_fileno)\n', (4926, 4950), False, 'import os, sys\n'), ((5017, 5049), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'dir': '"""/dev/shm"""'}), "(dir='/dev/shm')\n", (5033, 5049), False, 'import tempfile\n'), ((5058, 5098), 'os.dup2', 'os.dup2', (['self.fd', 'self.old_stdout_fileno'], {}), '(self.fd, self.old_stdout_fileno)\n', (5065, 5098), False, 'import os, sys\n'), ((5120, 5154), 'os.fdopen', 'os.fdopen', (['self.bak_stdout_fd', '"""w"""'], {}), "(self.bak_stdout_fd, 'w')\n", (5129, 5154), False, 'import os, sys\n'), ((5231, 5249), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5247, 5249), False, 'import os, sys\n'), ((5258, 5275), 'os.fsync', 'os.fsync', (['self.fd'], {}), '(self.fd)\n', (5266, 5275), False, 'import os, sys\n'), ((5337, 5388), 'os.dup2', 'os.dup2', (['self.bak_stdout_fd', 'self.old_stdout_fileno'], {}), '(self.bak_stdout_fd, self.old_stdout_fileno)\n', (5344, 5388), False, 'import os, sys\n'), ((5527, 5547), 'os.remove', 'os.remove', (['self.ftmp'], {}), '(self.ftmp)\n', (5536, 5547), False, 'import os, sys\n'), ((6056, 6074), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6072, 6074), False, 'import os, sys\n'), ((6138, 6157), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (6155, 6157), False, 'import os, sys\n'), ((6187, 6217), 'os.dup', 'os.dup', (['self.old_stdout_fileno'], {}), 
'(self.old_stdout_fileno)\n', (6193, 6217), False, 'import os, sys\n'), ((6247, 6279), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'dir': '"""/dev/shm"""'}), "(dir='/dev/shm')\n", (6263, 6279), False, 'import tempfile\n'), ((6288, 6328), 'os.dup2', 'os.dup2', (['self.fd', 'self.old_stdout_fileno'], {}), '(self.fd, self.old_stdout_fileno)\n', (6295, 6328), False, 'import os, sys\n'), ((6405, 6423), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6421, 6423), False, 'import os, sys\n'), ((6485, 6536), 'os.dup2', 'os.dup2', (['self.bak_stdout_fd', 'self.old_stdout_fileno'], {}), '(self.bak_stdout_fd, self.old_stdout_fileno)\n', (6492, 6536), False, 'import os, sys\n'), ((6545, 6573), 'os.close', 'os.close', (['self.bak_stdout_fd'], {}), '(self.bak_stdout_fd)\n', (6553, 6573), False, 'import os, sys\n'), ((6649, 6669), 'os.remove', 'os.remove', (['self.ftmp'], {}), '(self.ftmp)\n', (6658, 6669), False, 'import os, sys\n'), ((7092, 7110), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7108, 7110), False, 'import os, sys\n'), ((7133, 7144), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7142, 7144), False, 'import os, sys\n'), ((7167, 7199), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'dir': '"""/dev/shm"""'}), "(dir='/dev/shm')\n", (7183, 7199), False, 'import tempfile\n'), ((7208, 7229), 'os.chdir', 'os.chdir', (['self.tmpdir'], {}), '(self.tmpdir)\n', (7216, 7229), False, 'import os, sys\n'), ((7263, 7282), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (7280, 7282), False, 'import os, sys\n'), ((7312, 7342), 'os.dup', 'os.dup', (['self.old_stdout_fileno'], {}), '(self.old_stdout_fileno)\n', (7318, 7342), False, 'import os, sys\n'), ((7504, 7522), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7520, 7522), False, 'import os, sys\n'), ((7531, 7582), 'os.dup2', 'os.dup2', (['self.bak_stdout_fd', 'self.old_stdout_fileno'], {}), '(self.bak_stdout_fd, self.old_stdout_fileno)\n', (7538, 7582), False, 'import os, sys\n'), ((7618, 7644), 'shutil.rmtree', 'shutil.rmtree', (['self.tmpdir'], {}), '(self.tmpdir)\n', (7631, 7644), False, 'import shutil\n'), ((7653, 7674), 'os.chdir', 'os.chdir', (['self.dirnow'], {}), '(self.dirnow)\n', (7661, 7674), False, 'import os, sys\n'), ((8049, 8087), 'functools.partial', 'functools.partial', (['self.func', 'instance'], {}), '(self.func, instance)\n', (8066, 8087), False, 'import functools\n'), ((12884, 12905), 'itertools.izip', 'itertools.izip', (['*args'], {}), '(*args)\n', (12898, 12905), False, 'import itertools\n'), ((13171, 13178), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (13176, 13178), False, 'from multiprocessing import Queue, Process\n'), ((13272, 13328), 'multiprocessing.Process.__init__', 'Process.__init__', (['self', 'group', 'qwrap', 'name', 'args', 'kwargs'], {}), '(self, group, qwrap, name, args, kwargs)\n', (13288, 13328), False, 'from multiprocessing import Queue, Process\n'), ((13357, 13375), 'multiprocessing.Process.join', 'Process.join', (['self'], {}), '(self)\n', (13369, 13375), False, 'from multiprocessing import Queue, Process\n'), ((13575, 13582), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (13580, 13582), False, 'from multiprocessing import Queue, Process\n'), ((13676, 13731), 'threading.Thread.__init__', 'Thread.__init__', (['self', 'group', 'qwrap', 'name', 'args', 'kwargs'], {}), '(self, group, qwrap, name, args, kwargs)\n', (13691, 13731), False, 'from threading import Thread\n'), ((13760, 13777), 'threading.Thread.join', 'Thread.join', (['self'], {}), '(self)\n', 
(13771, 13777), False, 'from threading import Thread\n'), ((14537, 14588), 'h5py.File.__init__', 'h5py.File.__init__', (['self', 'filename', '*args'], {}), '(self, filename, *args, **kwargs)\n', (14555, 14588), False, 'import h5py\n'), ((15582, 15597), 'imp.lock_held', 'imp.lock_held', ([], {}), '()\n', (15595, 15597), False, 'import imp\n'), ((538, 570), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (561, 570), False, 'import os, sys\n'), ((586, 624), 'sys.platform.startswith', 'sys.platform.startswith', (['"""gnukfreebsd"""'], {}), "('gnukfreebsd')\n", (609, 624), False, 'import os, sys\n'), ((667, 700), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (690, 700), False, 'import os, sys\n'), ((2021, 2035), 'numpy.array', 'numpy.array', (['m'], {}), '(m)\n', (2032, 2035), False, 'import numpy\n'), ((2249, 2263), 'numpy.array', 'numpy.array', (['m'], {}), '(m)\n', (2260, 2263), False, 'import numpy\n'), ((2365, 2379), 'numpy.array', 'numpy.array', (['m'], {}), '(m)\n', (2376, 2379), False, 'import numpy\n'), ((2484, 2498), 'numpy.array', 'numpy.array', (['m'], {}), '(m)\n', (2495, 2498), False, 'import numpy\n'), ((5655, 5673), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5671, 5673), False, 'import os, sys\n'), ((6777, 6795), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6793, 6795), False, 'import os, sys\n'), ((14447, 14492), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'dir': 'param.TMPDIR'}), '(dir=param.TMPDIR)\n', (14474, 14492), False, 'import tempfile\n'), ((14680, 14700), 'numpy.arange', 'numpy.arange', (['a.size'], {}), '(a.size)\n', (14692, 14700), False, 'import numpy\n'), ((745, 775), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (768, 775), False, 'import os, sys\n'), ((943, 968), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (958, 968), False, 'import os, sys\n'), ((11413, 11434), 'sys.stderr.write', 'sys.stderr.write', (['msg'], {}), '(msg)\n', (11429, 11434), False, 'import os, sys\n'), ((11796, 11817), 'sys.stderr.write', 'sys.stderr.write', (['msg'], {}), '(msg)\n', (11812, 11817), False, 'import os, sys\n'), ((1637, 1648), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1646, 1648), False, 'import os, sys\n'), ((16301, 16344), 'threading.Thread', 'Thread', ([], {'target': 'fn', 'args': 'args', 'kwargs': 'kwargs'}), '(target=fn, args=args, kwargs=kwargs)\n', (16307, 16344), False, 'from threading import Thread\n')]
|
# encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import tensorflow.python.platform
from tensorflow.python.platform import gfile
import numpy as np
import tensorflow as tf
import cnn_tiny_model as model
import data_inputs
import cnn_tiny_settings as settings
FLAGS = settings.FLAGS
def eval_once(saver, summary_writer, top_k_op, summary_op):
'''
run eval once.
'''
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
else:
print('No checkpoint file found.')
return
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))
            # number of batches needed to cover all examples
num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
true_count = 0
total_sample_count = num_iter * FLAGS.batch_size
            print('total sample count: %d' % total_sample_count)
step = 0
while step < num_iter and not coord.should_stop():
predictions = sess.run([top_k_op])
true_count += np.sum(predictions)
step += 1
precision = true_count / total_sample_count
print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
summary = tf.Summary()
summary.ParseFromString(sess.run(summary_op))
summary.value.add(tag='Precision @ 1', simple_value=precision)
summary_writer.add_summary(summary, global_step)
except Exception as e:
coord.request_stop(e)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def evaluate():
with tf.Graph().as_default():
        # load the test data
images, labels = data_inputs.inputs('data/train_kirin_norm_32.tfrecords')
logits = model.inference(images)
top_k_op = tf.nn.in_top_k(logits, labels, 1)
variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_average_decay)
variables_to_restore = {}
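        # Restore each trainable variable from its exponential-moving-average shadow
        # copy when available, falling back to the variable's own name otherwise.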
for v in tf.trainable_variables():
if v in tf.trainable_variables():
restore_name = variable_averages.average_name(v)
else:
restore_name = v.op.name
variables_to_restore[restore_name] = v
saver = tf.train.Saver(variables_to_restore)
summary_op = tf.merge_all_summaries()
graph_def = tf.get_default_graph().as_graph_def()
summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, graph_def=graph_def)
while True:
eval_once(saver, summary_writer, top_k_op, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
def main(argv=None):
evaluate()
if __name__ == '__main__':
tf.app.run()
|
[
"cnn_tiny_model.inference",
"time.sleep",
"tensorflow.app.run",
"tensorflow.Graph",
"tensorflow.train.Coordinator",
"tensorflow.Session",
"tensorflow.nn.in_top_k",
"tensorflow.trainable_variables",
"tensorflow.get_default_graph",
"tensorflow.train.SummaryWriter",
"tensorflow.train.get_checkpoint_state",
"tensorflow.train.ExponentialMovingAverage",
"data_inputs.inputs",
"tensorflow.Summary",
"math.ceil",
"tensorflow.train.Saver",
"tensorflow.merge_all_summaries",
"numpy.sum",
"datetime.datetime.now",
"tensorflow.get_collection"
] |
[((3269, 3281), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (3279, 3281), True, 'import tensorflow as tf\n'), ((522, 534), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (532, 534), True, 'import tensorflow as tf\n'), ((559, 610), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (588, 610), True, 'import tensorflow as tf\n'), ((900, 922), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (920, 922), True, 'import tensorflow as tf\n'), ((2217, 2273), 'data_inputs.inputs', 'data_inputs.inputs', (['"""data/train_kirin_norm_32.tfrecords"""'], {}), "('data/train_kirin_norm_32.tfrecords')\n", (2235, 2273), False, 'import data_inputs\n'), ((2291, 2314), 'cnn_tiny_model.inference', 'model.inference', (['images'], {}), '(images)\n', (2306, 2314), True, 'import cnn_tiny_model as model\n'), ((2335, 2368), 'tensorflow.nn.in_top_k', 'tf.nn.in_top_k', (['logits', 'labels', '(1)'], {}), '(logits, labels, 1)\n', (2349, 2368), True, 'import tensorflow as tf\n'), ((2406, 2467), 'tensorflow.train.ExponentialMovingAverage', 'tf.train.ExponentialMovingAverage', (['FLAGS.moving_average_decay'], {}), '(FLAGS.moving_average_decay)\n', (2439, 2467), True, 'import tensorflow as tf\n'), ((2519, 2543), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2541, 2543), True, 'import tensorflow as tf\n'), ((2782, 2818), 'tensorflow.train.Saver', 'tf.train.Saver', (['variables_to_restore'], {}), '(variables_to_restore)\n', (2796, 2818), True, 'import tensorflow as tf\n'), ((2840, 2864), 'tensorflow.merge_all_summaries', 'tf.merge_all_summaries', ([], {}), '()\n', (2862, 2864), True, 'import tensorflow as tf\n'), ((2949, 3008), 'tensorflow.train.SummaryWriter', 'tf.train.SummaryWriter', (['FLAGS.eval_dir'], {'graph_def': 'graph_def'}), '(FLAGS.eval_dir, graph_def=graph_def)\n', (2971, 3008), True, 'import tensorflow as tf\n'), ((983, 1028), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.QUEUE_RUNNERS'], {}), '(tf.GraphKeys.QUEUE_RUNNERS)\n', (1000, 1028), True, 'import tensorflow as tf\n'), ((1761, 1773), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (1771, 1773), True, 'import tensorflow as tf\n'), ((3162, 3198), 'time.sleep', 'time.sleep', (['FLAGS.eval_interval_secs'], {}), '(FLAGS.eval_interval_secs)\n', (3172, 3198), False, 'import time\n'), ((1175, 1223), 'math.ceil', 'math.ceil', (['(FLAGS.num_examples / FLAGS.batch_size)'], {}), '(FLAGS.num_examples / FLAGS.batch_size)\n', (1184, 1223), False, 'import math\n'), ((1560, 1579), 'numpy.sum', 'np.sum', (['predictions'], {}), '(predictions)\n', (1566, 1579), True, 'import numpy as np\n'), ((2145, 2155), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2153, 2155), True, 'import tensorflow as tf\n'), ((2565, 2589), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2587, 2589), True, 'import tensorflow as tf\n'), ((2886, 2908), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2906, 2908), True, 'import tensorflow as tf\n'), ((1711, 1725), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1723, 1725), False, 'from datetime import datetime\n')]
|
"""surfinBH
========
Surrogate final black hole properties for mergers of binary black holes.
See https://pypi.org/project/surfinBH/ for more details.
"""
__copyright__ = "Copyright (C) 2018 <NAME>"
__email__ = "<EMAIL>"
__status__ = "testing"
__author__ = "<NAME>"
__version__ = "1.1.7"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import os, sys
import h5py
import warnings
from . import _eval_pysur
from ._dataPath import DataPath
#=============================================================================
class SurFinBH(object):
"""
Class to load and evaluate surrogate fits for final BH properties.
Each derived class should do the following:
1. define _load_fits(self, h5file)
2. define _get_fit_params(self, x, fit_key)
3. define _eval_wrapper(self, fit_key, x, **kwargs)
4. define soft_param_lims and hard_param_lims.
5. define _extra_regression_kwargs, to test any additional kwargs used in
the _eval_wrapper method.
See _fit_evaluators.fit_7dq2.py for an example.
"""
#-------------------------------------------------------------------------
def __init__(self, name, soft_param_lims, hard_param_lims,
aligned_spin_only=False):
"""
name: Name of the fit excluding the surfinBH prefix. Ex: 7dq2.
soft_param_lims: param limits beyond which to raise a warning.
hard_param_lims: param limits beyond which to raise an error.
aligned_spin_only: raise an error if given precessing spins.
See _fit_evaluators.fit_7dq2.py for an example.
"""
self.name = name
self.soft_param_lims = soft_param_lims
self.hard_param_lims = hard_param_lims
self.aligned_spin_only = aligned_spin_only
h5file = h5py.File('%s/fit_%s.h5'%(DataPath(), name), 'r')
self.fits = self._load_fits(h5file)
h5file.close()
#-------------------------------------------------------------------------
def _read_dict(self, f):
""" Converts h5 groups to dictionaries
"""
d = {}
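        # Datasets become plain values ("NONE"/"EMPTYARR" sentinels are decoded);
        # groups prefixed with DICT_/LIST_ are recursed into dicts/lists respectively.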
for k, item in f.items():
if type(item) == h5py._hl.dataset.Dataset:
v = item[()]
if type(v) == np.string_:
v = str(v)
if type(v) == str and v == "NONE":
d[k] = None
elif type(v) == str and v == "EMPTYARR":
d[k] = np.array([])
elif isinstance(v, bytes):
d[k] = v.decode('utf-8')
else:
d[k] = v
elif k[:5] == "DICT_":
d[k[5:]] = self._read_dict(item)
elif k[:5] == "LIST_":
tmpD = self._read_dict(item)
d[k[5:]] = [tmpD[str(i)] for i in range(len(tmpD))]
return d
#-------------------------------------------------------------------------
def _load_scalar_fit(self, fit_key=None, h5file=None, fit_data=None):
""" Loads a single fit
"""
if (fit_key is None) ^ (h5file is None):
raise ValueError("Either specify both fit_key and h5file, or"
" neither")
if not ((fit_key is None) ^ (fit_data is None)):
raise ValueError("Specify exactly one of fit_key and fit_data.")
if fit_data is None:
fit_data = self._read_dict(h5file[fit_key])
if 'fitType' in fit_data.keys() and fit_data['fitType'] == 'GPR':
fit = _eval_pysur.evaluate_fit.getGPRFitAndErrorEvaluator(fit_data)
else:
fit = _eval_pysur.evaluate_fit.getFitEvaluator(fit_data)
return fit
#-------------------------------------------------------------------------
def _load_vector_fit(self, fit_key, h5file):
""" Loads a vector of fits
"""
vector_fit = []
for i in range(len(h5file[fit_key].keys())):
fit_data = self._read_dict(h5file[fit_key]['comp_%d'%i])
vector_fit.append(self._load_scalar_fit(fit_data=fit_data))
return vector_fit
#-------------------------------------------------------------------------
def _evaluate_fits(self, x, fit_key):
""" Evaluates a particular fit by passing fit_key to self.fits.
Assumes self._get_fit_params() has been overriden.
"""
fit = self.fits[fit_key]
fit_params = self._get_fit_params(np.copy(x), fit_key)
if type(fit) == list:
res = []
for i in range(len(fit)):
res.append(fit[i](fit_params))
return np.array(res)
else:
return fit(fit_params)
#-------------------------------------------------------------------------
def _check_unused_kwargs(self, kwargs):
""" Call this at the end of call module to check if all the kwargs have
been used. Assumes kwargs were extracted using pop.
"""
if len(kwargs.keys()) != 0:
unused = ""
for k in kwargs.keys():
unused += "'%s', "%k
if unused[-2:] == ", ": # get rid of trailing comma
unused = unused[:-2]
raise Exception('Unused keys in kwargs: %s'%unused)
#-------------------------------------------------------------------------
def _check_param_limits(self, q, chiA, chiB, allow_extrap):
""" Checks that params are within allowed range of paramters.
Raises a warning if outside self.soft_param_lims limits and
raises an error if outside self.hard_param_lims.
If allow_extrap=True, skips these checks.
"""
if q < 1:
raise ValueError('Mass ratio should be >= 1.')
chiAmag = np.sqrt(np.sum(chiA**2))
chiBmag = np.sqrt(np.sum(chiB**2))
if chiAmag > 1 + 1e-14:
raise ValueError('Spin magnitude of BhA > 1.')
if chiBmag > 1 + 1e-14:
raise ValueError('Spin magnitude of BhB > 1.')
chiA = np.atleast_1d(chiA)
chiB = np.atleast_1d(chiB)
if len(chiA) != 3 or len(chiB) != 3:
raise TypeError("Expected input spins to be 3-vectors.")
if self.aligned_spin_only:
if np.sqrt(np.sum(chiA[:2]**2)) > 1e-14:
raise ValueError('The x & y components of chiA should be zero.')
if np.sqrt(np.sum(chiB[:2]**2)) > 1e-14:
raise ValueError('The x & y components of chiB should be zero.')
# Do not check param limits if allow_extrap=True
if allow_extrap:
return
if q > self.hard_param_lims['q']+ 1e-14:
raise ValueError('Mass ratio outside allowed range.')
elif q > self.soft_param_lims['q']:
warnings.warn('Mass ratio outside training range.')
if chiAmag > self.hard_param_lims['chiAmag']+ 1e-14:
raise ValueError('Spin magnitude of BhA outside allowed range.')
elif chiAmag > self.soft_param_lims['chiAmag']:
warnings.warn('Spin magnitude of BhA outside training range.')
if chiBmag > self.hard_param_lims['chiBmag']+ 1e-14:
raise ValueError('Spin magnitude of BhB outside allowed range.')
elif chiBmag > self.soft_param_lims['chiBmag']:
warnings.warn('Spin magnitude of BhB outside training range.')
#-------------------------------------------------------------------------
def _generate_random_params_for_tests(self):
""" Generate random parameters to use in tests.
"""
# Generate params randomly within allowed values
q = np.random.uniform(1, self.hard_param_lims['q'])
chiAmag= np.random.uniform(0, self.hard_param_lims['chiAmag'])
chiBmag= np.random.uniform(0, self.hard_param_lims['chiBmag'])
if self.aligned_spin_only:
chiAph, chiBph = 0, 0
chiAth, chiBth = np.random.choice([0, np.pi]), \
np.random.choice([0, np.pi])
else:
chiAth = np.arccos(np.random.uniform(-1., 1.))
chiBth = np.arccos(np.random.uniform(-1., 1.))
chiAph = np.random.uniform(0, 2*np.pi)
chiBph = np.random.uniform(0, 2*np.pi)
chiA = [chiAmag*np.sin(chiAth)*np.cos(chiAph),
chiAmag*np.sin(chiAth)*np.sin(chiAph),
chiAmag*np.cos(chiAth)]
chiB = [chiBmag*np.sin(chiBth)*np.cos(chiBph),
chiBmag*np.sin(chiBth)*np.sin(chiBph),
chiBmag*np.cos(chiBth)]
return q, chiA, chiB
#-------------------------------------------------------------------------
#---------------------- Override these ---------------------------------
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
def _load_fits(self, h5file):
""" Loads fits from h5file and returns a dictionary of fits. """
raise NotImplementedError("Please override me.")
return fits
#-------------------------------------------------------------------------
def _extra_regression_kwargs(self):
""" Add additional kwargs for regression tests. If not overriden,
this will be empty. See _fit_evaluators.fit_7dq2.py for an example.
"""
return []
#-------------------------------------------------------------------------
def _get_fit_params(self, x, fit_key):
""" Maps from input params x to the fit_params used to evaluate the
fit.
"""
raise NotImplementedError("Please override me.")
return fit_params
#-------------------------------------------------------------------------
def _eval_wrapper(self, fit_key, q, chiA, chiB, **kwargs):
""" Evaluates a particular fit. This varies for each surrogate.
        Allowed values for fit_key are 'mf', 'chif', 'vf' and 'all'.
chiA and chiB should have size 3.
Each derived class should have its own _eval_wrapper function but
call self._check_param_limits() first to do some sanity checks.
See _fit_evaluators.fit_7dq2.py for an example.
"""
raise NotImplementedError("Please override me.")
#-------------------------------------------------------------------------
#---------------------- Call methods ---------------------------------
#-------------------------------------------------------------------------
def mf(self, *args, **kwargs):
""" Evaluates fit and 1-sigma error estimate for remnant mass.
Returns:
mf, mf_err_est
"""
return self._eval_wrapper('mf', *args, **kwargs)
def chif(self, *args, **kwargs):
""" Evaluates fit and 1-sigma error estimate for remnant spin.
Returns:
chif, chif_err_est
chif and chif_err_est are arrays of size 3.
"""
return self._eval_wrapper('chif', *args, **kwargs)
def vf(self, *args, **kwargs):
""" Evaluates fit and 1-sigma error estimate for remnant kick velocity.
Returns:
vf, vf_err_est
vf and vf_err_est are arrays of size 3.
"""
return self._eval_wrapper('vf', *args, **kwargs)
def all(self, *args, **kwargs):
""" Evaluates fit and 1-sigma error estimate for remnant mass, spin
and kick velocity.
Returns:
mf, chif, vf, mf_err_est, chif_err_est, vf_err_est
chif, vf, chif_err_est and vf_err_est are arrays of size 3.
"""
return self._eval_wrapper('all', *args, **kwargs)
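# Illustrative call pattern for a concrete subclass (hypothetical names; a real
# surrogate fit class derived from SurFinBH would define _load_fits etc.):
#     fit = SomeRemnantFit()                     # hypothetical derived class
#     mf, mf_err = fit.mf(q, chiA, chiB)         # remnant mass and 1-sigma error estimate
#     chif, chif_err = fit.chif(q, chiA, chiB)   # remnant spin (3-vector) and error estimate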
|
[
"numpy.copy",
"numpy.random.choice",
"numpy.sin",
"numpy.array",
"numpy.sum",
"numpy.cos",
"numpy.random.uniform",
"warnings.warn",
"numpy.atleast_1d"
] |
[((7008, 7027), 'numpy.atleast_1d', 'np.atleast_1d', (['chiA'], {}), '(chiA)\n', (7021, 7027), True, 'import numpy as np\n'), ((7043, 7062), 'numpy.atleast_1d', 'np.atleast_1d', (['chiB'], {}), '(chiB)\n', (7056, 7062), True, 'import numpy as np\n'), ((8613, 8660), 'numpy.random.uniform', 'np.random.uniform', (['(1)', "self.hard_param_lims['q']"], {}), "(1, self.hard_param_lims['q'])\n", (8630, 8660), True, 'import numpy as np\n'), ((8678, 8731), 'numpy.random.uniform', 'np.random.uniform', (['(0)', "self.hard_param_lims['chiAmag']"], {}), "(0, self.hard_param_lims['chiAmag'])\n", (8695, 8731), True, 'import numpy as np\n'), ((8749, 8802), 'numpy.random.uniform', 'np.random.uniform', (['(0)', "self.hard_param_lims['chiBmag']"], {}), "(0, self.hard_param_lims['chiBmag'])\n", (8766, 8802), True, 'import numpy as np\n'), ((5427, 5437), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (5434, 5437), True, 'import numpy as np\n'), ((5603, 5616), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (5611, 5616), True, 'import numpy as np\n'), ((6750, 6767), 'numpy.sum', 'np.sum', (['(chiA ** 2)'], {}), '(chiA ** 2)\n', (6756, 6767), True, 'import numpy as np\n'), ((6793, 6810), 'numpy.sum', 'np.sum', (['(chiB ** 2)'], {}), '(chiB ** 2)\n', (6799, 6810), True, 'import numpy as np\n'), ((9131, 9162), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (9148, 9162), True, 'import numpy as np\n'), ((9182, 9213), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (9199, 9213), True, 'import numpy as np\n'), ((7755, 7806), 'warnings.warn', 'warnings.warn', (['"""Mass ratio outside training range."""'], {}), "('Mass ratio outside training range.')\n", (7768, 7806), False, 'import warnings\n'), ((8014, 8076), 'warnings.warn', 'warnings.warn', (['"""Spin magnitude of BhA outside training range."""'], {}), "('Spin magnitude of BhA outside training range.')\n", (8027, 8076), False, 'import warnings\n'), ((8284, 8346), 'warnings.warn', 'warnings.warn', (['"""Spin magnitude of BhB outside training range."""'], {}), "('Spin magnitude of BhB outside training range.')\n", (8297, 8346), False, 'import warnings\n'), ((8901, 8929), 'numpy.random.choice', 'np.random.choice', (['[0, np.pi]'], {}), '([0, np.pi])\n', (8917, 8929), True, 'import numpy as np\n'), ((8949, 8977), 'numpy.random.choice', 'np.random.choice', (['[0, np.pi]'], {}), '([0, np.pi])\n', (8965, 8977), True, 'import numpy as np\n'), ((9023, 9051), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (9040, 9051), True, 'import numpy as np\n'), ((9082, 9110), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (9099, 9110), True, 'import numpy as np\n'), ((9252, 9266), 'numpy.cos', 'np.cos', (['chiAph'], {}), '(chiAph)\n', (9258, 9266), True, 'import numpy as np\n'), ((9307, 9321), 'numpy.sin', 'np.sin', (['chiAph'], {}), '(chiAph)\n', (9313, 9321), True, 'import numpy as np\n'), ((9347, 9361), 'numpy.cos', 'np.cos', (['chiAth'], {}), '(chiAth)\n', (9353, 9361), True, 'import numpy as np\n'), ((9403, 9417), 'numpy.cos', 'np.cos', (['chiBph'], {}), '(chiBph)\n', (9409, 9417), True, 'import numpy as np\n'), ((9458, 9472), 'numpy.sin', 'np.sin', (['chiBph'], {}), '(chiBph)\n', (9464, 9472), True, 'import numpy as np\n'), ((9498, 9512), 'numpy.cos', 'np.cos', (['chiBth'], {}), '(chiBth)\n', (9504, 9512), True, 'import numpy as np\n'), ((7236, 7257), 'numpy.sum', 'np.sum', (['(chiA[:2] ** 2)'], {}), 
'(chiA[:2] ** 2)\n', (7242, 7257), True, 'import numpy as np\n'), ((7370, 7391), 'numpy.sum', 'np.sum', (['(chiB[:2] ** 2)'], {}), '(chiB[:2] ** 2)\n', (7376, 7391), True, 'import numpy as np\n'), ((9237, 9251), 'numpy.sin', 'np.sin', (['chiAth'], {}), '(chiAth)\n', (9243, 9251), True, 'import numpy as np\n'), ((9292, 9306), 'numpy.sin', 'np.sin', (['chiAth'], {}), '(chiAth)\n', (9298, 9306), True, 'import numpy as np\n'), ((9388, 9402), 'numpy.sin', 'np.sin', (['chiBth'], {}), '(chiBth)\n', (9394, 9402), True, 'import numpy as np\n'), ((9443, 9457), 'numpy.sin', 'np.sin', (['chiBth'], {}), '(chiBth)\n', (9449, 9457), True, 'import numpy as np\n'), ((3434, 3446), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3442, 3446), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import math
import itertools
from scipy.linalg import svd, norm
# general messages for LM/etc optimization
TERMINATION_MESSAGES = {
None: "Status returned `None`. Error.",
-1: "Improper input parameters status returned from `leastsq`",
0: "The maximum number of iterations is exceeded.",
1: "`gtol` termination condition is satisfied. (small change in Jacobian)",
2: "`ftol` termination condition is satisfied. (small change in cost)",
3: "`xtol` termination condition is satisfied. (small step)",
4: "Both `ftol`(cost) and `xtol`(step) termination conditions are satisfied."
}
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def next_pow2(i):
"""
    Find the exponent of the next power of two
    >>> 2 ** next_pow2(5)
    8
    >>> 2 ** next_pow2(250)
    256
"""
# do not use NumPy here, math is much faster for single values
exponent = math.ceil(math.log(i) / math.log(2))
# the value: int(math.pow(2, exponent))
return exponent
def prime_factor(n):
"""Find the prime factorization of n
Efficient implementation. Find the factorization by trial division, using
the optimization of dividing only by two and the odd integers.
An improvement on trial division by two and the odd numbers is wheel
factorization, which uses a cyclic set of gaps between potential primes to
greatly reduce the number of trial divisions. Here we use a 2,3,5-wheel
Factoring wheels have the same O(sqrt(n)) time complexity as normal trial
division, but will be two or three times faster in practice.
    >>> list(prime_factor(90))
[2, 3, 3, 5]
"""
f = 2
increments = itertools.chain([1,2,2], itertools.cycle([4,2,4,2,4,6,2,6]))
for incr in increments:
if f*f > n:
break
while n % f == 0:
yield f
n //= f
f += incr
if n > 1:
yield n
def db(x, r=1):
"""relative value in dB
TODO: Maybe x should be rescaled to ]0..1].?
    log10(0) = -inf.
Parameters
----------
x: array like
r: float, optional
Reference value. default = 1
Notes
-----
https://en.wikipedia.org/wiki/Decibel#Field_quantities_and_root-power_quantities
"""
if not math.isclose(r, 1, rel_tol=1e-6):
x = x/r
    # don't nag if x=0
with np.errstate(divide='ignore', invalid='ignore'):
return 20*np.log10(np.abs(x))
def import_npz(npz_file, namespace=globals()):
"""Load npz file and unpack data/dictionary to the given namespace
    It is necessary to explicitly call the function with globals() even if it is
    set as the default value here. The docs state that the scope is the defining
    module, not the calling one.
Example for `oneliner` without using namespace(can only be used local)
for varName in data.files:
exec(varName + " = data['" + varName + "']")
Notes:
------
https://docs.python.org/3/library/functions.html#globals
"""
data = np.load(npz_file)
for varName in data:
try:
namespace[varName] = data[varName].item()
except ValueError:
namespace[varName] = data[varName]
def window(iterable, n=3):
"""Returns a sliding window (of width n) over data from the iterable
s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ..."""
# https://stackoverflow.com/a/6822773/1121523
it = iter(iterable)
result = tuple(itertools.islice(it, n))
if len(result) == n:
yield result
for element in it:
result = result[1:] + (element,)
yield result
def rescale(x, mini=None, maxi=None):
"""Rescale x to 0-1.
    If mini and maxi are given, they are used as the values that get mapped
    to 0 and 1, respectively.
Notes
-----
To 0..1:
z_i = (x_i− min(x)) / (max(x)−min(x))
Or custom range:
a = (maxval-minval) / (max(x)-min(x))
b = maxval - a * max(x)
z = a * x + b
"""
if hasattr(x, "__len__") is False:
return x
if mini is None:
mini = np.min(x)
if maxi is None:
maxi = np.max(x)
return (x - mini) / (maxi - mini)
def meanVar(Y, isnoise=False):
"""
Y = fft(y)/nsper
Parameters
----------
Y : ndarray (ndof, nsper, nper)
Y is the fft of y
"""
# number of periods
p = Y.shape[2]
# average over periods
Ymean = np.sum(Y,axis=2) / p
# subtract column mean from y in a broadcast way. Ie: y is 3D matrix and
# for every 2D slice we subtract y_mean. Python automatically
# broadcast(repeat) y_mean.
# https://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc
Y0 = Y - Ymean[...,None]
W = []
# weights. Only used if the signal is noisy and multiple periods are
# used
if p > 1 and isnoise:
W = np.sum(np.abs(Y0)**2, axis=2)/(p-1)
return Ymean, W
def weightfcn(cov):
"""Calculate weight. For subspace is the square inverse of covG. For
pnlss it is the square inverse of covY"""
F = cov.shape[0]
covinvsq = np.empty_like(cov)
for f in range(F):
covinvsq[f] = matrix_square_inv(cov[f])
return covinvsq
def matrix_square_inv(A):
"""Calculate the inverse of the matrix square root of `A`
Calculate `X` such that XX = inv(A)
`A` is assumed positive definite, thus the all singular values are strictly
positive. Given the svd decomposition A=UsVᴴ, we see that
AAᴴ = Us²Uᴴ (remember (UsV)ᴴ = VᴴsUᴴ) and it follows that
(AAᴴ)⁻¹/² = Us⁻¹Uᴴ
Returns
-------
X : ndarray(n,n)
Inverse of matrix square root of A
Notes
-----
See the comments here.
https://math.stackexchange.com/questions/106774/matrix-square-root
"""
U, s, _ = svd(A, full_matrices=False)
return U * 1/np.sqrt(s) @ U.conj().T
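# Quick numerical check of matrix_square_inv (illustrative sketch; the matrix
# below is just an arbitrary symmetric positive definite example):
#   A = np.array([[2.0, 0.5], [0.5, 1.0]])
#   X = matrix_square_inv(A)
#   np.allclose(X @ X, np.linalg.inv(A))   # -> True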
def mmul_weight(mat, weight):
"""Add weight. Computes the Jacobian of the weighted error ``e_W(f) = W(f,:,:)*e(f)``
"""
# np.einsum('ijk,kl',weight, mat) or
# np.einsum('ijk,kl->ijl',weight, mat) or
# np.einsum('ijk,jl->ilk',weight,mat)
# np.tensordot(weight, mat, axes=1)
# np.matmul(weight, mat)
return np.matmul(weight, mat)
def normalize_columns(mat):
# Rms values of each column
scaling = np.sqrt(np.mean(mat**2,axis=0))
# or scaling = 1/np.sqrt(mat.shape[0]) * np.linalg.norm(mat,ord=2,axis=0)
# Robustify against columns with zero rms value
scaling[scaling == 0] = 1
# Scale columns with 1/rms value
# This modifies mat in place(ie the input mat). We do not want that.
# mat /= scaling
return mat/scaling, scaling
def lm(fun, x0, jac, info=2, nmax=50, lamb=None, ftol=1e-8, xtol=1e-8,
gtol=1e-8, args=(), kwargs={}):
"""Solve a nonlinear least-squares problem using levenberg marquardt
algorithm. See also :scipy-optimize:func:`scipy.optimize.least_squares`
Parameters
----------
fun : callable
Function which computes the vector of residuals
x0: array_like with shape (n,) or float
Initial guess on independent variables.
jac : callable
Method of computing the Jacobian matrix (an m-by-n matrix, where
element (i, j) is the partial derivative of f[i] with respect to
x[j]).
ftol : float, optional
Tolerance for termination by the change of the cost function. Default
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
and there was an adequate agreement between a local quadratic model and
the true model in the last step.
xtol : float, optional
Tolerance for termination by the change of the independent variables.
Default is 1e-8.
gtol : float, optional
Tolerance for termination by the norm of the gradient. Default is 1e-8.
info : {0, 1, 2}, optional
Level of algorithm's verbosity:
        * 0 : work silently.
        * 1 : display a termination report.
        * 2 (default) : display progress during iterations
"""
# the error vector
err_old = fun(x0, *args, **kwargs)
# Maybe divide by 2 to match scipy's implementation of minpack
cost = np.dot(err_old, err_old)
cost_old = cost.copy()
# Initialization of the Levenberg-Marquardt loop
niter = 0
ninner_max = 10
nfev = 1
status = None
message = ''
cost_vec = np.empty(nmax+1)
x0_mat = np.empty((nmax+1, len(x0)))
# save initial guess
x0_mat[0] = x0.copy()
cost_vec[0] = cost.copy()
if info == 2:
print(f"{'i':3} | {'inner':5} | {'cost':12} | {'cond':12} |"
f" {'lambda':6}")
stop = False
while niter < nmax and not stop:
J = jac(x0, *args, **kwargs)
J, scaling = normalize_columns(J)
U, s, Vt = svd(J, full_matrices=False)
if norm(J) < gtol: # small jacobian
stop = True
status = 1
if lamb is None:
# Initialize lambda as largest sing. value of initial jacobian.
# pinleton2002
lamb = s[0]
# as long as the step is unsuccessful
ninner = 0
# determine rank of jacobian/estimate non-zero singular values(rank
# estimate)
tol = max(J.shape)*np.spacing(max(s))
r = np.sum(s > tol)
# step with direction from err
s = s[:r]
sr = s.copy() # only saved to calculate cond. number later
while cost >= cost_old and ninner < ninner_max and not stop:
s /= (s**2 + lamb**2)
ds = -np.linalg.multi_dot((err_old, U[:,:r] * s, Vt[:r]))
ds /= scaling
x0test = x0 + ds
err = fun(x0test, *args, **kwargs)
cost = np.dot(err,err)
if cost >= cost_old:
# step unsuccessful, increase lambda, ie. Lean more towards
# gradient descent method(converges in larger range)
lamb *= np.sqrt(10)
s = sr.copy()
elif np.isnan(cost):
print('Unstable model. Increasing lambda')
cost = np.inf
lamb *= np.sqrt(10)
s = sr.copy()
else:
# Lean more towards Gauss-Newton algorithm(converges faster)
lamb /= 2
ninner += 1
if norm(ds) < xtol: # small step
stop = True
status = 3
if np.abs((cost-cost_old)/cost) < ftol: # small change in costfcn
stop = True
status = 2 if status is None else 4
if info == 2:
jac_cond = sr[0]/sr[-1]
# {cost/2/nfd/R/p:12.3f} for freq weighting
print(f"{niter:3d} | {ninner:5d} | {cost:12.8g} | {jac_cond:12.3f}"
f" | {lamb:6.3f}")
if cost < cost_old or stop:
cost_old = cost
err_old = err
x0 = x0test
# save intermediate models
x0_mat[niter+1] = x0.copy()
cost_vec[niter+1] = cost.copy()
niter += 1
nfev += ninner
if niter == nmax:
status = 0
message = TERMINATION_MESSAGES[status]
if info > 0:
print(f"Terminated: {message:s}")
print(f"Function evaluations {nfev}, initial cost {cost_vec[0]:.4e}, "
f"final cost {cost:.4e}")
res = {'x':x0, 'cost': cost, 'fun':err, 'niter': niter, 'x_mat':
x0_mat[:niter], 'cost_vec':cost_vec[niter], 'message':message,
'success':status > 0, 'nfev':nfev, 'njev':niter, 'status':status}
return res
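# Illustrative use of lm() on a tiny linear least-squares problem (the data and
# model below are made up for this sketch, not part of the module):
#   t = np.linspace(0, 1, 20)
#   y = 2.0*t + 1.0 + 0.01*np.random.randn(20)
#   fun = lambda x: x[0]*t + x[1] - y                        # residual vector
#   jac = lambda x: np.column_stack((t, np.ones_like(t)))    # Jacobian of fun
#   res = lm(fun, x0=np.array([0.0, 0.0]), jac=jac, info=0)
#   res['x'] should then be close to [2.0, 1.0]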
|
[
"numpy.sqrt",
"numpy.linalg.multi_dot",
"math.log",
"numpy.mean",
"numpy.max",
"numpy.dot",
"numpy.matmul",
"numpy.empty",
"numpy.min",
"numpy.abs",
"itertools.cycle",
"numpy.isnan",
"scipy.linalg.svd",
"itertools.islice",
"math.isclose",
"numpy.errstate",
"numpy.sum",
"numpy.empty_like",
"scipy.linalg.norm",
"numpy.load"
] |
[((3224, 3241), 'numpy.load', 'np.load', (['npz_file'], {}), '(npz_file)\n', (3231, 3241), True, 'import numpy as np\n'), ((5271, 5289), 'numpy.empty_like', 'np.empty_like', (['cov'], {}), '(cov)\n', (5284, 5289), True, 'import numpy as np\n'), ((5964, 5991), 'scipy.linalg.svd', 'svd', (['A'], {'full_matrices': '(False)'}), '(A, full_matrices=False)\n', (5967, 5991), False, 'from scipy.linalg import svd, norm\n'), ((6372, 6394), 'numpy.matmul', 'np.matmul', (['weight', 'mat'], {}), '(weight, mat)\n', (6381, 6394), True, 'import numpy as np\n'), ((8372, 8396), 'numpy.dot', 'np.dot', (['err_old', 'err_old'], {}), '(err_old, err_old)\n', (8378, 8396), True, 'import numpy as np\n'), ((8575, 8593), 'numpy.empty', 'np.empty', (['(nmax + 1)'], {}), '(nmax + 1)\n', (8583, 8593), True, 'import numpy as np\n'), ((1922, 1963), 'itertools.cycle', 'itertools.cycle', (['[4, 2, 4, 2, 4, 6, 2, 6]'], {}), '([4, 2, 4, 2, 4, 6, 2, 6])\n', (1937, 1963), False, 'import itertools\n'), ((2487, 2520), 'math.isclose', 'math.isclose', (['r', '(1)'], {'rel_tol': '(1e-06)'}), '(r, 1, rel_tol=1e-06)\n', (2499, 2520), False, 'import math\n'), ((2569, 2615), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (2580, 2615), True, 'import numpy as np\n'), ((3653, 3676), 'itertools.islice', 'itertools.islice', (['it', 'n'], {}), '(it, n)\n', (3669, 3676), False, 'import itertools\n'), ((4271, 4280), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (4277, 4280), True, 'import numpy as np\n'), ((4317, 4326), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (4323, 4326), True, 'import numpy as np\n'), ((4611, 4628), 'numpy.sum', 'np.sum', (['Y'], {'axis': '(2)'}), '(Y, axis=2)\n', (4617, 4628), True, 'import numpy as np\n'), ((6479, 6504), 'numpy.mean', 'np.mean', (['(mat ** 2)'], {'axis': '(0)'}), '(mat ** 2, axis=0)\n', (6486, 6504), True, 'import numpy as np\n'), ((8988, 9015), 'scipy.linalg.svd', 'svd', (['J'], {'full_matrices': '(False)'}), '(J, full_matrices=False)\n', (8991, 9015), False, 'from scipy.linalg import svd, norm\n'), ((9482, 9497), 'numpy.sum', 'np.sum', (['(s > tol)'], {}), '(s > tol)\n', (9488, 9497), True, 'import numpy as np\n'), ((1145, 1156), 'math.log', 'math.log', (['i'], {}), '(i)\n', (1153, 1156), False, 'import math\n'), ((1159, 1170), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (1167, 1170), False, 'import math\n'), ((6009, 6019), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (6016, 6019), True, 'import numpy as np\n'), ((9028, 9035), 'scipy.linalg.norm', 'norm', (['J'], {}), '(J)\n', (9032, 9035), False, 'from scipy.linalg import svd, norm\n'), ((9919, 9935), 'numpy.dot', 'np.dot', (['err', 'err'], {}), '(err, err)\n', (9925, 9935), True, 'import numpy as np\n'), ((2644, 2653), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (2650, 2653), True, 'import numpy as np\n'), ((9745, 9797), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['(err_old, U[:, :r] * s, Vt[:r])'], {}), '((err_old, U[:, :r] * s, Vt[:r]))\n', (9764, 9797), True, 'import numpy as np\n'), ((10138, 10149), 'numpy.sqrt', 'np.sqrt', (['(10)'], {}), '(10)\n', (10145, 10149), True, 'import numpy as np\n'), ((10197, 10211), 'numpy.isnan', 'np.isnan', (['cost'], {}), '(cost)\n', (10205, 10211), True, 'import numpy as np\n'), ((10529, 10537), 'scipy.linalg.norm', 'norm', (['ds'], {}), '(ds)\n', (10533, 10537), False, 'from scipy.linalg import svd, norm\n'), ((10630, 10662), 'numpy.abs', 'np.abs', (['((cost - cost_old) / cost)'], {}), '((cost - cost_old) / 
cost)\n', (10636, 10662), True, 'import numpy as np\n'), ((5044, 5054), 'numpy.abs', 'np.abs', (['Y0'], {}), '(Y0)\n', (5050, 5054), True, 'import numpy as np\n'), ((10326, 10337), 'numpy.sqrt', 'np.sqrt', (['(10)'], {}), '(10)\n', (10333, 10337), True, 'import numpy as np\n')]
|
# -----------------------------------
#
# 01_02 Linear Regression
#
# -----------------------------------
import numpy as np
import tensorflow as tf
tf.enable_eager_execution()
# -----------------------------------
# 1. Data
# -----------------------------------
X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)
X = (X_raw - X_raw.mean()) / X_raw.std()
y = (y_raw - y_raw.mean()) / y_raw.std()
# -----------------------------------
# 2. Theory
# -----------------------------------
# -----------------------------------
# 3. Numpy
# -----------------------------------
w, b = 0, 0
num_epoch = 1000
learning_rate = 1e-3
for e in range(num_epoch):
    # Manually compute the gradient of the loss function w.r.t. the model parameters
y_pred = w * X + b
grad_w, grad_b = (y_pred - y).dot(X), (y_pred - y).sum()
    # Update the parameters
w, b = w - learning_rate * grad_w, b - learning_rate * grad_b
print(w, b)
# -----------------------------------
# 4. TensorFlow
# -----------------------------------
X = tf.constant(X)
y = tf.constant(y)
w = tf.get_variable('w', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
b = tf.get_variable('b', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
variables = [w, b]
num_epoch = 1000
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-3)
for e in range(num_epoch):
    # Use tf.GradientTape() to record the gradient of the loss function
with tf.GradientTape() as tape:
y_pred = w * X + b
loss = 0.5 * tf.reduce_sum(tf.square(y_pred - y))
    # TensorFlow automatically computes the gradient of the loss w.r.t. the model parameters
grads = tape.gradient(loss, variables)
    # TensorFlow automatically updates the parameters from the gradients
optimizer.apply_gradients(grads_and_vars=zip(grads, variables))
print(w, b)
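# Sanity check (illustrative): on the standardized data above, the closed-form
# least-squares solution is w = sum(X*y)/sum(X*X) and b = 0, so both the NumPy
# loop and the TensorFlow loop should end up close to those values.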
|
[
"tensorflow.get_variable",
"tensorflow.enable_eager_execution",
"tensorflow.train.GradientDescentOptimizer",
"numpy.array",
"tensorflow.GradientTape",
"tensorflow.constant",
"tensorflow.square"
] |
[((139, 166), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (164, 166), True, 'import tensorflow as tf\n'), ((261, 319), 'numpy.array', 'np.array', (['[2013, 2014, 2015, 2016, 2017]'], {'dtype': 'np.float32'}), '([2013, 2014, 2015, 2016, 2017], dtype=np.float32)\n', (269, 319), True, 'import numpy as np\n'), ((328, 391), 'numpy.array', 'np.array', (['[12000, 14000, 15000, 16500, 17500]'], {'dtype': 'np.float32'}), '([12000, 14000, 15000, 16500, 17500], dtype=np.float32)\n', (336, 391), True, 'import numpy as np\n'), ((1029, 1043), 'tensorflow.constant', 'tf.constant', (['X'], {}), '(X)\n', (1040, 1043), True, 'import tensorflow as tf\n'), ((1048, 1062), 'tensorflow.constant', 'tf.constant', (['y'], {}), '(y)\n', (1059, 1062), True, 'import tensorflow as tf\n'), ((1068, 1155), 'tensorflow.get_variable', 'tf.get_variable', (['"""w"""'], {'dtype': 'tf.float32', 'shape': '[]', 'initializer': 'tf.zeros_initializer'}), "('w', dtype=tf.float32, shape=[], initializer=tf.\n zeros_initializer)\n", (1083, 1155), True, 'import tensorflow as tf\n'), ((1155, 1242), 'tensorflow.get_variable', 'tf.get_variable', (['"""b"""'], {'dtype': 'tf.float32', 'shape': '[]', 'initializer': 'tf.zeros_initializer'}), "('b', dtype=tf.float32, shape=[], initializer=tf.\n zeros_initializer)\n", (1170, 1242), True, 'import tensorflow as tf\n'), ((1287, 1341), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1320, 1341), True, 'import tensorflow as tf\n'), ((1414, 1431), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1429, 1431), True, 'import tensorflow as tf\n'), ((1503, 1524), 'tensorflow.square', 'tf.square', (['(y_pred - y)'], {}), '(y_pred - y)\n', (1512, 1524), True, 'import tensorflow as tf\n')]
|
import cv2
import numpy as np
drawing = False # true if mouse is pressed
mode = True # if True, draw rectangle. Press 'm' to toggle to curve
ix,iy = -1,-1
# mouse callback function
def draw_circle(event,x,y,flags,param):
global ix,iy,drawing,mode
if event == cv2.EVENT_LBUTTONDOWN:
drawing = True
ix,iy = x,y
elif event == cv2.EVENT_MOUSEMOVE:
if drawing == True:
if mode == True:
cv2.rectangle(img,(ix,iy),(x,y),(0,255,0),-1)
else:
cv2.circle(img,(x,y),5,(0,0,255),-1)
elif event == cv2.EVENT_LBUTTONUP:
drawing = False
if mode == True:
cv2.rectangle(img,(ix,iy),(x,y),(0,255,0),-1)
else:
cv2.circle(img,(x,y),5,(0,0,255),-1)
img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_circle)
while(1):
cv2.imshow('image', img)
k = cv2.waitKey(1) & 0xff
if k == ord('m'):
mode = not mode
elif k == ord('q'):
break
cv2.destroyAllWindows()
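# Usage: drag with the left mouse button to draw (rectangles by default),
# press 'm' to toggle between rectangle and circle mode, and 'q' to quit.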
|
[
"cv2.setMouseCallback",
"cv2.rectangle",
"cv2.imshow",
"numpy.zeros",
"cv2.circle",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.namedWindow"
] |
[((783, 816), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (791, 816), True, 'import numpy as np\n'), ((817, 841), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (832, 841), False, 'import cv2\n'), ((842, 884), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'draw_circle'], {}), "('image', draw_circle)\n", (862, 884), False, 'import cv2\n'), ((1040, 1063), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1061, 1063), False, 'import cv2\n'), ((900, 924), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (910, 924), False, 'import cv2\n'), ((933, 947), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (944, 947), False, 'import cv2\n'), ((449, 502), 'cv2.rectangle', 'cv2.rectangle', (['img', '(ix, iy)', '(x, y)', '(0, 255, 0)', '(-1)'], {}), '(img, (ix, iy), (x, y), (0, 255, 0), -1)\n', (462, 502), False, 'import cv2\n'), ((529, 572), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(img, (x, y), 5, (0, 0, 255), -1)\n', (539, 572), False, 'import cv2\n'), ((667, 720), 'cv2.rectangle', 'cv2.rectangle', (['img', '(ix, iy)', '(x, y)', '(0, 255, 0)', '(-1)'], {}), '(img, (ix, iy), (x, y), (0, 255, 0), -1)\n', (680, 720), False, 'import cv2\n'), ((739, 782), 'cv2.circle', 'cv2.circle', (['img', '(x, y)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(img, (x, y), 5, (0, 0, 255), -1)\n', (749, 782), False, 'import cv2\n')]
|
import sys
from PIL import Image
import argparse
import os
import numpy as np
import torch
import cv2
torch.set_printoptions(sci_mode=False, precision=4)
np.set_printoptions(suppress=True, precision=4)
def save_matrix(filename, mat, print_stats=False):
import matplotlib.pyplot as plt
# corr = F.avg_pool2d(corr, 4, stride=4).squeeze(1).squeeze(0)
if print_stats:
print("{}: {}. mean/std: {:.5f}, {:.5f}".format(filename, list(mat.shape),
np.abs(mat).mean(), mat.std()))
plt.imshow(mat)
plt.colorbar()
plt.savefig(filename) # dpi=1200
plt.clf()
print(f"Saved '{filename}'")
def get_boundary(h, w, H, W, radius):
top = max(0, h - radius)
bottom = min(H, h + radius + 1)
left = max(0, w - radius)
right = min(W, w + radius + 1)
return top, bottom, left, right
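# For example, get_boundary(h=2, w=60, H=46, W=96, radius=16) returns
# (0, 19, 44, 77): the window is clipped at the top image edge and kept
# intact on the other sides.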
def vis_attention(model_name, img1_path, img2_path, points, attention5d_path,
radius=16, box_radius=8, img_scale=1, alpha=1, savedir='attvis',
proj_img2=False):
img2_name = os.path.basename(img2_path)
img2_trunk = os.path.splitext(img2_name)[0]
if img1_path is not None:
img1_np = cv2.imread(img1_path)
img1_name = os.path.basename(img1_path)
img1_trunk = os.path.splitext(img1_name)[0]
img1_np = cv2.resize(img1_np, (0,0), fx=img_scale, fy=img_scale)
else:
img1_np = None
img2_np = cv2.imread(img2_path)[:,:,::-1]
img2_np = cv2.resize(img2_np, (0,0), fx=img_scale, fy=img_scale)
H, W = img2_np.shape[:2]
attention5d = torch.load(attention5d_path, map_location='cpu')
if not os.path.exists(savedir):
os.makedirs(savedir, exist_ok=True)
for point in points:
w0, h0 = point
w0 = int(w0 * img_scale)
h0 = int(h0 * img_scale)
h, w = h0 // 8, w0 // 8
if img_scale != 1:
print(f"{point[0]}, {point[1]} => {w0}, {h0} => {w}, {h}")
else:
print(f"{w0}, {h0} => {w}, {h}")
# attention: H//8, W//8
attention = attention5d[0, h, w].numpy()
# Set attention outside the radius to 0.
if radius > 0:
mask = np.zeros_like(attention, dtype=bool)
attn_top, attn_bottom, attn_left, attn_right = get_boundary(h, w, H//8, W//8, radius)
mask[attn_top:attn_bottom, attn_left:attn_right] = True
attention = attention * mask.astype(float)
median = np.median(attention[mask])
else:
median = np.median(attention)
neg_count = np.count_nonzero(attention < 0)
pos_count = np.count_nonzero(attention > 0)
print(f"{point}: median {median}, {pos_count} > 0, {neg_count} < 0")
box_top, box_bottom, box_left, box_right = get_boundary(h0, w0, H, W, radius=box_radius)
if img1_np is not None:
# draw a square around the point
# the side length of the square is 2*radius+1
blank_rect = np.copy(img1_np)
cv2.rectangle(blank_rect, (box_left, box_top), (box_right, box_bottom), (0, 0, 255), 1)
img1_np2 = cv2.addWeighted(img1_np, (1-alpha), blank_rect, alpha, 0)
img1_savename = f"{img1_trunk}-{point[0]},{point[1]}-highlight.png"
img1_savepath = os.path.join(savedir, img1_savename)
cv2.imwrite(img1_savepath, img1_np2)
print(f"Saved '{img1_savepath}'")
attention = cv2.resize(attention, (W, H))
attention -= median
attention[attention < 0] = 0
attention = (255 * attention / attention.max()).astype(np.uint8)
# heatmap: [368, 768, 3]
heatmap = cv2.applyColorMap(attention, cv2.COLORMAP_JET)[:, :, ::-1]
overlaid_img2 = img2_np * 0.6 + heatmap * 0.3
overlaid_img2 = overlaid_img2.astype(np.uint8)
blank_rect = overlaid_img2.copy()
# self attention on Frame-2, draw a red rectangle.
if img1_path == img2_path:
color = (255, 0, 0)
cv2.rectangle(blank_rect, (box_left, box_top), (box_right, box_bottom), color, 1)
# Cross-frame attention. If proj_img2, draw a green rectangle in Frame-2
# at the same location of the query in Frame-1.
elif proj_img2:
color = (0, 255, 0)
cv2.rectangle(blank_rect, (box_left, box_top), (box_right, box_bottom), color, 1)
overlaid_img2 = cv2.addWeighted(overlaid_img2, (1-alpha), blank_rect, alpha, 0)
overlaid_img2_obj = Image.fromarray(overlaid_img2)
img2_savename = f"{img2_trunk}-{point[0]},{point[1]}-{model_name}.png"
img2_savepath = os.path.join(savedir, img2_savename)
overlaid_img2_obj.save(img2_savepath)
print(f"Saved '{img2_savepath}'")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', dest="model_name", type=str)
parser.add_argument('--img1', dest='img1_path', type=str)
parser.add_argument('--img2', dest='img2_path', type=str)
    # --points is a list of points separated by '.', each point given as 'x,y',
    # e.g.: --points 11,22.44,77.33,15
parser.add_argument('--points', type=str)
parser.add_argument('--att', dest='attention5d_path', type=str, required=True)
parser.add_argument('--savedir', type=str, default='attvis')
parser.add_argument('--scale', dest='img_scale', type=float, default=1.0)
parser.add_argument('--radius', dest='radius', type=int, default=16)
parser.add_argument('--box_radius', dest='box_radius', type=int, default=8)
parser.add_argument('--alpha', type=float, default=1)
parser.add_argument('--proj_img2', action='store_true')
args = parser.parse_args()
points = args.points.split(".")
points = [[int(x) for x in p.split(",")] for p in points]
vis_attention(args.model_name, args.img1_path, args.img2_path, points, args.attention5d_path,
args.radius, args.box_radius, args.img_scale, args.alpha, args.savedir, args.proj_img2)
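# Illustrative invocation (the file names, model name and point coordinates
# below are placeholders):
#   python <this_script>.py --model craft --img1 frame1.png --img2 frame2.png \
#       --points 100,80.200,150 --att attention5d.pth --savedir attvis --scale 0.5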
|
[
"cv2.rectangle",
"numpy.count_nonzero",
"matplotlib.pyplot.imshow",
"os.path.exists",
"torch.set_printoptions",
"argparse.ArgumentParser",
"cv2.addWeighted",
"numpy.abs",
"matplotlib.pyplot.savefig",
"os.path.splitext",
"cv2.resize",
"cv2.imread",
"numpy.set_printoptions",
"cv2.applyColorMap",
"numpy.copy",
"PIL.Image.fromarray",
"numpy.median",
"cv2.imwrite",
"os.makedirs",
"torch.load",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.clf",
"os.path.join",
"os.path.basename",
"numpy.zeros_like"
] |
[((104, 155), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'sci_mode': '(False)', 'precision': '(4)'}), '(sci_mode=False, precision=4)\n', (126, 155), False, 'import torch\n'), ((156, 203), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)', 'precision': '(4)'}), '(suppress=True, precision=4)\n', (175, 203), True, 'import numpy as np\n'), ((530, 545), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mat'], {}), '(mat)\n', (540, 545), True, 'import matplotlib.pyplot as plt\n'), ((550, 564), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (562, 564), True, 'import matplotlib.pyplot as plt\n'), ((569, 590), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (580, 590), True, 'import matplotlib.pyplot as plt\n'), ((606, 615), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (613, 615), True, 'import matplotlib.pyplot as plt\n'), ((1069, 1096), 'os.path.basename', 'os.path.basename', (['img2_path'], {}), '(img2_path)\n', (1085, 1096), False, 'import os\n'), ((1484, 1539), 'cv2.resize', 'cv2.resize', (['img2_np', '(0, 0)'], {'fx': 'img_scale', 'fy': 'img_scale'}), '(img2_np, (0, 0), fx=img_scale, fy=img_scale)\n', (1494, 1539), False, 'import cv2\n'), ((1586, 1634), 'torch.load', 'torch.load', (['attention5d_path'], {'map_location': '"""cpu"""'}), "(attention5d_path, map_location='cpu')\n", (1596, 1634), False, 'import torch\n'), ((4834, 4859), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4857, 4859), False, 'import argparse\n'), ((1114, 1141), 'os.path.splitext', 'os.path.splitext', (['img2_name'], {}), '(img2_name)\n', (1130, 1141), False, 'import os\n'), ((1193, 1214), 'cv2.imread', 'cv2.imread', (['img1_path'], {}), '(img1_path)\n', (1203, 1214), False, 'import cv2\n'), ((1235, 1262), 'os.path.basename', 'os.path.basename', (['img1_path'], {}), '(img1_path)\n', (1251, 1262), False, 'import os\n'), ((1336, 1391), 'cv2.resize', 'cv2.resize', (['img1_np', '(0, 0)'], {'fx': 'img_scale', 'fy': 'img_scale'}), '(img1_np, (0, 0), fx=img_scale, fy=img_scale)\n', (1346, 1391), False, 'import cv2\n'), ((1438, 1459), 'cv2.imread', 'cv2.imread', (['img2_path'], {}), '(img2_path)\n', (1448, 1459), False, 'import cv2\n'), ((1646, 1669), 'os.path.exists', 'os.path.exists', (['savedir'], {}), '(savedir)\n', (1660, 1669), False, 'import os\n'), ((1679, 1714), 'os.makedirs', 'os.makedirs', (['savedir'], {'exist_ok': '(True)'}), '(savedir, exist_ok=True)\n', (1690, 1714), False, 'import os\n'), ((2599, 2630), 'numpy.count_nonzero', 'np.count_nonzero', (['(attention < 0)'], {}), '(attention < 0)\n', (2615, 2630), True, 'import numpy as np\n'), ((2651, 2682), 'numpy.count_nonzero', 'np.count_nonzero', (['(attention > 0)'], {}), '(attention > 0)\n', (2667, 2682), True, 'import numpy as np\n'), ((3478, 3507), 'cv2.resize', 'cv2.resize', (['attention', '(W, H)'], {}), '(attention, (W, H))\n', (3488, 3507), False, 'import cv2\n'), ((4441, 4504), 'cv2.addWeighted', 'cv2.addWeighted', (['overlaid_img2', '(1 - alpha)', 'blank_rect', 'alpha', '(0)'], {}), '(overlaid_img2, 1 - alpha, blank_rect, alpha, 0)\n', (4456, 4504), False, 'import cv2\n'), ((4533, 4563), 'PIL.Image.fromarray', 'Image.fromarray', (['overlaid_img2'], {}), '(overlaid_img2)\n', (4548, 4563), False, 'from PIL import Image\n'), ((4667, 4703), 'os.path.join', 'os.path.join', (['savedir', 'img2_savename'], {}), '(savedir, img2_savename)\n', (4679, 4703), False, 'import os\n'), ((1284, 1311), 'os.path.splitext', 'os.path.splitext', (['img1_name'], 
{}), '(img1_name)\n', (1300, 1311), False, 'import os\n'), ((2192, 2228), 'numpy.zeros_like', 'np.zeros_like', (['attention'], {'dtype': 'bool'}), '(attention, dtype=bool)\n', (2205, 2228), True, 'import numpy as np\n'), ((2483, 2509), 'numpy.median', 'np.median', (['attention[mask]'], {}), '(attention[mask])\n', (2492, 2509), True, 'import numpy as np\n'), ((2545, 2565), 'numpy.median', 'np.median', (['attention'], {}), '(attention)\n', (2554, 2565), True, 'import numpy as np\n'), ((3018, 3034), 'numpy.copy', 'np.copy', (['img1_np'], {}), '(img1_np)\n', (3025, 3034), True, 'import numpy as np\n'), ((3047, 3138), 'cv2.rectangle', 'cv2.rectangle', (['blank_rect', '(box_left, box_top)', '(box_right, box_bottom)', '(0, 0, 255)', '(1)'], {}), '(blank_rect, (box_left, box_top), (box_right, box_bottom), (0,\n 0, 255), 1)\n', (3060, 3138), False, 'import cv2\n'), ((3158, 3215), 'cv2.addWeighted', 'cv2.addWeighted', (['img1_np', '(1 - alpha)', 'blank_rect', 'alpha', '(0)'], {}), '(img1_np, 1 - alpha, blank_rect, alpha, 0)\n', (3173, 3215), False, 'import cv2\n'), ((3325, 3361), 'os.path.join', 'os.path.join', (['savedir', 'img1_savename'], {}), '(savedir, img1_savename)\n', (3337, 3361), False, 'import os\n'), ((3374, 3410), 'cv2.imwrite', 'cv2.imwrite', (['img1_savepath', 'img1_np2'], {}), '(img1_savepath, img1_np2)\n', (3385, 3410), False, 'import cv2\n'), ((3697, 3743), 'cv2.applyColorMap', 'cv2.applyColorMap', (['attention', 'cv2.COLORMAP_JET'], {}), '(attention, cv2.COLORMAP_JET)\n', (3714, 3743), False, 'import cv2\n'), ((4046, 4131), 'cv2.rectangle', 'cv2.rectangle', (['blank_rect', '(box_left, box_top)', '(box_right, box_bottom)', 'color', '(1)'], {}), '(blank_rect, (box_left, box_top), (box_right, box_bottom),\n color, 1)\n', (4059, 4131), False, 'import cv2\n'), ((4334, 4419), 'cv2.rectangle', 'cv2.rectangle', (['blank_rect', '(box_left, box_top)', '(box_right, box_bottom)', 'color', '(1)'], {}), '(blank_rect, (box_left, box_top), (box_right, box_bottom),\n color, 1)\n', (4347, 4419), False, 'import cv2\n'), ((478, 489), 'numpy.abs', 'np.abs', (['mat'], {}), '(mat)\n', (484, 489), True, 'import numpy as np\n')]
|
# @Time : 2022/1/1
# @Author : <NAME>
# @email : <EMAIL>
import ipdb
import math
import torch
import numpy as np
import torch.nn.functional as F
from loguru import logger
from torch import nn
import os
from crslab.model.base import BaseModel
from crslab.model.utils.modules.info_nce_loss import info_nce_loss
from crslab.model.utils.functions import edge_to_pyg_format
from crslab.model.utils.modules.cross_entropy_loss import Handle_Croess_Entropy_Loss
from torch_geometric.nn import RGCNConv
from crslab.config import MODEL_PATH
from crslab.model.base import BaseModel
from crslab.model.utils.modules.info_nce_loss import info_nce_loss
from crslab.model.utils.functions import edge_to_pyg_format
from crslab.model.utils.modules.attention import SelfAttentionBatch, SelfAttentionSeq
from crslab.model.utils.modules.transformer import TransformerDecoder, TransformerEncoder
from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, \
_normalize, \
create_position_codes
NEAR_INF_FP16 = 65504
NEAR_INF = 1e20
def neginf(dtype):
"""Returns a representable finite number near -inf for a dtype."""
if dtype is torch.float16:
return -NEAR_INF_FP16
else:
return -NEAR_INF
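# neginf is used further below to mask attention logits before the softmax:
# -1e20 overflows float16 (whose largest finite value is 65504), so the fp16
# branch returns -NEAR_INF_FP16 instead.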
class DBModel(nn.Module):
def __init__(self, opt, device, vocab, side_data):
super().__init__()
# vocab
self.vocab_size = vocab['vocab_size']
self.pad_token_idx = vocab['pad']
self.start_token_idx = vocab['start']
self.end_token_idx = vocab['end']
self.token_emb_dim = opt['token_emb_dim']
self.pretrained_embedding = side_data.get('embedding', None)
# kg
self.n_word = side_data['word_kg']['n_entity']
self.kg_name = opt['kg_name']
self.n_entity = side_data[self.kg_name]['n_entity']
self.pad_word_idx = vocab['pad_word']
self.pad_entity_idx = vocab['pad_entity']
entity_kg = side_data['entity_kg']
self.n_relation = entity_kg['n_relation']
entity_edges = entity_kg['edge']
self.entity_edge_idx, self.entity_edge_type = edge_to_pyg_format(entity_edges, 'RGCN')
self.entity_edge_idx = self.entity_edge_idx.to(device)
self.entity_edge_type = self.entity_edge_type.to(device)
word_edges = side_data['word_kg']['edge']
self.word_edges = edge_to_pyg_format(word_edges, 'GCN').to(device)
self.num_bases = opt['num_bases']
self.kg_emb_dim = opt['kg_emb_dim']
# transformer
self.n_heads = opt['n_heads']
self.n_layers = opt['n_layers']
self.ffn_size = opt['ffn_size']
self.dropout = opt['dropout']
self.attention_dropout = opt['attention_dropout']
self.relu_dropout = opt['relu_dropout']
self.learn_positional_embeddings = opt['learn_positional_embeddings']
self.embeddings_scale = opt['embeddings_scale']
self.reduction = opt['reduction']
self.n_positions = opt['n_positions']
self.response_truncate = opt.get('response_truncate', 20)
self._build_model()
def _build_model(self):
self._build_conversation_layer()
def _build_conversation_layer(self):
self.conv_entity_norm = nn.Linear(self.kg_emb_dim, self.ffn_size)
self.conv_entity_attn_norm = nn.Linear(self.kg_emb_dim, self.ffn_size)
def forward(self, batch, mode, kgModel):
entity_attn_rep, entity_representations = self.entity_model_kbrd(batch, mode, kgModel) # (bs, dim), (bs, n_context_entities, dim)
conv_entity_emb, conv_entity_reps = self.conv_entaity_model(entity_attn_rep, entity_representations) # (bs, ffn_size), (bs, n_context_entities, ffn_size)
return entity_attn_rep, entity_representations, conv_entity_emb, conv_entity_reps
# (bs, dim), (bs, n_context_entities, dim), (bs, ffn_size), (bs, n_context_entities, ffn_size)
def entity_model_kbrd(self, batch, mode, kgModel):
context_entities_kbrd = batch['context_entities_kbrd'] # [bs, nb_context_entities]
context_entities = batch['context_entities'] # (bs, entity_truncate)
user_rep, kg_embedding = kgModel._get_kg_user_rep(context_entities_kbrd) # (bs, dim), (n_entities, dim)
entity_representations = kg_embedding[context_entities] # (bs, entity_truncate, dim)
return user_rep, entity_representations
def conv_entaity_model(self, entity_attn_rep, entity_representations):
# encoder-decoder
conv_entity_emb = self.conv_entity_attn_norm(entity_attn_rep) # (bs, ffn_size)
conv_entity_reps = self.conv_entity_norm(entity_representations) # (bs, n_context_entities, ffn_size)
return conv_entity_emb, conv_entity_reps # (bs, ffn_size), (bs, n_context_entities, ffn_size)
class CoarseReviewModelForDecoder(nn.Module):
def __init__(self, opt, device, vocab, side_data):
super().__init__()
# vocab
self.vocab_size = vocab['vocab_size']
self.pad_token_idx = vocab['pad']
self.start_token_idx = vocab['start']
self.end_token_idx = vocab['end']
self.token_emb_dim = opt['token_emb_dim']
self.pretrained_embedding = side_data.get('embedding', None)
# kg
self.n_word = side_data['word_kg']['n_entity']
self.kg_name = opt['kg_name']
self.n_entity = side_data[self.kg_name]['n_entity']
self.pad_word_idx = vocab['pad_word']
self.pad_entity_idx = vocab['pad_entity']
entity_kg = side_data['entity_kg']
self.n_relation = entity_kg['n_relation']
entity_edges = entity_kg['edge']
self.entity_edge_idx, self.entity_edge_type = edge_to_pyg_format(entity_edges, 'RGCN')
self.entity_edge_idx = self.entity_edge_idx.to(device)
self.entity_edge_type = self.entity_edge_type.to(device)
word_edges = side_data['word_kg']['edge']
self.word_edges = edge_to_pyg_format(word_edges, 'GCN').to(device)
self.num_bases = opt['num_bases']
self.kg_emb_dim = opt['kg_emb_dim']
# transformer
self.n_heads = opt['n_heads']
self.n_layers = opt['n_layers']
self.ffn_size = opt['ffn_size']
self.dropout = opt['dropout']
self.attention_dropout = opt['attention_dropout']
self.relu_dropout = opt['relu_dropout']
self.learn_positional_embeddings = opt['learn_positional_embeddings']
self.embeddings_scale = opt['embeddings_scale']
self.reduction = opt['reduction']
self.n_positions = opt['n_positions']
self.response_truncate = opt.get('response_truncate', 20)
self._build_model()
def _build_model(self):
self._build_conv_concept_encoder()
def _build_conv_concept_encoder(self):
self.conv_review_attn_norm = nn.Linear(self.token_emb_dim, self.ffn_size)
self.conv_review_norm = nn.Linear(self.token_emb_dim, self.ffn_size)
def forward(self, batch, mode, reviewModel):
review_user_rep, review_reps, review_pad_reps, review_padding_mask, review_token_reps, review_token_padding_mask = \
self.model_review(batch, mode, reviewModel)
# (bs, dim), (~bs*nb_review, dim), (bs, n_review, dim), (bs, nb_review), (bs, n_review, seq_len3, dim), (bs, n_review, seq_len3)
conv_review_emb, conv_review_reps = self.conv_review_model(review_user_rep, review_pad_reps)
# (bs, ffn_size), (bs, n_review, ffn_size)
return conv_review_emb, conv_review_reps, review_padding_mask, review_token_reps, review_token_padding_mask
# (bs, ffn_size), (bs, n_review, dim), (bs, ffn_size), (bs, n_review, seq_len3, dim), (bs, n_review, seq_len3)
def model_review(self, batch, mode, reviewModel):
review_user_rep, review_reps, review_state = reviewModel.get_review_user_rep_and_review_rep(batch, mode)
# (bs, dim), (~bs*nb_review, dim), (~bs*nb_review, seq_len3, dim)
review_pad_reps, review_padding_mask, review_token_reps, review_token_padding_mask = reviewModel.get_review_sample_reps(
batch, mode, review_reps, review_state)
# (bs, nb_review, dim), (bs, nb_review), (bs, n_review, seq_len3, dim), (bs, n_review, seq_len3)
return review_user_rep, review_reps, review_pad_reps, \
review_padding_mask, review_token_reps, review_token_padding_mask
# (bs, dim), (~bs*nb_review, dim), (bs, n_review, dim),
# (bs, nb_review), (bs, n_review, seq_len3, dim), (bs, n_review, seq_len3)
def conv_review_model(self, review_attn_rep, review_representations):
# (bs, dim), (bs, n_review, dim), (bs, nb_review, dim), (bs, seq_len3, dim)
conv_review_emb = self.conv_review_attn_norm(review_attn_rep) # (bs, ffn_size)
conv_review_reps = self.conv_review_norm(review_representations) # (bs, n_context_words, ffn_size)
return conv_review_emb, conv_review_reps
# (bs, ffn_size), (bs, n_review, ffn_size)
class FineReviewDecoderAttention(nn.Module):
def __init__(self, n_heads, dim, dropout=.0):
super(FineReviewDecoderAttention, self).__init__()
self.n_heads = n_heads
self.dim = dim
self.dim_per_head = self.dim // self.n_heads
self.attn_dropout = nn.Dropout(p=dropout) # --attention-dropout
self.q_lin = nn.Linear(dim, dim)
self.k_lin = nn.Linear(dim, dim)
self.v_lin = nn.Linear(dim, dim)
# TODO: merge for the initialization step
nn.init.xavier_normal_(self.q_lin.weight)
nn.init.xavier_normal_(self.k_lin.weight)
nn.init.xavier_normal_(self.v_lin.weight)
# and set biases to 0
self.out_lin = nn.Linear(dim, dim)
nn.init.xavier_normal_(self.out_lin.weight)
self.fine_review_level_self_atten = SelfAttentionSeq(self.dim_per_head, self.dim_per_head)
def forward(self, query, key=None, value=None, mask=None, mask2=None):
# query: (bs, query_len, ffn_size)
# key/value: (bs, nb_review, key_len, ffn_size)
# mask: (bs, nb_review)
# mask2: (bs, nb_review, key_len)
query, key, value = self.set_q_k_v(query, key, value)
bs, query_len, n_heads, dim_per_head, scale, key_len, dim, nb_review = self.set_hyper_parameters(query, key, mask, mask2)
q, k, v = self.prepare_heads(query, key, value, bs, n_heads, dim_per_head)
# q: (bs*n_heads, query_len, dim_per_head)
# k/v: (bs*n_heads, nb_review, key_len, dim_per_head)
out = self.compute_func(q, k, v, query, mask, mask2, bs, query_len, n_heads, dim_per_head, scale, key_len, dim, nb_review) # (bs, query_len, dim)
return out
def set_q_k_v(self, query, key, value):
return query, key, value
def set_hyper_parameters(self, query, key, mask, mask2):
bs, query_len, dim = query.size()
assert dim == self.dim, \
f'Dimensions do not match: {dim} query vs {self.dim} configured'
assert mask is not None, 'Mask is None, please specify a mask'
        assert mask2 is not None, 'Mask2 is None, please specify a mask2'
n_heads = self.n_heads
dim_per_head = dim // n_heads
scale = math.sqrt(dim_per_head)
_, nb_review, key_len, dim = key.size()
return bs, query_len, n_heads, dim_per_head, scale, key_len, dim, nb_review
def prepare_heads(self, query, key, value, bs, n_heads, dim_per_head):
# query: (bs, query_len, ffn_size)
# key/value: (bs, nb_review, key_len, ffn_size)
q = self.prepare_head_q(self.q_lin(query), bs, n_heads, dim_per_head) # (bs*n_heads, query_len, dim_per_head)
k = self.prepare_head_kv(self.k_lin(key), bs, n_heads, dim_per_head) # (bs*n_heads, nb_review, key_len, dim_per_head)
v = self.prepare_head_kv(self.v_lin(value), bs, n_heads, dim_per_head) # (bs*n_heads, nb_review, key_len, dim_per_head)
return q, k, v
def prepare_head_q(self, tensor, bs, n_heads, dim_per_head):
# input is (bs, query_len, ffn_size)
# output is (bs*n_heads, query_len, dim_per_head)
bs, seq_len, _ = tensor.size()
tensor = tensor.view(bs, tensor.size(1), n_heads, dim_per_head)
tensor = tensor.transpose(1, 2).contiguous().view(
bs*n_heads,
seq_len,
dim_per_head
)
return tensor
def prepare_head_kv(self, tensor, bs, n_heads, dim_per_head):
# input is (bs, nb_review, key_len, ffn_size)
# output is (bs*n_heads, nb_review, key_len, dim_per_head)
bs, nb_review, seq_len, _ = tensor.size()
tensor = tensor.view(bs, nb_review, seq_len, n_heads, dim_per_head)
tensor = tensor.transpose(1, 3).transpose(2, 3).contiguous().view(
bs*n_heads,
nb_review,
seq_len,
dim_per_head
)
return tensor
def compute_func(self, q, k, v, query, mask, mask2, bs, query_len, n_heads, dim_per_head, scale, key_len, dim, nb_review):
# q: (bs*n_heads, query_len, dim_per_head)
# k/v: (bs*n_heads, nb_review, key_len, dim_per_head)
# mask: (bs, nb_review)
# mask2: (bs, nb_review, key_len)
attentioned = self.token_level_atten(q, k, v, query, mask, mask2, bs, query_len, n_heads, dim_per_head, scale, key_len, dim, nb_review)
# (bs*n_heads*nb_review, query_len, dim_per_head)
attentioned = self.review_level_atten(attentioned, mask, bs, n_heads, nb_review, query_len, dim_per_head) # (bs*n_heads, query_len, dim_per_head)
attentioned = (
attentioned.type_as(query)
.view(bs, n_heads, query_len, dim_per_head)
.transpose(1, 2).contiguous()
.view(bs, query_len, dim)
)
# (bs, query_len, dim)
out = self.out_lin(attentioned) # (bs, query_len, dim)
return out # (bs, query_len, dim)
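    # Shape walk-through for compute_func with, e.g., bs=2, n_heads=4,
    # nb_review=3, query_len=5, key_len=7, dim=64 (so dim_per_head=16):
    #   q: (8, 5, 16), k/v: (8, 3, 7, 16)
    #   token_level_atten output: (24, 5, 16)   # bs*n_heads*nb_review slices
    #   review_level_atten output: (8, 5, 16)   # reviews aggregated away
    #   out after out_lin: (2, 5, 64)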
def get_attn_mask(self, mask, bs, key_len, n_heads, query_len):
# Mask is [bs, key_len] (selfattn) or [bs, key_len, key_len] (enc attn)
attn_mask = (
(mask == 0)
.view(bs, 1, -1, key_len)
.repeat(1, n_heads, 1, 1)
.expand(bs, n_heads, query_len, key_len)
.view(bs*n_heads, query_len, key_len)
)
return attn_mask # (bs*n_heads, query_len, key_len)
def token_level_atten(self, q, k, v, query, mask, mask2, bs, query_len, n_heads, dim_per_head, scale, key_len, dim, nb_review):
# q: (bs*n_heads, query_len, dim_per_head)
# k/v: (bs*n_heads, nb_review, key_len, dim_per_head)
# query: (bs, seq_len2, ffn_size)
# mask: (bs, nb_review)
# mask2: (bs, nb_review, key_len)
q = (q.unsqueeze(1)
.expand(bs*n_heads, nb_review, query_len, dim_per_head)
.reshape(bs*n_heads*nb_review, query_len, dim_per_head))
k = k.view(bs*n_heads*nb_review, key_len, dim_per_head)
dot_prod = q.div_(scale).bmm(k.transpose(-2, -1)) # (bs*n_heads*nb_review, query_len, key_len)
attn_mask = self.get_token_level_attn_mask(mask2, bs, key_len, n_heads, query_len, nb_review)
# (bs*n_heads*nb_review, query_len, key_len)
assert attn_mask.shape == dot_prod.shape
dot_prod.masked_fill_(attn_mask, neginf(dot_prod.dtype)) # (bs*n_heads*nb_review, query_len, key_len)
attn_weights = F.softmax(dot_prod, dim=-1).type_as(query) # (bs*n_heads*nb_review, query_len, key_len)
attn_weights = self.attn_dropout(attn_weights) # --attention-dropout
v = v.view(bs*n_heads*nb_review, key_len, dim_per_head)
attentioned = attn_weights.bmm(v) # (bs*n_heads*nb_review, query_len, dim_per_head)
return attentioned # (bs*n_heads*nb_review, query_len, dim_per_head)
def review_level_atten(self, attentioned, mask, bs, n_heads, nb_review, query_len, dim_per_head):
# self-attention or (bs, nb_review, dim) as query
# attentioned: (bs*n_heads*nb_review, query_len, dim_per_head)
        # mask: (bs, nb_review): the padding position should be 1
attentioned = (attentioned
.view(bs*n_heads, nb_review, query_len, dim_per_head)
.transpose(1, 2).contiguous()
.view(bs*n_heads*query_len, nb_review, dim_per_head)
)
# (bs*n_heads*query_len, nb_review, dim_per_head)
mask = (mask
.unsqueeze(1).unsqueeze(1)
.expand(bs, n_heads, query_len, nb_review).contiguous()
.view(bs*n_heads*query_len, nb_review)
)
assert attentioned.shape[:2] == mask.shape[:2]
attentioned = self.fine_review_level_self_atten(attentioned, mask) # (bs*n_heads*query_len, dim_per_head)
attentioned = attentioned.view(bs*n_heads, query_len, dim_per_head)
return attentioned # (bs*n_heads, query_len, dim_per_head)
def get_token_level_attn_mask(self, mask2, bs, key_len, n_heads, query_len, nb_review):
# mask2: (bs, nb_review, key_len)
attn_mask = (
(mask2 == 0)
.view(bs, 1, nb_review, 1, key_len)
.repeat(1, n_heads, 1, 1, 1)
.expand(bs, n_heads, nb_review, query_len, key_len)
.view(bs*n_heads*nb_review, query_len, key_len)
)
return attn_mask # (bs*n_heads*nb_review, query_len, key_len)
class TransformerDecoderLayerCoarse(nn.Module):
def __init__(
self,
n_heads,
embedding_size,
ffn_size,
attention_dropout=0.0,
relu_dropout=0.0,
dropout=0.0,
):
super().__init__()
self.dim = embedding_size
self.ffn_dim = ffn_size
self.dropout = nn.Dropout(p=dropout)
self.self_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm1 = nn.LayerNorm(embedding_size)
self.encoder_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm2 = nn.LayerNorm(embedding_size)
self.encoder_db_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm2_db = nn.LayerNorm(embedding_size)
self.encoder_review_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm2_review = nn.LayerNorm(embedding_size)
self.fine_encoder_review_attention = FineReviewDecoderAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm2_review2 = nn.LayerNorm(embedding_size)
self.ffn = TransformerFFN(embedding_size, ffn_size, relu_dropout=relu_dropout)
self.norm3 = nn.LayerNorm(embedding_size)
def forward(self,
inputs,
encoder_output, encoder_mask,
conv_entity_reps, entity_padding_mask,
conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask):
'''
input: (bs, seq_len2, dim)
encoder_output, encoder_mask: (bs, seq_len, dim), (bs, seq_len)
conv_entity_reps, entity_padding_mask: (bs, n_context_entities, ffn_size), (bs, entity_len)
conv_review_reps, review_padding_mask: (bs, nb_review, ffn_size), (bs, nb_review)
review_token_reps, review_token_padding_mask: (bs, nb_review, ffn_size), (bs, nb_review, seq_len3)
'''
inputs = self._decoder_self_attention(inputs)
inputs = self._db_decode_cross_attention(inputs, conv_entity_reps, entity_padding_mask)
inputs = self._coarse_review_decode_cross_attention(inputs, conv_review_reps, review_padding_mask)
inputs = self._fine_review_decode_cross_attention(inputs, review_token_reps, review_padding_mask, review_token_padding_mask)
# inputs = self._review_decode_cross_attention3(inputs, review_token_decode_atten_rep, review_token_padding_mask)
inputs = self._context_decode_cross_attention(inputs, encoder_output, encoder_mask)
inputs = self._ffn(inputs)
return inputs # (bs, seq_len2, dim)
def _decoder_self_attention(self, x):
decoder_mask = _create_selfattn_mask(x)
# first self attn
residual = x
# don't peak into the future!
x = self.self_attention(query=x, mask=decoder_mask)
x = self.dropout(x) # --dropout
x = x + residual
x = _normalize(x, self.norm1)
return x # (bs, seq_len2, dim)
def _db_decode_cross_attention(self, x, conv_entity_reps, entity_padding_mask):
# x: (bs, seq_len2, dim)
# conv_entity_reps, entity_padding_mask: (bs, n_context_entities, ffn_size), (bs, entity_len)
residual = x
x = self.encoder_db_attention(
query=x,
key=conv_entity_reps,
value=conv_entity_reps,
mask=entity_padding_mask
)
x = self.dropout(x) # --dropout
x = residual + x
x = _normalize(x, self.norm2_db)
return x # (bs, seq_len2, dim)
def _coarse_review_decode_cross_attention(self, x, conv_review_reps, review_padding_mask):
# x: (bs, seq_len2, dim)
# conv_review_reps, review_padding_mask: (bs, nb_review, ffn_size), (bs, nb_review)
residual = x
x = self.encoder_review_attention(
query=x,
key=conv_review_reps,
value=conv_review_reps,
mask=review_padding_mask
)
x = self.dropout(x) # --dropout
x = residual + x
x = _normalize(x, self.norm2_review)
return x # (bs, seq_len2, dim)
def _fine_review_decode_cross_attention(self, x, review_token_reps, review_padding_mask, review_token_padding_mask):
# x: (bs, seq_len2, dim)
# (bs, nb_review, seq_len3, ffn_size), (bs, nb_review), (bs, nb_review, seq_len3)
residual = x
x = self.fine_encoder_review_attention(
query=x,
key=review_token_reps,
value=review_token_reps,
mask=review_padding_mask,
mask2=review_token_padding_mask,
)
x = self.dropout(x) # --dropout
x = residual + x
x = _normalize(x, self.norm2_review2)
return x # (bs, seq_len2, dim)
def _context_decode_cross_attention(self, x, encoder_output, encoder_mask):
residual = x
x = self.encoder_attention(
query=x,
key=encoder_output,
value=encoder_output,
mask=encoder_mask
)
x = self.dropout(x) # --dropout
x = residual + x
x = _normalize(x, self.norm2)
return x # (bs, seq_len2, dim)
def _ffn(self, x):
# finally the ffn
residual = x
x = self.ffn(x)
x = self.dropout(x) # --dropout
x = residual + x
x = _normalize(x, self.norm3)
return x # (bs, seq_len2, dim)
class TransformerDecoderLayerSelection(TransformerDecoderLayerCoarse):
def __init__(
self,
opt,
n_heads,
embedding_size,
ffn_size,
attention_dropout=0.0,
relu_dropout=0.0,
dropout=0.0,
):
self.opt = opt
super().__init__(
n_heads,
embedding_size,
ffn_size,
attention_dropout,
relu_dropout,
dropout)
def forward(self,
inputs,
encoder_output, encoder_mask,
conv_entity_reps, entity_padding_mask,
conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask):
'''
input: (bs, seq_len2, dim)
encoder_output, encoder_mask: (bs, seq_len, dim), (bs, seq_len)
conv_entity_reps, entity_padding_mask: (bs, n_context_entities, ffn_size), (bs, entity_len)
conv_review_reps, review_padding_mask: (bs, nb_review, ffn_size), (bs, nb_review)
review_token_reps, review_token_padding_mask: (bs, nb_review, ffn_size), (bs, nb_review, seq_len3)
'''
inputs = self.forward_d_c_db_r_f(
inputs,
encoder_output, encoder_mask,
conv_entity_reps, entity_padding_mask,
conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask)
return inputs # (bs, seq_len2, dim)
def forward_d_c_db_r_f(self,
inputs,
encoder_output, encoder_mask,
conv_entity_reps, entity_padding_mask,
conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask):
logger.debug('[forward_d_c_db_r_f]')
inputs = self._decoder_self_attention(inputs)
inputs = self._context_decode_cross_attention(inputs, encoder_output, encoder_mask)
inputs = self._db_decode_cross_attention(inputs, conv_entity_reps, entity_padding_mask)
inputs = self._coarse_review_decode_cross_attention(inputs, conv_review_reps, review_padding_mask)
inputs = self._ffn(inputs)
return inputs
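# Hedged sketch (editor's addition, not part of the original model): every
# cross-attention helper in the layer above follows the same residual plus
# post-LayerNorm pattern. The names `attn`, `norm` and `dropout` below are
# stand-ins for the layer's own sub-modules, not attributes defined in this file.
def _residual_attention_sketch(x, memory, memory_mask, attn, norm, dropout):
    residual = x
    x = attn(query=x, key=memory, value=memory, mask=memory_mask)  # cross-attend over memory
    x = dropout(x)  # --dropout
    x = residual + x  # residual connection
    return _normalize(x, norm)  # post-norm, as in the helpers above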
class TransformerDecoderKGCoarse(nn.Module):
def __init__(
self,
n_heads,
n_layers,
embedding_size,
ffn_size,
vocabulary_size,
embedding,
dropout=0.0,
attention_dropout=0.0,
relu_dropout=0.0,
embeddings_scale=True,
learn_positional_embeddings=False,
padding_idx=None,
n_positions=1024,
):
super().__init__()
self.embedding_size = embedding_size
self.ffn_size = ffn_size
self.n_layers = n_layers
self.n_heads = n_heads
self.dim = embedding_size
self.embeddings_scale = embeddings_scale
self.dropout = nn.Dropout(dropout) # --dropout
self.out_dim = embedding_size
assert embedding_size % n_heads == 0, \
'Transformer embedding size must be a multiple of n_heads'
self.embeddings = embedding
self.position_embeddings = self._init_postision_embeddings(n_positions, embedding_size, learn_positional_embeddings)
self.layers = self._build_layers(n_heads, embedding_size, ffn_size, attention_dropout, relu_dropout, dropout)
def _init_postision_embeddings(self, n_positions, embedding_size, learn_positional_embeddings):
# create the positional embeddings
position_embeddings = nn.Embedding(n_positions, embedding_size)
if not learn_positional_embeddings:
create_position_codes(
n_positions, embedding_size, out=position_embeddings.weight
)
else:
nn.init.normal_(position_embeddings.weight, 0, embedding_size ** -0.5)
return position_embeddings
def _build_layers(self, n_heads, embedding_size, ffn_size, attention_dropout, relu_dropout, dropout):
layers = nn.ModuleList()
for _ in range(self.n_layers):
layers.append(TransformerDecoderLayerCoarse(
n_heads, embedding_size, ffn_size,
attention_dropout=attention_dropout,
relu_dropout=relu_dropout,
dropout=dropout,
))
return layers
def forward(self,
inputs,
encoder_output, encoder_mask,
conv_entity_reps, entity_padding_mask,
conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask,
incr_state=None):
        '''
        inputs: (bs, seq_len2, dim)
        encoder_output, encoder_mask: (bs, seq_len, dim), (bs, seq_len)
        conv_entity_reps, entity_padding_mask: (bs, n_context_entities, ffn_size), (bs, entity_len)
        conv_review_reps, review_padding_mask: (bs, nb_review, ffn_size), (bs, nb_review)
        review_token_reps, review_token_padding_mask: (bs, nb_review, seq_len3, ffn_size), (bs, nb_review, seq_len3)
        '''
inputs = self.embed_input(inputs) # (bs, seq_len2, dim)
for layer in self.layers:
inputs = layer(
inputs,
encoder_output, encoder_mask,
conv_entity_reps, entity_padding_mask,
conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask) # (bs, seq_len2, dim)
return inputs, None # (bs, seq_len, embed_dim)
def embed_input(self, input):
tensor = self.embeddings(input) # (bs, seq_len, embed_dim)
if self.embeddings_scale:
tensor = tensor * np.sqrt(self.dim)
positions_embedding = self.get_postition_embeddings(input, tensor)
tensor = tensor + positions_embedding
tensor = self.dropout(tensor) # --dropout
return tensor
def get_postition_embeddings(self, input, tensor):
seq_len = input.size(1)
positions = input.new(seq_len).long() # (seq_len)
positions = torch.arange(seq_len, out=positions).unsqueeze(0) # (1, seq_len)
positions_embedding = self.position_embeddings(positions).expand_as(tensor)
return positions_embedding
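# Hedged sketch (editor's addition): embed_input() above scales the token
# embeddings by sqrt(dim) and adds position embeddings built from torch.arange.
# `token_emb` and `pos_emb` are stand-in nn.Embedding modules, not attributes of
# the class above.
def _embed_with_positions_sketch(input_ids, token_emb, pos_emb, dim, scale=True):
    tensor = token_emb(input_ids)  # (bs, seq_len, embed_dim)
    if scale:
        tensor = tensor * np.sqrt(dim)  # same sqrt(dim) scaling as embed_input()
    seq_len = input_ids.size(1)
    positions = torch.arange(seq_len, device=input_ids.device).unsqueeze(0)  # (1, seq_len)
    return tensor + pos_emb(positions).expand_as(tensor)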
class TransformerDecoderKGSelection(TransformerDecoderKGCoarse):
def __init__(
self,
opt,
n_heads,
n_layers,
embedding_size,
ffn_size,
vocabulary_size,
embedding,
dropout=0.0,
attention_dropout=0.0,
relu_dropout=0.0,
embeddings_scale=True,
learn_positional_embeddings=False,
padding_idx=None,
n_positions=1024,
):
self.opt = opt
super().__init__(
n_heads,
n_layers,
embedding_size,
ffn_size,
vocabulary_size,
embedding,
dropout,
attention_dropout,
relu_dropout,
embeddings_scale,
learn_positional_embeddings,
padding_idx,
n_positions)
def _build_layers(self, n_heads, embedding_size, ffn_size, attention_dropout, relu_dropout, dropout):
layers = nn.ModuleList()
for _ in range(self.n_layers):
layers.append(TransformerDecoderLayerSelection(
self.opt,
n_heads, embedding_size, ffn_size,
attention_dropout=attention_dropout,
relu_dropout=relu_dropout,
dropout=dropout,
))
return layers
class DecoderCNSelectionModel(nn.Module):
def __init__(self, opt, device, vocab, side_data, decoder_token_embedding):
super().__init__()
self.opt, self.device, self.vocab, self.side_data = opt, device, vocab, side_data
# vocab
self.vocab_size = vocab['vocab_size']
self.pad_token_idx = vocab['pad']
self.start_token_idx = vocab['start']
self.end_token_idx = vocab['end']
self.token_emb_dim = opt['token_emb_dim']
self.pretrained_embedding = side_data.get('embedding', None)
# kg
self.n_word = side_data['word_kg']['n_entity']
self.kg_name = opt['kg_name']
self.n_entity = side_data[self.kg_name]['n_entity']
self.pad_word_idx = vocab['pad_word']
self.pad_entity_idx = vocab['pad_entity']
entity_kg = side_data['entity_kg']
self.n_relation = entity_kg['n_relation']
entity_edges = entity_kg['edge']
self.entity_edge_idx, self.entity_edge_type = edge_to_pyg_format(entity_edges, 'RGCN')
self.entity_edge_idx = self.entity_edge_idx.to(device)
self.entity_edge_type = self.entity_edge_type.to(device)
word_edges = side_data['word_kg']['edge']
self.word_edges = edge_to_pyg_format(word_edges, 'GCN').to(device)
self.num_bases = opt['num_bases']
self.kg_emb_dim = opt['kg_emb_dim']
# transformer
self.n_heads = opt['n_heads']
self.n_layers = opt['n_layers']
self.ffn_size = opt['ffn_size']
self.dropout = opt['dropout']
self.attention_dropout = opt['attention_dropout']
self.relu_dropout = opt['relu_dropout']
self.learn_positional_embeddings = opt['learn_positional_embeddings']
self.embeddings_scale = opt['embeddings_scale']
self.reduction = opt['reduction']
self.n_positions = opt['n_positions']
self.response_truncate = opt.get('response_truncate', 20)
self.decoder_token_embedding = decoder_token_embedding
self.decoder_token_prob_weight = side_data.get('decoder_token_prob_weight', None)
if self.decoder_token_prob_weight is not None:
self.decoder_token_prob_weight = self.decoder_token_prob_weight.to(self.device)
self.is_weight_logits = opt.get('is_weight_logits', False)
self.is_coarse_weight_loss = opt.get('is_coarse_weight_loss', False)
# assert not(self.is_weight_logits and self.is_coarse_weight_loss)
self._build_model()
def _build_model(self):
self.register_buffer('START', torch.tensor([self.start_token_idx], dtype=torch.long))
self.conv_decoder = TransformerDecoderKGSelection(
self.opt,
self.n_heads, self.n_layers, self.token_emb_dim, self.ffn_size, self.vocab_size,
embedding=self.decoder_token_embedding,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
relu_dropout=self.relu_dropout,
embeddings_scale=self.embeddings_scale,
learn_positional_embeddings=self.learn_positional_embeddings,
padding_idx=self.pad_token_idx,
n_positions=self.n_positions
)
self.conv_loss = self.build_loss_func()
self._build_copy_network()
def build_loss_func(self):
if self.is_coarse_weight_loss:
conv_loss = Handle_Croess_Entropy_Loss(ignore_index=self.pad_token_idx, weight=self.decoder_token_prob_weight.squeeze())
# conv_loss = coarse_weight_loss(ignore_index=self.pad_token_idx)
else:
conv_loss = nn.CrossEntropyLoss(ignore_index=self.pad_token_idx)
return conv_loss
def _build_copy_network(self):
n_copy_source = 3 if '3' in self.opt['logit_type'] else 2
self.copy_norm = nn.Linear(self.ffn_size * n_copy_source, self.token_emb_dim)
self.copy_output = nn.Linear(self.token_emb_dim, self.vocab_size)
self.fusion_latent_norm = nn.Linear(self.ffn_size * n_copy_source, self.token_emb_dim)
def forward(self,
mode,
encoder_output, encoder_mask,
conv_entity_emb, conv_entity_reps, entity_padding_mask,
conv_review_emb, conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask,
response):
'''
encoder_output, encoder_mask: (bs, seq_len, dim), (bs, seq_len)
conv_entity_reps, entity_padding_mask: (bs, n_context_entities, ffn_size), (bs, entity_len)
conv_review_reps, review_padding_mask: (bs, nb_review, ffn_size), (bs, nb_review)
        review_token_reps, review_token_padding_mask: (bs, nb_review, seq_len3, ffn_size), (bs, nb_review, seq_len3)
response: (bs, seq_len)
'''
mode2decode_func = {
'train': self._decode_forced_with_kg,
'val': self._decode_forced_with_kg,
'test': self._decode_greedy_with_kg
}
decode_func = mode2decode_func[mode]
logits, preds, loss = decode_func(
encoder_output, encoder_mask,
conv_entity_emb, conv_entity_reps, entity_padding_mask,
conv_review_emb, conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask,
response)
return loss, preds
def _starts(self, bs):
"""Return bs start tokens."""
return self.START.detach().expand(bs, 1)
def _decode_forced_with_kg(
self,
encoder_output, encoder_mask,
conv_entity_emb, conv_entity_reps, entity_padding_mask,
conv_review_emb, conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask,
response):
batch_size, seq_len = response.shape
start = self._starts(batch_size)
inputs = torch.cat((start, response[:, :-1]), dim=-1).long() # (bs, seq_len)
# inputs = response[:, :] # (bs, seq_len)
dialog_latent, _ = self.conv_decoder(
inputs,
encoder_output, encoder_mask,
conv_entity_reps, entity_padding_mask,
conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask)
# (bs, seq_len, dim)
gen_logits, preds, loss = self._force_process_dialog_latent(conv_entity_emb, conv_review_emb, dialog_latent, response, conv_review_reps, review_padding_mask)
return gen_logits, preds, loss
def _force_process_dialog_latent(self, entity_latent, review_latent, dialog_latent, response, conv_review_reps, review_padding_mask):
# (bs, dim), (bs, dim), (bs, seq_len1, dim), (bs, seq_len), (bs, nb_review, ffn_size), (bs, nb_review)
batch_size, seq_len = response.shape
entity_latent = entity_latent.unsqueeze(1).expand(-1, seq_len, -1) # (bs, seq_len, ffn_size)
review_latent = review_latent.unsqueeze(1).expand(-1, seq_len, -1) # (bs, seq_len, ffn_size)
logits = self._get_logits(entity_latent, review_latent, dialog_latent, conv_review_reps, review_padding_mask) # (bs, seq_len, vocab_size)
preds = logits.argmax(dim=-1)
loss = self._force_get_gen_loss(logits, response)
return logits, preds, loss
def _force_get_gen_loss(self, logits, response):
# (bs, seq_len, vocab_size), (bs, seq_len)
logits = logits.view(-1, logits.shape[-1]) # (bs*seq_len, nb_tok)
response = response.view(-1) # (bs*seq_len)
# n = 2
# loss = self.conv_loss(logits[:n], response[:n])
# logger.info(f'{logits[:n]}')
# logger.info(f'{response[:n]}')
# logger.info(f'{loss}')
# ipdb.set_trace()
loss = self.conv_loss(logits, response)
return loss
def _decode_greedy_with_kg(
self,
encoder_output, encoder_mask,
conv_entity_emb, conv_entity_reps, entity_padding_mask,
conv_review_emb, conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask,
response):
bs = encoder_output.shape[0]
inputs = self._starts(bs).long() # (bs, 1)
incr_state = None
logits = []
for _ in range(self.response_truncate):
dialog_latent, incr_state = self.conv_decoder(
inputs,
encoder_output, encoder_mask,
conv_entity_reps, entity_padding_mask,
conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask,
incr_state)
# (bs, seq_len, dim), None
cur_time_logits, preds = self._greedy_process_dialog_latent(conv_entity_emb, conv_review_emb, dialog_latent, conv_review_reps, review_padding_mask)
logits.append(cur_time_logits)
inputs = torch.cat((inputs, preds), dim=1) # (bs, gen_response_len)
finished = ((inputs == self.end_token_idx).sum(dim=-1) > 0).sum().item() == bs
if finished:
break
logits = torch.cat(logits, dim=1) # (bs, response_truncate, nb_tok)
loss = None
return logits, inputs, loss # (bs, response_truncate, nb_tok),
def _greedy_process_dialog_latent(self, entity_latent, review_latent, dialog_latent, conv_review_reps, review_padding_mask):
# (bs, dim), (bs, dim), (bs, seq_len1, dim), (bs, seq_len)
entity_latent = entity_latent.unsqueeze(1) # (bs, 1, dim)
review_latent = review_latent.unsqueeze(1) # (bs, 1, dim)
dialog_latent = dialog_latent[:, -1:, :] # (bs, 1, dim)
logits = self._get_logits(entity_latent, review_latent, dialog_latent, conv_review_reps, review_padding_mask) # (bs, 1, nb_tok)
preds = logits.argmax(dim=-1).long() # (bs, 1)
return logits, preds
def _get_logits(self, entity_latent, review_latent, dialog_latent, conv_review_reps, review_padding_mask):
# (bs, seq_len, dim) * 3
logits = self._get_logits_hs_copy2(entity_latent, review_latent, dialog_latent, conv_review_reps, review_padding_mask) # (bs, seq_len, nb_tok)
logits = self.weight_logits(logits) # (bs, seq_len, nb_tok)
return logits # (bs, seq_len, nb_tok)
def weight_logits(self, logits):
# (bs, seq_len, nb_tok)
if self.is_weight_logits and self.decoder_token_prob_weight is not None:
logits = logits * self.decoder_token_prob_weight
return logits
def _get_logits_hs_copy2(self, entity_latent, review_latent, dialog_latent, conv_review_reps, review_padding_mask):
# (bs, seq_len, dim) * 3, (bs, nb_review, ffn_size)
fusion_latent = self.get_fusion_latent2(dialog_latent, conv_review_reps, review_padding_mask) # (bs, seq_len, ffn_size)
gen_logits = F.linear(fusion_latent, self.decoder_token_embedding.weight) # (bs, seq_len, nb_tok)
sum_logits = gen_logits # (bs, seq_len, nb_tok)
return sum_logits # (bs, seq_len, nb_tok)
def get_fusion_latent2(self, dialog_latent, conv_review_reps, review_padding_mask):
# (bs, seq_len, ffn_size), (bs, nb_review, ffn_size), (bs, nb_review)
bs, seq_len, _ = dialog_latent.shape
bs, nb_review, _ = conv_review_reps.shape
# dialog_latent = dialog_latent.transpose(0, 1).contiguous() # (seq_len, bs, ffn_size)
# dialog_latent = dialog_latent.unsqueeze(2).expand(-1, -1, nb_review, -1) # (seq_len, bs, nb_review, ffn_size)
# dialog_latent = dialog_latent.view(-1, self.ffn_size) # (seq_len*bs*nb_review, ffn_size)
# conv_review_reps = conv_review_reps.view(-1, self.ffn_size) # (bs*nb_review, ffn_size)
# conv_review_reps = conv_review_reps.expand(seq_len*bs*nb_review, -1) # (seq_len*bs*nb_review, ffn_size)
# dot_prod = dialog_latent * conv_review_reps # (seq_len*bs*nb_review)
# dot_prod = dot_prod.view(bs, seq_len, nb_review) # (seq_len, bs, nb_review)
# weight = F.softmax(atten, dim=-1).type_as(atten) # (seq_len, bs, nb_review)
dot_prod = dialog_latent.bmm(conv_review_reps.transpose(1, 2)) # (bs, seq_len, nb_review)
# dot_prod = dot_prod.transpose(0, 1).contiguous() # (seq_len, bs, nb_review)
attn_mask = review_padding_mask.unsqueeze(1).expand(-1, seq_len, -1) # (bs, seq_len, nb_review)
dot_prod.masked_fill_(~attn_mask.bool(), neginf(dot_prod.dtype)) # (bs, seq_len, nb_review)
weight = F.softmax(dot_prod, dim=-1).type_as(conv_review_reps) # (bs, seq_len, nb_review)
decode_atten_review_reps = weight.bmm(conv_review_reps) # (bs, seq_len, ffn_size)
decode_atten_review_reps = decode_atten_review_reps.view(bs, seq_len, self.ffn_size) # (bs, seq_len, ffn_size)
fusion_latent = torch.cat([dialog_latent, decode_atten_review_reps], dim=-1) # (bs, seq_len, ffn_size*2)
fusion_latent = self.fusion_latent_norm(fusion_latent) # (bs, seq_len, ffn_size)
return fusion_latent # (bs, seq_len, ffn_size)
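# (editor's note, hedged) Generation head of the model above: the dialog latent
# attends over the per-review representations with a plain dot-product softmax
# (padded reviews masked to -inf), the attended review vector is concatenated to
# the dialog latent and projected back to ffn_size by fusion_latent_norm, and the
# vocabulary logits come from F.linear against the decoder token embedding matrix,
# i.e. the output projection is tied to the input embeddings.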
class CFSelectionConvModel(nn.Module):
def __init__(self, opt, device, vocab, side_data, decoder_token_embedding):
"""
Args:
            opt (dict): A dictionary recording the hyperparameters.
            device (torch.device): A variable indicating which device to place the data and model.
            vocab (dict): A dictionary recording the vocabulary information.
            side_data (dict): A dictionary recording the side data.
"""
super().__init__()
self.opt, self.device, self.vocab, self.side_data, self.decoder_token_embedding = opt, device, vocab, side_data, decoder_token_embedding
# vocab
self.vocab_size = vocab['vocab_size']
self.pad_token_idx = vocab['pad']
self.start_token_idx = vocab['start']
self.end_token_idx = vocab['end']
self.token_emb_dim = opt['token_emb_dim']
self.pretrained_embedding = side_data.get('embedding', None)
# kg
self.n_word = side_data['word_kg']['n_entity']
self.kg_name = opt['kg_name']
self.n_entity = side_data[self.kg_name]['n_entity']
self.pad_word_idx = vocab['pad_word']
self.pad_entity_idx = vocab['pad_entity']
entity_kg = side_data['entity_kg']
self.n_relation = entity_kg['n_relation']
entity_edges = entity_kg['edge']
self.entity_edge_idx, self.entity_edge_type = edge_to_pyg_format(entity_edges, 'RGCN')
self.entity_edge_idx = self.entity_edge_idx.to(device)
self.entity_edge_type = self.entity_edge_type.to(device)
word_edges = side_data['word_kg']['edge']
self.word_edges = edge_to_pyg_format(word_edges, 'GCN').to(device)
self.num_bases = opt['num_bases']
self.kg_emb_dim = opt['kg_emb_dim']
# transformer
self.n_heads = opt['n_heads']
self.n_layers = opt['n_layers']
self.ffn_size = opt['ffn_size']
self.dropout = opt['dropout']
self.attention_dropout = opt['attention_dropout']
self.relu_dropout = opt['relu_dropout']
self.learn_positional_embeddings = opt['learn_positional_embeddings']
self.embeddings_scale = opt['embeddings_scale']
self.reduction = opt['reduction']
self.n_positions = opt['n_positions']
self.response_truncate = opt.get('response_truncate', 20)
self.build_model()
def build_model(self):
self.db_model = DBModel(self.opt, self.device, self.vocab, self.side_data)
self.review_model = CoarseReviewModelForDecoder(self.opt, self.device, self.vocab, self.side_data)
self.decoder_model = DecoderCNSelectionModel(self.opt, self.device, self.vocab, self.side_data, self.decoder_token_embedding)
def model_context(self, conv_encoder, context_tokens):
encoder_output, encoder_mask = conv_encoder(context_tokens) # (last_hidden_state, mask) = (bs, seq_len, dim), (bs, seq_len)
return encoder_output, encoder_mask # (last_hidden_state, mask) = (bs, seq_len, dim), (bs, seq_len)
def forward(self, batch, mode, conv_encoder, kgModel, reviewModel):
# converse
context_tokens, context_entities, response = \
batch['context_tokens'], batch['context_entities'], batch['response']
entity_padding_mask = ~context_entities.eq(self.pad_entity_idx) # (bs, entity_len)
encoder_output, encoder_mask = self.model_context(conv_encoder, context_tokens) # (bs, seq_len, dim), (bs, seq_len)
# (bs, dim), (bs, n_context_entities, dim), (bs, ffn_size), (bs, n_context_entities, ffn_size)
entity_attn_rep, entity_representations, conv_entity_emb, conv_entity_reps = self.db_model(batch, mode, kgModel)
# (bs, ffn_size), (bs, n_review, ffn_size), (bs, nb_review), (bs, n_review, seq_len3, dim), (bs, n_review, seq_len3)
conv_review_emb, conv_review_reps, review_padding_mask, review_token_reps, review_token_padding_mask = self.review_model(
batch, mode, reviewModel)
loss, preds = self.decoder_model(
mode,
encoder_output, encoder_mask,
conv_entity_emb, conv_entity_reps, entity_padding_mask,
conv_review_emb, conv_review_reps, review_padding_mask,
review_token_reps, review_token_padding_mask,
response)
return loss, preds
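# (editor's note, hedged) CFSelectionConvModel.forward above orchestrates three
# sub-models: the shared conversation encoder yields (encoder_output, encoder_mask),
# DBModel contributes entity-level representations from the knowledge graph,
# CoarseReviewModelForDecoder contributes review-level and review-token-level
# representations, and DecoderCNSelectionModel consumes all of them to produce
# the response loss and the predicted tokens.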
|
[
"torch.nn.Dropout",
"numpy.sqrt",
"torch.nn.CrossEntropyLoss",
"math.sqrt",
"torch.nn.init.xavier_normal_",
"crslab.model.utils.modules.transformer.TransformerFFN",
"crslab.model.utils.modules.transformer.create_position_codes",
"torch.nn.functional.softmax",
"torch.arange",
"torch.nn.functional.linear",
"torch.nn.ModuleList",
"torch.nn.LayerNorm",
"crslab.model.utils.modules.transformer._create_selfattn_mask",
"crslab.model.utils.modules.transformer.MultiHeadAttention",
"crslab.model.utils.functions.edge_to_pyg_format",
"torch.nn.Embedding",
"crslab.model.utils.modules.attention.SelfAttentionSeq",
"torch.cat",
"torch.nn.init.normal_",
"loguru.logger.debug",
"torch.tensor",
"torch.nn.Linear",
"crslab.model.utils.modules.transformer._normalize"
] |
[((2147, 2187), 'crslab.model.utils.functions.edge_to_pyg_format', 'edge_to_pyg_format', (['entity_edges', '"""RGCN"""'], {}), "(entity_edges, 'RGCN')\n", (2165, 2187), False, 'from crslab.model.utils.functions import edge_to_pyg_format\n'), ((3280, 3321), 'torch.nn.Linear', 'nn.Linear', (['self.kg_emb_dim', 'self.ffn_size'], {}), '(self.kg_emb_dim, self.ffn_size)\n', (3289, 3321), False, 'from torch import nn\n'), ((3359, 3400), 'torch.nn.Linear', 'nn.Linear', (['self.kg_emb_dim', 'self.ffn_size'], {}), '(self.kg_emb_dim, self.ffn_size)\n', (3368, 3400), False, 'from torch import nn\n'), ((5720, 5760), 'crslab.model.utils.functions.edge_to_pyg_format', 'edge_to_pyg_format', (['entity_edges', '"""RGCN"""'], {}), "(entity_edges, 'RGCN')\n", (5738, 5760), False, 'from crslab.model.utils.functions import edge_to_pyg_format\n'), ((6862, 6906), 'torch.nn.Linear', 'nn.Linear', (['self.token_emb_dim', 'self.ffn_size'], {}), '(self.token_emb_dim, self.ffn_size)\n', (6871, 6906), False, 'from torch import nn\n'), ((6939, 6983), 'torch.nn.Linear', 'nn.Linear', (['self.token_emb_dim', 'self.ffn_size'], {}), '(self.token_emb_dim, self.ffn_size)\n', (6948, 6983), False, 'from torch import nn\n'), ((9317, 9338), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (9327, 9338), False, 'from torch import nn\n'), ((9383, 9402), 'torch.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (9392, 9402), False, 'from torch import nn\n'), ((9424, 9443), 'torch.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (9433, 9443), False, 'from torch import nn\n'), ((9465, 9484), 'torch.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (9474, 9484), False, 'from torch import nn\n'), ((9543, 9584), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.q_lin.weight'], {}), '(self.q_lin.weight)\n', (9565, 9584), False, 'from torch import nn\n'), ((9593, 9634), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.k_lin.weight'], {}), '(self.k_lin.weight)\n', (9615, 9634), False, 'from torch import nn\n'), ((9643, 9684), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.v_lin.weight'], {}), '(self.v_lin.weight)\n', (9665, 9684), False, 'from torch import nn\n'), ((9738, 9757), 'torch.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (9747, 9757), False, 'from torch import nn\n'), ((9766, 9809), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.out_lin.weight'], {}), '(self.out_lin.weight)\n', (9788, 9809), False, 'from torch import nn\n'), ((9855, 9909), 'crslab.model.utils.modules.attention.SelfAttentionSeq', 'SelfAttentionSeq', (['self.dim_per_head', 'self.dim_per_head'], {}), '(self.dim_per_head, self.dim_per_head)\n', (9871, 9909), False, 'from crslab.model.utils.modules.attention import SelfAttentionBatch, SelfAttentionSeq\n'), ((11242, 11265), 'math.sqrt', 'math.sqrt', (['dim_per_head'], {}), '(dim_per_head)\n', (11251, 11265), False, 'import math\n'), ((17829, 17850), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (17839, 17850), False, 'from torch import nn\n'), ((17882, 17952), 'crslab.model.utils.modules.transformer.MultiHeadAttention', 'MultiHeadAttention', (['n_heads', 'embedding_size'], {'dropout': 'attention_dropout'}), '(n_heads, embedding_size, dropout=attention_dropout)\n', (17900, 17952), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((17996, 
18024), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['embedding_size'], {}), '(embedding_size)\n', (18008, 18024), False, 'from torch import nn\n'), ((18059, 18129), 'crslab.model.utils.modules.transformer.MultiHeadAttention', 'MultiHeadAttention', (['n_heads', 'embedding_size'], {'dropout': 'attention_dropout'}), '(n_heads, embedding_size, dropout=attention_dropout)\n', (18077, 18129), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((18173, 18201), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['embedding_size'], {}), '(embedding_size)\n', (18185, 18201), False, 'from torch import nn\n'), ((18239, 18309), 'crslab.model.utils.modules.transformer.MultiHeadAttention', 'MultiHeadAttention', (['n_heads', 'embedding_size'], {'dropout': 'attention_dropout'}), '(n_heads, embedding_size, dropout=attention_dropout)\n', (18257, 18309), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((18356, 18384), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['embedding_size'], {}), '(embedding_size)\n', (18368, 18384), False, 'from torch import nn\n'), ((18426, 18496), 'crslab.model.utils.modules.transformer.MultiHeadAttention', 'MultiHeadAttention', (['n_heads', 'embedding_size'], {'dropout': 'attention_dropout'}), '(n_heads, embedding_size, dropout=attention_dropout)\n', (18444, 18496), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((18547, 18575), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['embedding_size'], {}), '(embedding_size)\n', (18559, 18575), False, 'from torch import nn\n'), ((18752, 18780), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['embedding_size'], {}), '(embedding_size)\n', (18764, 18780), False, 'from torch import nn\n'), ((18801, 18868), 'crslab.model.utils.modules.transformer.TransformerFFN', 'TransformerFFN', (['embedding_size', 'ffn_size'], {'relu_dropout': 'relu_dropout'}), '(embedding_size, ffn_size, relu_dropout=relu_dropout)\n', (18815, 18868), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((18890, 18918), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['embedding_size'], {}), '(embedding_size)\n', (18902, 18918), False, 'from torch import nn\n'), ((20374, 20398), 'crslab.model.utils.modules.transformer._create_selfattn_mask', '_create_selfattn_mask', (['x'], {}), '(x)\n', (20395, 20398), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((20622, 20647), 'crslab.model.utils.modules.transformer._normalize', '_normalize', (['x', 'self.norm1'], {}), '(x, self.norm1)\n', (20632, 20647), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((21192, 21220), 'crslab.model.utils.modules.transformer._normalize', '_normalize', (['x', 'self.norm2_db'], {}), '(x, self.norm2_db)\n', (21202, 21220), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((21762, 21794), 'crslab.model.utils.modules.transformer._normalize', '_normalize', (['x', 'self.norm2_review'], {}), '(x, 
self.norm2_review)\n', (21772, 21794), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((22413, 22446), 'crslab.model.utils.modules.transformer._normalize', '_normalize', (['x', 'self.norm2_review2'], {}), '(x, self.norm2_review2)\n', (22423, 22446), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((22838, 22863), 'crslab.model.utils.modules.transformer._normalize', '_normalize', (['x', 'self.norm2'], {}), '(x, self.norm2)\n', (22848, 22863), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((23077, 23102), 'crslab.model.utils.modules.transformer._normalize', '_normalize', (['x', 'self.norm3'], {}), '(x, self.norm3)\n', (23087, 23102), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((24940, 24976), 'loguru.logger.debug', 'logger.debug', (['"""[forward_d_c_db_r_f]"""'], {}), "('[forward_d_c_db_r_f]')\n", (24952, 24976), False, 'from loguru import logger\n'), ((26131, 26150), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (26141, 26150), False, 'from torch import nn\n'), ((26779, 26820), 'torch.nn.Embedding', 'nn.Embedding', (['n_positions', 'embedding_size'], {}), '(n_positions, embedding_size)\n', (26791, 26820), False, 'from torch import nn\n'), ((27263, 27278), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (27276, 27278), False, 'from torch import nn\n'), ((30555, 30570), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (30568, 30570), False, 'from torch import nn\n'), ((31916, 31956), 'crslab.model.utils.functions.edge_to_pyg_format', 'edge_to_pyg_format', (['entity_edges', '"""RGCN"""'], {}), "(entity_edges, 'RGCN')\n", (31934, 31956), False, 'from crslab.model.utils.functions import edge_to_pyg_format\n'), ((34752, 34812), 'torch.nn.Linear', 'nn.Linear', (['(self.ffn_size * n_copy_source)', 'self.token_emb_dim'], {}), '(self.ffn_size * n_copy_source, self.token_emb_dim)\n', (34761, 34812), False, 'from torch import nn\n'), ((34840, 34886), 'torch.nn.Linear', 'nn.Linear', (['self.token_emb_dim', 'self.vocab_size'], {}), '(self.token_emb_dim, self.vocab_size)\n', (34849, 34886), False, 'from torch import nn\n'), ((34922, 34982), 'torch.nn.Linear', 'nn.Linear', (['(self.ffn_size * n_copy_source)', 'self.token_emb_dim'], {}), '(self.ffn_size * n_copy_source, self.token_emb_dim)\n', (34931, 34982), False, 'from torch import nn\n'), ((40066, 40090), 'torch.cat', 'torch.cat', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (40075, 40090), False, 'import torch\n'), ((41831, 41891), 'torch.nn.functional.linear', 'F.linear', (['fusion_latent', 'self.decoder_token_embedding.weight'], {}), '(fusion_latent, self.decoder_token_embedding.weight)\n', (41839, 41891), True, 'import torch.nn.functional as F\n'), ((43792, 43852), 'torch.cat', 'torch.cat', (['[dialog_latent, decode_atten_review_reps]'], {'dim': '(-1)'}), '([dialog_latent, decode_atten_review_reps], dim=-1)\n', (43801, 43852), False, 'import torch\n'), ((45425, 45465), 'crslab.model.utils.functions.edge_to_pyg_format', 'edge_to_pyg_format', (['entity_edges', '"""RGCN"""'], {}), "(entity_edges, 'RGCN')\n", (45443, 45465), False, 'from crslab.model.utils.functions 
import edge_to_pyg_format\n'), ((26877, 26964), 'crslab.model.utils.modules.transformer.create_position_codes', 'create_position_codes', (['n_positions', 'embedding_size'], {'out': 'position_embeddings.weight'}), '(n_positions, embedding_size, out=position_embeddings.\n weight)\n', (26898, 26964), False, 'from crslab.model.utils.modules.transformer import MultiHeadAttention, TransformerFFN, _create_selfattn_mask, _normalize, create_position_codes\n'), ((27016, 27086), 'torch.nn.init.normal_', 'nn.init.normal_', (['position_embeddings.weight', '(0)', '(embedding_size ** -0.5)'], {}), '(position_embeddings.weight, 0, embedding_size ** -0.5)\n', (27031, 27086), False, 'from torch import nn\n'), ((33493, 33547), 'torch.tensor', 'torch.tensor', (['[self.start_token_idx]'], {'dtype': 'torch.long'}), '([self.start_token_idx], dtype=torch.long)\n', (33505, 33547), False, 'import torch\n'), ((34538, 34590), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': 'self.pad_token_idx'}), '(ignore_index=self.pad_token_idx)\n', (34557, 34590), False, 'from torch import nn\n'), ((39850, 39883), 'torch.cat', 'torch.cat', (['(inputs, preds)'], {'dim': '(1)'}), '((inputs, preds), dim=1)\n', (39859, 39883), False, 'import torch\n'), ((2392, 2429), 'crslab.model.utils.functions.edge_to_pyg_format', 'edge_to_pyg_format', (['word_edges', '"""GCN"""'], {}), "(word_edges, 'GCN')\n", (2410, 2429), False, 'from crslab.model.utils.functions import edge_to_pyg_format\n'), ((5965, 6002), 'crslab.model.utils.functions.edge_to_pyg_format', 'edge_to_pyg_format', (['word_edges', '"""GCN"""'], {}), "(word_edges, 'GCN')\n", (5983, 6002), False, 'from crslab.model.utils.functions import edge_to_pyg_format\n'), ((15485, 15512), 'torch.nn.functional.softmax', 'F.softmax', (['dot_prod'], {'dim': '(-1)'}), '(dot_prod, dim=-1)\n', (15494, 15512), True, 'import torch.nn.functional as F\n'), ((28941, 28958), 'numpy.sqrt', 'np.sqrt', (['self.dim'], {}), '(self.dim)\n', (28948, 28958), True, 'import numpy as np\n'), ((29343, 29379), 'torch.arange', 'torch.arange', (['seq_len'], {'out': 'positions'}), '(seq_len, out=positions)\n', (29355, 29379), False, 'import torch\n'), ((32161, 32198), 'crslab.model.utils.functions.edge_to_pyg_format', 'edge_to_pyg_format', (['word_edges', '"""GCN"""'], {}), "(word_edges, 'GCN')\n", (32179, 32198), False, 'from crslab.model.utils.functions import edge_to_pyg_format\n'), ((36827, 36871), 'torch.cat', 'torch.cat', (['(start, response[:, :-1])'], {'dim': '(-1)'}), '((start, response[:, :-1]), dim=-1)\n', (36836, 36871), False, 'import torch\n'), ((43476, 43503), 'torch.nn.functional.softmax', 'F.softmax', (['dot_prod'], {'dim': '(-1)'}), '(dot_prod, dim=-1)\n', (43485, 43503), True, 'import torch.nn.functional as F\n'), ((45670, 45707), 'crslab.model.utils.functions.edge_to_pyg_format', 'edge_to_pyg_format', (['word_edges', '"""GCN"""'], {}), "(word_edges, 'GCN')\n", (45688, 45707), False, 'from crslab.model.utils.functions import edge_to_pyg_format\n')]
|
"""
Implementation of a standard financial plot visualization using Chaco
renderers and scales. This differs from the financial_plot.py example
in that it uses a date-oriented axis.
"""
# Major library imports
from numpy import abs, cumprod, linspace, random
import time
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayDataSource, BarPlot, DataRange1D, \
LinearMapper, VPlotContainer, PlotAxis, \
FilledLinePlot, add_default_grids, PlotLabel
from chaco.tools.api import PanTool, ZoomTool
from chaco.scales.api import CalendarScaleSystem
from chaco.scales_tick_generator import ScalesTickGenerator
def create_dates(numpoints, units="days"):
""" Returns **numpoints** number of dates that evenly bracket the current
date and time. **units** should be one of "weeks", "days", "hours"
"minutes", or "seconds".
"""
units_map = {
"weeks": 7 * 24 * 3600,
"days": 24 * 3600,
"hours": 3600,
"minutes": 60,
"seconds": 1
}
now = time.time()
dt = units_map[units]
dates = linspace(now, now + numpoints * dt, numpoints)
return dates
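# Hedged example (editor's addition, not part of the original Chaco demo): the
# values returned above are plain epoch seconds, which is exactly what the
# ArrayDataSource / CalendarScaleSystem pair in _create_plot_component expects.
# This helper is never called; it only illustrates the spacing.
def _example_hourly_dates():
    dates = create_dates(5, units="hours")
    assert len(dates) == 5
    assert abs(dates[-1] - dates[0] - 5 * 3600) < 1e-3  # linspace spans numpoints * dt seconds
    return dates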
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create the data and datasource objects
# In order for the date axis to work, the index data points need to
# be in units of seconds since the epoch. This is because we are using
# the CalendarScaleSystem, whose formatters interpret the numerical values
# as seconds since the epoch.
numpoints = 500
index = create_dates(numpoints)
returns = random.lognormal(0.01, 0.1, size=numpoints)
price = 100.0 * cumprod(returns)
volume = abs(random.normal(1000.0, 1500.0, size=numpoints) + 2000.0)
time_ds = ArrayDataSource(index)
vol_ds = ArrayDataSource(volume, sort_order="none")
price_ds = ArrayDataSource(price, sort_order="none")
xmapper = LinearMapper(range=DataRange1D(time_ds))
vol_mapper = LinearMapper(range=DataRange1D(vol_ds))
price_mapper = LinearMapper(range=DataRange1D(price_ds))
price_plot = FilledLinePlot(
index=time_ds,
value=price_ds,
index_mapper=xmapper,
value_mapper=price_mapper,
edge_color="blue",
face_color="paleturquoise",
bgcolor="white",
border_visible=True)
    price_plot.overlays.append(PlotAxis(price_plot, orientation='left'))
# Set the plot's bottom axis to use the Scales ticking system
bottom_axis = PlotAxis(
price_plot,
orientation="bottom", # mapper=xmapper,
tick_generator=ScalesTickGenerator(scale=CalendarScaleSystem()))
price_plot.overlays.append(bottom_axis)
hgrid, vgrid = add_default_grids(price_plot)
vgrid.tick_generator = bottom_axis.tick_generator
price_plot.tools.append(
PanTool(
price_plot, constrain=True, constrain_direction="x"))
price_plot.overlays.append(
ZoomTool(
price_plot,
drag_button="right",
always_on=True,
tool_mode="range",
axis="index",
max_zoom_out_factor=10.0, ))
vol_plot = BarPlot(
index=time_ds,
value=vol_ds,
index_mapper=xmapper,
value_mapper=vol_mapper,
line_color="transparent",
fill_color="black",
bar_width=1.0,
bar_width_type="screen",
antialias=False,
height=100,
resizable="h",
bgcolor="white",
border_visible=True)
hgrid, vgrid = add_default_grids(vol_plot)
# Use the same tick generator as the x-axis on the price plot
vgrid.tick_generator = bottom_axis.tick_generator
vol_plot.underlays.append(PlotAxis(vol_plot, orientation='left'))
vol_plot.tools.append(
PanTool(
vol_plot, constrain=True, constrain_direction="x"))
container = VPlotContainer(
bgcolor="lightblue", spacing=40, padding=50, fill_padding=False)
container.add(vol_plot)
container.add(price_plot)
container.overlays.append(
PlotLabel(
"Financial Plot with Date Axis",
component=container,
#font="Times New Roman 24"))
font="Arial 24"))
return container
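# (editor's note, hedged) Both plots above share the same x-mapper and the same
# date-aware tick generator, so the volume bars stay aligned with the price line
# and their vertical grids use identical tick positions; VPlotContainer stacks
# the volume plot below the price plot.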
#===============================================================================
# Attributes to use for the plot view.
size = (800, 600)
title = "Financial plot example"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item(
'plot', editor=ComponentEditor(size=size), show_label=False),
orientation="vertical"),
resizable=True,
title=title,
width=size[0],
height=size[1])
def _plot_default(self):
return _create_plot_component()
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
#--EOF---
|
[
"chaco.api.FilledLinePlot",
"traits.api.Instance",
"numpy.random.normal",
"chaco.tools.api.PanTool",
"chaco.tools.api.ZoomTool",
"numpy.cumprod",
"chaco.scales.api.CalendarScaleSystem",
"chaco.api.add_default_grids",
"chaco.api.BarPlot",
"chaco.api.DataRange1D",
"chaco.api.ArrayDataSource",
"numpy.linspace",
"enable.api.ComponentEditor",
"chaco.api.PlotAxis",
"chaco.api.PlotLabel",
"chaco.api.VPlotContainer",
"time.time",
"numpy.random.lognormal"
] |
[((1169, 1180), 'time.time', 'time.time', ([], {}), '()\n', (1178, 1180), False, 'import time\n'), ((1219, 1265), 'numpy.linspace', 'linspace', (['now', '(now + numpoints * dt)', 'numpoints'], {}), '(now, now + numpoints * dt, numpoints)\n', (1227, 1265), False, 'from numpy import abs, cumprod, linspace, random\n'), ((1881, 1924), 'numpy.random.lognormal', 'random.lognormal', (['(0.01)', '(0.1)'], {'size': 'numpoints'}), '(0.01, 0.1, size=numpoints)\n', (1897, 1924), False, 'from numpy import abs, cumprod, linspace, random\n'), ((2050, 2072), 'chaco.api.ArrayDataSource', 'ArrayDataSource', (['index'], {}), '(index)\n', (2065, 2072), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((2086, 2128), 'chaco.api.ArrayDataSource', 'ArrayDataSource', (['volume'], {'sort_order': '"""none"""'}), "(volume, sort_order='none')\n", (2101, 2128), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((2144, 2185), 'chaco.api.ArrayDataSource', 'ArrayDataSource', (['price'], {'sort_order': '"""none"""'}), "(price, sort_order='none')\n", (2159, 2185), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((2378, 2566), 'chaco.api.FilledLinePlot', 'FilledLinePlot', ([], {'index': 'time_ds', 'value': 'price_ds', 'index_mapper': 'xmapper', 'value_mapper': 'price_mapper', 'edge_color': '"""blue"""', 'face_color': '"""paleturquoise"""', 'bgcolor': '"""white"""', 'border_visible': '(True)'}), "(index=time_ds, value=price_ds, index_mapper=xmapper,\n value_mapper=price_mapper, edge_color='blue', face_color=\n 'paleturquoise', bgcolor='white', border_visible=True)\n", (2392, 2566), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((2997, 3026), 'chaco.api.add_default_grids', 'add_default_grids', (['price_plot'], {}), '(price_plot)\n', (3014, 3026), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((3443, 3707), 'chaco.api.BarPlot', 'BarPlot', ([], {'index': 'time_ds', 'value': 'vol_ds', 'index_mapper': 'xmapper', 'value_mapper': 'vol_mapper', 'line_color': '"""transparent"""', 'fill_color': '"""black"""', 'bar_width': '(1.0)', 'bar_width_type': '"""screen"""', 'antialias': '(False)', 'height': '(100)', 'resizable': '"""h"""', 'bgcolor': '"""white"""', 'border_visible': '(True)'}), "(index=time_ds, value=vol_ds, index_mapper=xmapper, value_mapper=\n vol_mapper, line_color='transparent', fill_color='black', bar_width=1.0,\n bar_width_type='screen', antialias=False, height=100, resizable='h',\n bgcolor='white', border_visible=True)\n", (3450, 3707), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((3820, 3847), 'chaco.api.add_default_grids', 'add_default_grids', (['vol_plot'], {}), '(vol_plot)\n', (3837, 3847), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((4163, 4242), 'chaco.api.VPlotContainer', 'VPlotContainer', ([], {'bgcolor': '"""lightblue"""', 'spacing': '(40)', 
'padding': '(50)', 'fill_padding': '(False)'}), "(bgcolor='lightblue', spacing=40, padding=50, fill_padding=False)\n", (4177, 4242), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((4958, 4977), 'traits.api.Instance', 'Instance', (['Component'], {}), '(Component)\n', (4966, 4977), False, 'from traits.api import HasTraits, Instance\n'), ((1945, 1961), 'numpy.cumprod', 'cumprod', (['returns'], {}), '(returns)\n', (1952, 1961), False, 'from numpy import abs, cumprod, linspace, random\n'), ((3119, 3179), 'chaco.tools.api.PanTool', 'PanTool', (['price_plot'], {'constrain': '(True)', 'constrain_direction': '"""x"""'}), "(price_plot, constrain=True, constrain_direction='x')\n", (3126, 3179), False, 'from chaco.tools.api import PanTool, ZoomTool\n'), ((3234, 3354), 'chaco.tools.api.ZoomTool', 'ZoomTool', (['price_plot'], {'drag_button': '"""right"""', 'always_on': '(True)', 'tool_mode': '"""range"""', 'axis': '"""index"""', 'max_zoom_out_factor': '(10.0)'}), "(price_plot, drag_button='right', always_on=True, tool_mode='range',\n axis='index', max_zoom_out_factor=10.0)\n", (3242, 3354), False, 'from chaco.tools.api import PanTool, ZoomTool\n'), ((3998, 4036), 'chaco.api.PlotAxis', 'PlotAxis', (['vol_plot'], {'orientation': '"""left"""'}), "(vol_plot, orientation='left')\n", (4006, 4036), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((4073, 4131), 'chaco.tools.api.PanTool', 'PanTool', (['vol_plot'], {'constrain': '(True)', 'constrain_direction': '"""x"""'}), "(vol_plot, constrain=True, constrain_direction='x')\n", (4080, 4131), False, 'from chaco.tools.api import PanTool, ZoomTool\n'), ((4349, 4434), 'chaco.api.PlotLabel', 'PlotLabel', (['"""Financial Plot with Date Axis"""'], {'component': 'container', 'font': '"""Arial 24"""'}), "('Financial Plot with Date Axis', component=container, font='Arial 24'\n )\n", (4358, 4434), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((1979, 2024), 'numpy.random.normal', 'random.normal', (['(1000.0)', '(1500.0)'], {'size': 'numpoints'}), '(1000.0, 1500.0, size=numpoints)\n', (1992, 2024), False, 'from numpy import abs, cumprod, linspace, random\n'), ((2220, 2240), 'chaco.api.DataRange1D', 'DataRange1D', (['time_ds'], {}), '(time_ds)\n', (2231, 2240), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((2278, 2297), 'chaco.api.DataRange1D', 'DataRange1D', (['vol_ds'], {}), '(vol_ds)\n', (2289, 2297), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((2337, 2358), 'chaco.api.DataRange1D', 'DataRange1D', (['price_ds'], {}), '(price_ds)\n', (2348, 2358), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((2654, 2694), 'chaco.api.PlotAxis', 'PlotAxis', (['price_plot'], {'orientation': '"""left"""'}), "(price_plot, orientation='left')\n", (2662, 2694), False, 'from chaco.api import ArrayDataSource, BarPlot, DataRange1D, LinearMapper, VPlotContainer, PlotAxis, FilledLinePlot, add_default_grids, PlotLabel\n'), ((2910, 
2931), 'chaco.scales.api.CalendarScaleSystem', 'CalendarScaleSystem', ([], {}), '()\n', (2929, 2931), False, 'from chaco.scales.api import CalendarScaleSystem\n'), ((5067, 5093), 'enable.api.ComponentEditor', 'ComponentEditor', ([], {'size': 'size'}), '(size=size)\n', (5082, 5093), False, 'from enable.api import Component, ComponentEditor\n')]
|
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lale.lib.autoai_libs import float32_transform
from lale.lib.lale import Hyperopt, ConcatFeatures, NoOp
from lale.lib.sklearn import LogisticRegression as LR
import autoai_libs.utils.fc_methods
import lale.lib.autoai_libs
import numpy as np
import sklearn.datasets
import sklearn.model_selection
import unittest
class TestAutoaiLibs(unittest.TestCase):
@classmethod
def setUpClass(cls):
iris = sklearn.datasets.load_iris()
iris_X, iris_y = iris.data, iris.target
iris_train_X, iris_test_X, iris_train_y, iris_test_y = \
sklearn.model_selection.train_test_split(iris_X, iris_y)
cls._iris = {'train_X': iris_train_X, 'train_y': iris_train_y,
'test_X': iris_test_X, 'test_y': iris_test_y}
def doTest(self, trainable, train_X, train_y, test_X, test_y):
trained = trainable.fit(train_X, train_y)
transformed = trained.transform(test_X)
with self.assertWarns(DeprecationWarning):
trainable.transform(train_X)
trainable.to_json()
trainable_pipeline = trainable >> float32_transform() >> LR()
trained_pipeline = trainable_pipeline.fit(train_X, train_y)
trained_pipeline.predict(test_X)
hyperopt = Hyperopt(estimator=trainable_pipeline, max_evals=1)
trained_hyperopt = hyperopt.fit(train_X, train_y)
trained_hyperopt.predict(test_X)
def do1DTest(self, trainable, train_X, train_y, test_X, test_y):
#Test for 1-D array as input to the transformers
train_X = train_X[:,0]
test_X = test_X[:,0]
trainable_pipeline = (trainable & NoOp()) >> ConcatFeatures() >> float32_transform() >> LR()
trained_pipeline = trainable_pipeline.fit(train_X, train_y)
trained_pipeline.predict(test_X)
hyperopt = Hyperopt(estimator=trainable_pipeline, max_evals=1)
trained_hyperopt = hyperopt.fit(train_X, train_y)
trained_hyperopt.predict(test_X)
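    # (editor's note, hedged) The helpers above rely on lale's pipeline combinators:
    # `a >> b` feeds a's output into b, `a & b` applies both operators to the same
    # input, and ConcatFeatures() merges the parallel branches, e.g.
    #
    #     pipeline = (trainable & NoOp()) >> ConcatFeatures() >> float32_transform() >> LR()
    #     Hyperopt(estimator=pipeline, max_evals=1).fit(train_X, train_y)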
def test_NumpyColumnSelector(self):
trainable = lale.lib.autoai_libs.NumpyColumnSelector()
self.doTest(trainable, **self._iris)
self.do1DTest(trainable, **self._iris)
def test_CompressStrings(self):
n_columns = self._iris['train_X'].shape[1]
trainable = lale.lib.autoai_libs.CompressStrings(
dtypes_list=['int_num' for i in range(n_columns)],
misslist_list=[[] for i in range(n_columns)])
self.doTest(trainable, **self._iris)
self.do1DTest(trainable, **self._iris)
def test_NumpyReplaceMissingValues(self):
trainable = lale.lib.autoai_libs.NumpyReplaceMissingValues()
self.doTest(trainable, **self._iris)
self.do1DTest(trainable, **self._iris)
def test_NumpyReplaceUnknownValues(self):
trainable = lale.lib.autoai_libs.NumpyReplaceUnknownValues(
filling_values=42.0)
self.doTest(trainable, **self._iris)
self.do1DTest(trainable, **self._iris)
def test_boolean2float(self):
trainable = lale.lib.autoai_libs.boolean2float()
self.doTest(trainable, **self._iris)
self.do1DTest(trainable, **self._iris)
def test_CatImputer(self):
trainable = lale.lib.autoai_libs.CatImputer()
self.doTest(trainable, **self._iris)
self.do1DTest(trainable, **self._iris)
def test_CatEncoder(self):
trainable = lale.lib.autoai_libs.CatEncoder()
self.doTest(trainable, **self._iris)
self.do1DTest(trainable, **self._iris)
def test_float32_transform(self):
trainable = lale.lib.autoai_libs.float32_transform()
self.doTest(trainable, **self._iris)
self.do1DTest(trainable, **self._iris)
def test_FloatStr2Float(self):
n_columns = self._iris['train_X'].shape[1]
trainable = lale.lib.autoai_libs.FloatStr2Float(
dtypes_list=['int_num' for i in range(n_columns)])
self.doTest(trainable, **self._iris)
self.do1DTest(trainable, **self._iris)
def test_OptStandardScaler(self):
trainable = lale.lib.autoai_libs.OptStandardScaler()
self.doTest(trainable, **self._iris)
self.do1DTest(trainable, **self._iris)
def test_NumImputer(self):
trainable = lale.lib.autoai_libs.NumImputer()
self.doTest(trainable, **self._iris)
self.do1DTest(trainable, **self._iris)
def test_NumpyPermuteArray(self):
trainable = lale.lib.autoai_libs.NumpyPermuteArray(
axis=0, permutation_indices=[2,0,1,3])
self.doTest(trainable, **self._iris)
self.do1DTest(trainable, **self._iris)
def test_TA1(self):
from autoai_libs.utils.fc_methods import is_not_categorical
float32 = np.dtype('float32')
trainable = lale.lib.autoai_libs.TA1(
fun=np.rint, name='round',
datatypes=['numeric'], feat_constraints=[is_not_categorical],
col_names=['a', 'b', 'c', 'd'],
col_dtypes=[float32, float32, float32, float32],
)
self.doTest(trainable, **self._iris)
def test_TA2(self):
from autoai_libs.utils.fc_methods import is_not_categorical
float32 = np.dtype('float32')
trainable = lale.lib.autoai_libs.TA2(
fun=np.add, name='sum',
datatypes1=['numeric'], feat_constraints1=[is_not_categorical],
datatypes2=['numeric'], feat_constraints2=[is_not_categorical],
col_names=['a', 'b', 'c', 'd'],
col_dtypes=[float32, float32, float32, float32],
)
self.doTest(trainable, **self._iris)
def test_TB1(self):
from sklearn.preprocessing import StandardScaler
from autoai_libs.utils.fc_methods import is_not_categorical
float32 = np.dtype('float32')
trainable = lale.lib.autoai_libs.TB1(
tans_class=StandardScaler, name='stdscaler',
datatypes=['numeric'], feat_constraints=[is_not_categorical],
col_names=['a', 'b', 'c', 'd'],
col_dtypes=[float32, float32, float32, float32],
)
self.doTest(trainable, **self._iris)
def test_TB2(self):
pass #TODO: not sure how to instantiate, what to pass for tans_class
def test_TAM(self):
from autoai_libs.cognito.transforms.transform_extras import IsolationForestAnomaly
float32 = np.dtype('float32')
trainable = lale.lib.autoai_libs.TAM(
tans_class=IsolationForestAnomaly, name='isoforestanomaly',
col_names=['a', 'b', 'c', 'd'],
col_dtypes=[float32, float32, float32, float32],
)
self.doTest(trainable, **self._iris)
def test_TGen(self):
from autoai_libs.cognito.transforms.transform_extras import NXOR
from autoai_libs.utils.fc_methods import is_not_categorical
float32 = np.dtype('float32')
trainable = lale.lib.autoai_libs.TGen(
fun=NXOR, name='nxor', arg_count=2,
datatypes_list=[['numeric'], ['numeric']],
feat_constraints_list=[[is_not_categorical], [is_not_categorical]],
col_names=['a', 'b', 'c', 'd'],
col_dtypes=[float32, float32, float32, float32],
)
self.doTest(trainable, **self._iris)
def test_FS1(self):
trainable = lale.lib.autoai_libs.FS1(
cols_ids_must_keep=[1],
additional_col_count_to_keep=3,
ptype='classification',
)
self.doTest(trainable, **self._iris)
def test_FS2(self):
from sklearn.ensemble import ExtraTreesClassifier
trainable = lale.lib.autoai_libs.FS2(
cols_ids_must_keep=[1],
additional_col_count_to_keep=3,
ptype='classification',
eval_algo=ExtraTreesClassifier,
)
self.doTest(trainable, **self._iris)
|
[
"numpy.dtype",
"lale.lib.lale.Hyperopt",
"lale.lib.sklearn.LogisticRegression",
"lale.lib.autoai_libs.float32_transform",
"lale.lib.lale.NoOp",
"lale.lib.lale.ConcatFeatures"
] |
[((1824, 1875), 'lale.lib.lale.Hyperopt', 'Hyperopt', ([], {'estimator': 'trainable_pipeline', 'max_evals': '(1)'}), '(estimator=trainable_pipeline, max_evals=1)\n', (1832, 1875), False, 'from lale.lib.lale import Hyperopt, ConcatFeatures, NoOp\n'), ((2391, 2442), 'lale.lib.lale.Hyperopt', 'Hyperopt', ([], {'estimator': 'trainable_pipeline', 'max_evals': '(1)'}), '(estimator=trainable_pipeline, max_evals=1)\n', (2399, 2442), False, 'from lale.lib.lale import Hyperopt, ConcatFeatures, NoOp\n'), ((5299, 5318), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (5307, 5318), True, 'import numpy as np\n'), ((5749, 5768), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (5757, 5768), True, 'import numpy as np\n'), ((6331, 6350), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (6339, 6350), True, 'import numpy as np\n'), ((6924, 6943), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (6932, 6943), True, 'import numpy as np\n'), ((7407, 7426), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (7415, 7426), True, 'import numpy as np\n'), ((1691, 1695), 'lale.lib.sklearn.LogisticRegression', 'LR', ([], {}), '()\n', (1693, 1695), True, 'from lale.lib.sklearn import LogisticRegression as LR\n'), ((2258, 2262), 'lale.lib.sklearn.LogisticRegression', 'LR', ([], {}), '()\n', (2260, 2262), True, 'from lale.lib.sklearn import LogisticRegression as LR\n'), ((1668, 1687), 'lale.lib.autoai_libs.float32_transform', 'float32_transform', ([], {}), '()\n', (1685, 1687), False, 'from lale.lib.autoai_libs import float32_transform\n'), ((2235, 2254), 'lale.lib.autoai_libs.float32_transform', 'float32_transform', ([], {}), '()\n', (2252, 2254), False, 'from lale.lib.autoai_libs import float32_transform\n'), ((2215, 2231), 'lale.lib.lale.ConcatFeatures', 'ConcatFeatures', ([], {}), '()\n', (2229, 2231), False, 'from lale.lib.lale import Hyperopt, ConcatFeatures, NoOp\n'), ((2204, 2210), 'lale.lib.lale.NoOp', 'NoOp', ([], {}), '()\n', (2208, 2210), False, 'from lale.lib.lale import Hyperopt, ConcatFeatures, NoOp\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import models.func as fc
# from models.func import node_deleting
import models.test as ts
# from models.test import test_part
import copy
import torch
import operator
# import test  # unused: shadowed by the test() function defined below
from torch import nn
from numpy import linalg as LA
import logging
import torch.nn.functional as F
import numpy as np
logger = logging.getLogger("main_fed")
logger.setLevel(level=logging.DEBUG)
def FedAvg(w, idxs_users):
# models = list(w.values())
    w_avg = copy.deepcopy(w[idxs_users[0]])  # deepcopy so the first client's weights are not overwritten in place
for k in w_avg.keys():
for i in range(1, len(idxs_users)):
w_avg[k] += w[idxs_users[i]][k]
w_avg[k] = torch.div(w_avg[k], len(idxs_users))
return w_avg
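# Hedged usage sketch (editor's addition): `w` maps client ids to state_dicts with
# identical keys, so, for example,
#
#     w = {3: copy.deepcopy(net.state_dict()), 7: copy.deepcopy(net.state_dict())}
#     w_glob = FedAvg(w, idxs_users=[3, 7])  # element-wise mean over the selected clients
#     net_glob.load_state_dict(w_glob)
#
# The deepcopy of the first selected client's weights keeps that client's local
# model from being mutated while the sum is accumulated in place.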
def average(grad_all):
value_list = list(grad_all.values())
w_avg = copy.deepcopy(value_list[0])
# print(type(w_avg))
for i in range(1, len(value_list)):
w_avg += value_list[i]
return w_avg / len(value_list)
def Feddel(net_glob, w_locals, gradient, idxs_users, max_now, dataset_test, test_sampler, args, test_count):
full_user = copy.copy(idxs_users)
# nr_th = len(idxs_users) * 0.7
gradient.pop('avg_grad')
expect_list = {}
labeled = []
while len(w_locals) > 8:
expect_list = fc.node_deleting(expect_list, max_now, idxs_users, gradient)
# print(len(w_locals), expect_list)
key = max(expect_list.items(), key=operator.itemgetter(1))[0]
if expect_list[key] <= expect_list["all"]:
# w_glob = FedAvg(w_locals, idxs_users)
            # keep the current w_locals and idxs_users and stop pruning
break
else:
labeled.append(key)
test_count[key][1] += 1
expect_list.pop("all")
# print(key)
loss_all, loss_pop, idxs_users= ts.test_part(net_glob, w_locals, idxs_users, key, dataset_test, test_sampler, args)
# print(loss_all, loss_pop)
if loss_all < loss_pop:
                idxs_users.append(key)  # restore the client removed inside test_part; the full set has lower loss
break
else:
# idxs_users.remove(key)
w_locals.pop(key)
gradient.pop(key)
max_now = expect_list[key]
expect_list.pop(key)
# print(idxs_users, len(w_locals), expect_list.keys())
# print(loss_all, loss_pop, worker_ind)
return w_locals, full_user, idxs_users, labeled, test_count
def Fedbn2(w, gradient):
g_norm = {}
for idx in list(gradient.keys()):
g_norm[idx] = LA.norm(gradient[idx])
avg_iid, avg_niid = get_avg(g_norm)
n_items = {k: w[k] for k in list(w)[:10]}
# logger.info('left %s', sorted(list(n_items.keys())))
w_agg = FedAvg(w, list(n_items.keys()))
return w_agg, avg_iid, avg_niid
def Diff(li1, li2):
return (list(list(set(li1)-set(li2)) + list(set(li2)-set(li1))))
def get_avg(g_norm):
key = list(sorted(g_norm.keys()))
iid = []
for i in range(len(key)):
if key[i] < 25:
iid.append(key[i])
niid = Diff(key, iid)
# print(iid, niid)
    avg = g_norm[iid[0]]
    for i in range(1, len(iid)):
        # print('iid', iid[i])
        avg += g_norm[iid[i]]
    avg_1 = g_norm[niid[0]]
    for i in range(1, len(niid)):
        # print('niid', niid[i])
        avg_1 += g_norm[niid[i]]
return avg/len(iid), avg_1/len(niid)
# below function is for synthetic dataset
def Feddel_syn(net_glob, w_locals, gradient, idxs_users, max_now, dataset_test, args, test_count):
full_user = copy.copy(idxs_users)
# nr_th = len(idxs_users) * 0.7
gradient.pop('avg_grad')
expect_list = {}
labeled = []
while len(w_locals) > 8:
expect_list = fc.node_deleting(expect_list, max_now, idxs_users, gradient)
# print(len(w_locals), expect_list)
key = max(expect_list.items(), key=operator.itemgetter(1))[0]
if expect_list[key] <= expect_list["all"]:
# w_glob = FedAvg(w_locals, idxs_users)
            # no client is worth removing; keep the current w_locals and idxs_users
break
else:
labeled.append(key)
test_count[key][1] += 1
expect_list.pop("all")
loss_all, loss_pop, idxs_users= test_part(net_glob, w_locals, idxs_users, key, dataset_test, args)
if loss_all < loss_pop:
                idxs_users.append(key)  # restore the client removed inside test_part; the full set has lower loss
break
else:
w_locals.pop(key)
gradient.pop(key)
max_now = expect_list[key]
expect_list.pop(key)
return w_locals, full_user, idxs_users, labeled, test_count
def test_part(net_glob, w_locals, idxs_users, key, dataset_test_part, args):
net_all = FedAvg(w_locals, idxs_users)
# loss_all = 1
net_glob.load_state_dict(net_all)
acc, loss_all = test(net_glob, dataset_test_part, args)
idxs_users.remove(key)
net_part = FedAvg(w_locals, idxs_users)
net_glob.load_state_dict(net_part)
acc, loss_part = test(net_glob, dataset_test_part, args)
# print(loss_all)
return loss_all, loss_part, idxs_users
def test(net_g, test_data, args):
net_g.eval()
# testing
test_loss = 0
correct = 0
for i in range(int(args.num_users*args.ratio)):
data = test_data['user_data'][i]
for X, y in batch_data(data, args.local_bs):
log_probs = net_g(X)
# sum up batch loss
test_loss += F.cross_entropy(log_probs, y.long(), reduction='sum').item()
y_pred = log_probs.data.max(1, keepdim=True)[1]
correct += y_pred.eq(y.long().data.view_as(y_pred)).long().cpu().sum()
test_loss /= len(test_data['user_data'][0]['y'])*int(args.num_users*args.ratio)
accuracy = 100 * correct / (len(test_data['user_data'][0]['y'])*int(args.num_users*args.ratio))
return accuracy, test_loss
def batch_data(data, batch_size):
'''
data is a dict := {'x': [numpy array], 'y': [numpy array]} (on one client)
returns x, y, which are both numpy array of length: batch_size
'''
data_x = data['x']
data_y = data['y']
# randomly shuffle data
np.random.seed(100)
rng_state = np.random.get_state()
np.random.shuffle(data_x)
np.random.set_state(rng_state)
np.random.shuffle(data_y)
# loop through mini-batches
for i in range(0, len(data_x), batch_size):
batched_x = data_x[i:i+batch_size]
batched_y = data_y[i:i+batch_size]
X = torch.FloatTensor(batched_x)
y = torch.FloatTensor(batched_y)
yield (X, y)
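# Illustrative usage sketch (hypothetical client data):
#   data = {'x': np.random.rand(100, 10), 'y': np.random.randint(0, 2, size=100)}
#   for X, y in batch_data(data, batch_size=32):
#       pass  # X and y are FloatTensor mini-batches of at most 32 rows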
|
[
"logging.getLogger",
"numpy.random.get_state",
"numpy.random.set_state",
"numpy.linalg.norm",
"test",
"numpy.random.seed",
"copy.deepcopy",
"operator.itemgetter",
"copy.copy",
"models.test.test_part",
"models.func.node_deleting",
"torch.FloatTensor",
"numpy.random.shuffle"
] |
[((374, 403), 'logging.getLogger', 'logging.getLogger', (['"""main_fed"""'], {}), "('main_fed')\n", (391, 403), False, 'import logging\n'), ((812, 840), 'copy.deepcopy', 'copy.deepcopy', (['value_list[0]'], {}), '(value_list[0])\n', (825, 840), False, 'import copy\n'), ((1104, 1125), 'copy.copy', 'copy.copy', (['idxs_users'], {}), '(idxs_users)\n', (1113, 1125), False, 'import copy\n'), ((3573, 3594), 'copy.copy', 'copy.copy', (['idxs_users'], {}), '(idxs_users)\n', (3582, 3594), False, 'import copy\n'), ((4842, 4881), 'test', 'test', (['net_glob', 'dataset_test_part', 'args'], {}), '(net_glob, dataset_test_part, args)\n', (4846, 4881), False, 'import test\n'), ((5013, 5052), 'test', 'test', (['net_glob', 'dataset_test_part', 'args'], {}), '(net_glob, dataset_test_part, args)\n', (5017, 5052), False, 'import test\n'), ((6175, 6194), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (6189, 6194), True, 'import numpy as np\n'), ((6211, 6232), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (6230, 6232), True, 'import numpy as np\n'), ((6237, 6262), 'numpy.random.shuffle', 'np.random.shuffle', (['data_x'], {}), '(data_x)\n', (6254, 6262), True, 'import numpy as np\n'), ((6267, 6297), 'numpy.random.set_state', 'np.random.set_state', (['rng_state'], {}), '(rng_state)\n', (6286, 6297), True, 'import numpy as np\n'), ((6302, 6327), 'numpy.random.shuffle', 'np.random.shuffle', (['data_y'], {}), '(data_y)\n', (6319, 6327), True, 'import numpy as np\n'), ((1280, 1340), 'models.func.node_deleting', 'fc.node_deleting', (['expect_list', 'max_now', 'idxs_users', 'gradient'], {}), '(expect_list, max_now, idxs_users, gradient)\n', (1296, 1340), True, 'import models.func as fc\n'), ((2551, 2573), 'numpy.linalg.norm', 'LA.norm', (['gradient[idx]'], {}), '(gradient[idx])\n', (2558, 2573), True, 'from numpy import linalg as LA\n'), ((3749, 3809), 'models.func.node_deleting', 'fc.node_deleting', (['expect_list', 'max_now', 'idxs_users', 'gradient'], {}), '(expect_list, max_now, idxs_users, gradient)\n', (3765, 3809), True, 'import models.func as fc\n'), ((6507, 6535), 'torch.FloatTensor', 'torch.FloatTensor', (['batched_x'], {}), '(batched_x)\n', (6524, 6535), False, 'import torch\n'), ((6548, 6576), 'torch.FloatTensor', 'torch.FloatTensor', (['batched_y'], {}), '(batched_y)\n', (6565, 6576), False, 'import torch\n'), ((1795, 1882), 'models.test.test_part', 'ts.test_part', (['net_glob', 'w_locals', 'idxs_users', 'key', 'dataset_test', 'test_sampler', 'args'], {}), '(net_glob, w_locals, idxs_users, key, dataset_test,\n test_sampler, args)\n', (1807, 1882), True, 'import models.test as ts\n'), ((1428, 1450), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1447, 1450), False, 'import operator\n'), ((3897, 3919), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (3916, 3919), False, 'import operator\n')]
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
def nearest_neighbor_interp_np(X,
out_h,
out_w,
out_size=None,
actual_shape=None,
align_corners=True,
data_layout='NCHW'):
"""nearest neighbor interpolation implement in shape [N, C, H, W]"""
if data_layout == "NHWC":
X = np.transpose(X, (0, 3, 1, 2)) # NHWC => NCHW
if out_size is not None:
out_h = out_size[0]
out_w = out_size[1]
if actual_shape is not None:
out_h = actual_shape[0]
out_w = actual_shape[1]
n, c, in_h, in_w = X.shape
ratio_h = ratio_w = 0.0
if (out_h > 1):
if (align_corners):
ratio_h = (in_h - 1.0) / (out_h - 1.0)
else:
ratio_h = 1.0 * in_h / out_h
if (out_w > 1):
if (align_corners):
ratio_w = (in_w - 1.0) / (out_w - 1.0)
else:
ratio_w = 1.0 * in_w / out_w
out = np.zeros((n, c, out_h, out_w))
if align_corners:
for i in range(out_h):
in_i = int(ratio_h * i + 0.5)
for j in range(out_w):
in_j = int(ratio_w * j + 0.5)
out[:, :, i, j] = X[:, :, in_i, in_j]
else:
for i in range(out_h):
in_i = int(ratio_h * i)
for j in range(out_w):
in_j = int(ratio_w * j)
out[:, :, i, j] = X[:, :, in_i, in_j]
if data_layout == "NHWC":
out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC
return out.astype(X.dtype)
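# Worked example of the index mapping above (values follow directly from the
# formulas): with in_h = 4, out_h = 2 and align_corners=True,
# ratio_h = (4 - 1) / (2 - 1) = 3, so output rows 0 and 1 are copied from input
# rows int(3 * 0 + 0.5) = 0 and int(3 * 1 + 0.5) = 3.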
class TestNearestInterpOp(OpTest):
def setUp(self):
self.out_size = None
self.actual_shape = None
self.data_layout = 'NCHW'
self.init_test_case()
self.op_type = "nearest_interp"
self.check_eager = True
input_np = np.random.random(self.input_shape).astype("float64")
if self.data_layout == "NCHW":
in_h = self.input_shape[2]
in_w = self.input_shape[3]
else:
in_h = self.input_shape[1]
in_w = self.input_shape[2]
if self.scale > 0:
out_h = int(in_h * self.scale)
out_w = int(in_w * self.scale)
else:
out_h = self.out_h
out_w = self.out_w
output_np = nearest_neighbor_interp_np(
input_np, out_h, out_w, self.out_size, self.actual_shape,
self.align_corners, self.data_layout)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
self.check_eager = False
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.check_eager = False
self.attrs = {
'out_h': self.out_h,
'out_w': self.out_w,
'scale': self.scale,
'interp_method': self.interp_method,
'align_corners': self.align_corners,
'data_layout': self.data_layout
}
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_eager=self.check_eager)
def test_check_grad(self):
self.check_grad(
['X'], 'Out', in_place=True, check_eager=self.check_eager)
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 3, 4, 5]
self.out_h = 2
self.out_w = 2
self.scale = 0.
self.out_size = np.array([3, 3]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpCase1(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.
self.align_corners = True
class TestNearestNeighborInterpCase2(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.
self.align_corners = True
class TestNearestNeighborInterpCase3(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.align_corners = True
class TestNearestNeighborInterpCase4(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [4, 1, 7, 8]
self.out_h = 1
self.out_w = 1
self.scale = 0.
self.out_size = np.array([2, 2]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpCase5(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.
self.out_size = np.array([11, 11]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpCase6(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.out_size = np.array([65, 129]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpSame(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 3, 32, 64]
self.out_h = 32
self.out_w = 64
self.scale = 0.
self.align_corners = True
class TestNearestNeighborInterpActualShape(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpDataLayout(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 4, 4, 5]
self.out_h = 2
self.out_w = 2
self.scale = 0.
self.out_size = np.array([3, 8]).astype("int32")
self.align_corners = True
self.data_layout = "NHWC"
class TestNearestInterpOpUint8(OpTest):
def setUp(self):
self.out_size = None
self.actual_shape = None
self.init_test_case()
self.op_type = "nearest_interp"
self.check_eager = True
input_np = np.random.randint(
low=0, high=256, size=self.input_shape).astype("uint8")
if self.scale > 0:
out_h = int(self.input_shape[2] * self.scale)
out_w = int(self.input_shape[3] * self.scale)
else:
out_h = self.out_h
out_w = self.out_w
output_np = nearest_neighbor_interp_np(input_np, out_h, out_w,
self.out_size, self.actual_shape,
self.align_corners)
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
self.check_eager = False
self.attrs = {
'out_h': self.out_h,
'out_w': self.out_w,
'scale': self.scale,
'interp_method': self.interp_method,
'align_corners': self.align_corners
}
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(
place=core.CPUPlace(), atol=1, check_eager=self.check_eager)
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [1, 3, 9, 6]
self.out_h = 10
self.out_w = 9
self.scale = 0.
self.align_corners = True
class TestNearestNeighborInterpCase1Uint8(TestNearestInterpOpUint8):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 3, 32, 64]
self.out_h = 80
self.out_w = 40
self.scale = 0.
self.align_corners = True
class TestNearestNeighborInterpCase2Uint8(TestNearestInterpOpUint8):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [4, 1, 7, 8]
self.out_h = 5
self.out_w = 13
self.scale = 0.
self.out_size = np.array([6, 15]).astype("int32")
self.align_corners = True
class TestNearestInterpWithoutCorners(TestNearestInterpOp):
def set_align_corners(self):
self.align_corners = False
class TestNearestNeighborInterpScale1(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 7, 5]
self.out_h = 64
self.out_w = 32
self.scale = 2.
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpScale2(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 5, 7]
self.out_h = 64
self.out_w = 32
self.scale = 1.5
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
class TestNearestNeighborInterpScale3(TestNearestInterpOp):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 7, 5]
self.out_h = 64
self.out_w = 32
self.scale = 1.
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
class TestNearestInterpOp_attr_tensor(OpTest):
def setUp(self):
self.out_size = None
self.actual_shape = None
self.init_test_case()
self.op_type = "nearest_interp"
self.shape_by_1Dtensor = False
self.scale_by_1Dtensor = False
self.attrs = {
'interp_method': self.interp_method,
'align_corners': self.align_corners,
}
# NOTE(dev): some AsDispensible input is not used under imperative mode.
# Skip check_eager while found them in Inputs.
self.check_eager = True
input_np = np.random.random(self.input_shape).astype("float64")
self.inputs = {'X': input_np}
if self.scale_by_1Dtensor:
self.inputs['Scale'] = np.array([self.scale]).astype("float64")
elif self.scale > 0:
out_h = int(self.input_shape[2] * self.scale)
out_w = int(self.input_shape[3] * self.scale)
self.attrs['scale'] = self.scale
else:
out_h = self.out_h
out_w = self.out_w
if self.shape_by_1Dtensor:
self.inputs['OutSize'] = self.out_size
self.check_eager = False
elif self.out_size is not None:
size_tensor = []
for index, ele in enumerate(self.out_size):
size_tensor.append(("x" + str(index), np.ones(
(1)).astype('int32') * ele))
self.inputs['SizeTensor'] = size_tensor
self.check_eager = False
self.attrs['out_h'] = self.out_h
self.attrs['out_w'] = self.out_w
output_np = nearest_neighbor_interp_np(input_np, out_h, out_w,
self.out_size, self.actual_shape,
self.align_corners)
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_eager=self.check_eager)
def test_check_grad(self):
self.check_grad(
['X'], 'Out', in_place=True, check_eager=self.check_eager)
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [2, 5, 4, 4]
self.out_h = 3
self.out_w = 3
self.scale = 0.
self.out_size = [3, 3]
self.align_corners = True
# out_size is a tensor list
class TestNearestInterp_attr_tensor_Case1(TestNearestInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12
self.scale = 0.
self.out_size = [8, 12]
self.align_corners = True
# out_size is a 1-D tensor
class TestNearestInterp_attr_tensor_Case2(TestNearestInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 0.
self.out_size = np.array([66, 40]).astype("int32")
self.align_corners = True
self.shape_by_1Dtensor = True
# scale is a 1-D tensor
class TestNearestInterp_attr_tensor_Case3(TestNearestInterpOp_attr_tensor):
def init_test_case(self):
self.interp_method = 'nearest'
self.input_shape = [3, 2, 32, 16]
self.out_h = 64
self.out_w = 32
self.scale = 2.0
self.out_size = None
self.align_corners = True
self.scale_by_1Dtensor = True
class TestNearestAPI(unittest.TestCase):
def test_case(self):
x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
y = fluid.data(name="y", shape=[2, 6, 6, 3], dtype="float32")
dim = fluid.data(name="dim", shape=[1], dtype="int32")
shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
actual_size = fluid.data(name="actual_size", shape=[2], dtype="int32")
scale_tensor = fluid.data(
name="scale_tensor", shape=[1], dtype="float32")
out1 = fluid.layers.resize_nearest(
y, out_shape=[12, 12], data_format='NHWC')
out2 = fluid.layers.resize_nearest(x, out_shape=[12, dim])
out3 = fluid.layers.resize_nearest(x, out_shape=shape_tensor)
out4 = fluid.layers.resize_nearest(
x, out_shape=[4, 4], actual_shape=actual_size)
out5 = fluid.layers.resize_nearest(x, scale=scale_tensor)
x_data = np.random.random((2, 3, 6, 6)).astype("float32")
dim_data = np.array([12]).astype("int32")
shape_data = np.array([12, 12]).astype("int32")
actual_size_data = np.array([12, 12]).astype("int32")
scale_data = np.array([2.0]).astype("float32")
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
results = exe.run(fluid.default_main_program(),
feed={
"x": x_data,
"y": np.transpose(x_data, (0, 2, 3, 1)),
"dim": dim_data,
"shape_tensor": shape_data,
"actual_size": actual_size_data,
"scale_tensor": scale_data
},
fetch_list=[out1, out2, out3, out4, out5],
return_numpy=True)
expect_res = nearest_neighbor_interp_np(
x_data, out_h=12, out_w=12, align_corners=True)
self.assertTrue(
np.allclose(results[0], np.transpose(expect_res, (0, 2, 3, 1))))
for i in range(len(results) - 1):
self.assertTrue(np.allclose(results[i + 1], expect_res))
class TestNearestInterpException(unittest.TestCase):
def test_exception(self):
input = fluid.data(name="input", shape=[1, 3, 6, 6], dtype="float32")
def attr_data_format():
# for 4-D input, data_format can only be NCHW or NHWC
out = fluid.layers.resize_nearest(
input, out_shape=[4, 8], data_format='NDHWC')
def attr_scale_type():
out = fluid.layers.resize_nearest(input, scale='scale')
def attr_scale_value():
out = fluid.layers.resize_nearest(input, scale=-0.3)
self.assertRaises(ValueError, attr_data_format)
self.assertRaises(TypeError, attr_scale_type)
self.assertRaises(ValueError, attr_scale_value)
if __name__ == "__main__":
import paddle
paddle.enable_static()
unittest.main()
|
[
"paddle.fluid.layers.resize_nearest",
"numpy.allclose",
"paddle.fluid.data",
"numpy.ones",
"numpy.random.random",
"paddle.fluid.default_startup_program",
"paddle.fluid.core.CUDAPlace",
"paddle.enable_static",
"paddle.fluid.default_main_program",
"numpy.zeros",
"paddle.fluid.Executor",
"numpy.array",
"numpy.random.randint",
"unittest.main",
"paddle.fluid.core.is_compiled_with_cuda",
"numpy.transpose",
"paddle.fluid.core.CPUPlace"
] |
[((1809, 1839), 'numpy.zeros', 'np.zeros', (['(n, c, out_h, out_w)'], {}), '((n, c, out_h, out_w))\n', (1817, 1839), True, 'import numpy as np\n'), ((17166, 17188), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (17186, 17188), False, 'import paddle\n'), ((17193, 17208), 'unittest.main', 'unittest.main', ([], {}), '()\n', (17206, 17208), False, 'import unittest\n'), ((1202, 1231), 'numpy.transpose', 'np.transpose', (['X', '(0, 3, 1, 2)'], {}), '(X, (0, 3, 1, 2))\n', (1214, 1231), True, 'import numpy as np\n'), ((2322, 2353), 'numpy.transpose', 'np.transpose', (['out', '(0, 2, 3, 1)'], {}), '(out, (0, 2, 3, 1))\n', (2334, 2353), True, 'import numpy as np\n'), ((14126, 14183), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""x"""', 'shape': '[2, 3, 6, 6]', 'dtype': '"""float32"""'}), "(name='x', shape=[2, 3, 6, 6], dtype='float32')\n", (14136, 14183), True, 'import paddle.fluid as fluid\n'), ((14196, 14253), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""y"""', 'shape': '[2, 6, 6, 3]', 'dtype': '"""float32"""'}), "(name='y', shape=[2, 6, 6, 3], dtype='float32')\n", (14206, 14253), True, 'import paddle.fluid as fluid\n'), ((14269, 14317), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""dim"""', 'shape': '[1]', 'dtype': '"""int32"""'}), "(name='dim', shape=[1], dtype='int32')\n", (14279, 14317), True, 'import paddle.fluid as fluid\n'), ((14341, 14398), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""shape_tensor"""', 'shape': '[2]', 'dtype': '"""int32"""'}), "(name='shape_tensor', shape=[2], dtype='int32')\n", (14351, 14398), True, 'import paddle.fluid as fluid\n'), ((14421, 14477), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""actual_size"""', 'shape': '[2]', 'dtype': '"""int32"""'}), "(name='actual_size', shape=[2], dtype='int32')\n", (14431, 14477), True, 'import paddle.fluid as fluid\n'), ((14501, 14560), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""scale_tensor"""', 'shape': '[1]', 'dtype': '"""float32"""'}), "(name='scale_tensor', shape=[1], dtype='float32')\n", (14511, 14560), True, 'import paddle.fluid as fluid\n'), ((14590, 14660), 'paddle.fluid.layers.resize_nearest', 'fluid.layers.resize_nearest', (['y'], {'out_shape': '[12, 12]', 'data_format': '"""NHWC"""'}), "(y, out_shape=[12, 12], data_format='NHWC')\n", (14617, 14660), True, 'import paddle.fluid as fluid\n'), ((14689, 14740), 'paddle.fluid.layers.resize_nearest', 'fluid.layers.resize_nearest', (['x'], {'out_shape': '[12, dim]'}), '(x, out_shape=[12, dim])\n', (14716, 14740), True, 'import paddle.fluid as fluid\n'), ((14756, 14810), 'paddle.fluid.layers.resize_nearest', 'fluid.layers.resize_nearest', (['x'], {'out_shape': 'shape_tensor'}), '(x, out_shape=shape_tensor)\n', (14783, 14810), True, 'import paddle.fluid as fluid\n'), ((14826, 14900), 'paddle.fluid.layers.resize_nearest', 'fluid.layers.resize_nearest', (['x'], {'out_shape': '[4, 4]', 'actual_shape': 'actual_size'}), '(x, out_shape=[4, 4], actual_shape=actual_size)\n', (14853, 14900), True, 'import paddle.fluid as fluid\n'), ((14929, 14979), 'paddle.fluid.layers.resize_nearest', 'fluid.layers.resize_nearest', (['x'], {'scale': 'scale_tensor'}), '(x, scale=scale_tensor)\n', (14956, 14979), True, 'import paddle.fluid as fluid\n'), ((15282, 15310), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (15308, 15310), True, 'import paddle.fluid.core as core\n'), ((15414, 15435), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (15428, 15435), True, 'import 
paddle.fluid as fluid\n'), ((16480, 16541), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input"""', 'shape': '[1, 3, 6, 6]', 'dtype': '"""float32"""'}), "(name='input', shape=[1, 3, 6, 6], dtype='float32')\n", (16490, 16541), True, 'import paddle.fluid as fluid\n'), ((15332, 15349), 'paddle.fluid.core.CUDAPlace', 'core.CUDAPlace', (['(0)'], {}), '(0)\n', (15346, 15349), True, 'import paddle.fluid.core as core\n'), ((15384, 15399), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (15397, 15399), True, 'import paddle.fluid.core as core\n'), ((15452, 15483), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (15481, 15483), True, 'import paddle.fluid as fluid\n'), ((15511, 15539), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (15537, 15539), True, 'import paddle.fluid as fluid\n'), ((16659, 16732), 'paddle.fluid.layers.resize_nearest', 'fluid.layers.resize_nearest', (['input'], {'out_shape': '[4, 8]', 'data_format': '"""NDHWC"""'}), "(input, out_shape=[4, 8], data_format='NDHWC')\n", (16686, 16732), True, 'import paddle.fluid as fluid\n'), ((16800, 16849), 'paddle.fluid.layers.resize_nearest', 'fluid.layers.resize_nearest', (['input'], {'scale': '"""scale"""'}), "(input, scale='scale')\n", (16827, 16849), True, 'import paddle.fluid as fluid\n'), ((16901, 16947), 'paddle.fluid.layers.resize_nearest', 'fluid.layers.resize_nearest', (['input'], {'scale': '(-0.3)'}), '(input, scale=-0.3)\n', (16928, 16947), True, 'import paddle.fluid as fluid\n'), ((2677, 2711), 'numpy.random.random', 'np.random.random', (['self.input_shape'], {}), '(self.input_shape)\n', (2693, 2711), True, 'import numpy as np\n'), ((4335, 4351), 'numpy.array', 'np.array', (['[3, 3]'], {}), '([3, 3])\n', (4343, 4351), True, 'import numpy as np\n'), ((5494, 5510), 'numpy.array', 'np.array', (['[2, 2]'], {}), '([2, 2])\n', (5502, 5510), True, 'import numpy as np\n'), ((5827, 5845), 'numpy.array', 'np.array', (['[11, 11]'], {}), '([11, 11])\n', (5835, 5845), True, 'import numpy as np\n'), ((6164, 6183), 'numpy.array', 'np.array', (['[65, 129]'], {}), '([65, 129])\n', (6172, 6183), True, 'import numpy as np\n'), ((6785, 6803), 'numpy.array', 'np.array', (['[66, 40]'], {}), '([66, 40])\n', (6793, 6803), True, 'import numpy as np\n'), ((7123, 7139), 'numpy.array', 'np.array', (['[3, 8]'], {}), '([3, 8])\n', (7131, 7139), True, 'import numpy as np\n'), ((7470, 7527), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(256)', 'size': 'self.input_shape'}), '(low=0, high=256, size=self.input_shape)\n', (7487, 7527), True, 'import numpy as np\n'), ((8522, 8537), 'paddle.fluid.core.CPUPlace', 'core.CPUPlace', ([], {}), '()\n', (8535, 8537), True, 'import paddle.fluid.core as core\n'), ((9355, 9372), 'numpy.array', 'np.array', (['[6, 15]'], {}), '([6, 15])\n', (9363, 9372), True, 'import numpy as np\n'), ((9820, 9838), 'numpy.array', 'np.array', (['[66, 40]'], {}), '([66, 40])\n', (9828, 9838), True, 'import numpy as np\n'), ((10157, 10175), 'numpy.array', 'np.array', (['[66, 40]'], {}), '([66, 40])\n', (10165, 10175), True, 'import numpy as np\n'), ((10493, 10511), 'numpy.array', 'np.array', (['[66, 40]'], {}), '([66, 40])\n', (10501, 10511), True, 'import numpy as np\n'), ((11161, 11195), 'numpy.random.random', 'np.random.random', (['self.input_shape'], {}), '(self.input_shape)\n', (11177, 11195), True, 'import numpy as np\n'), ((13552, 13570), 'numpy.array', 'np.array', (['[66, 40]'], {}), '([66, 40])\n', (13560, 
13570), True, 'import numpy as np\n'), ((14998, 15028), 'numpy.random.random', 'np.random.random', (['(2, 3, 6, 6)'], {}), '((2, 3, 6, 6))\n', (15014, 15028), True, 'import numpy as np\n'), ((15066, 15080), 'numpy.array', 'np.array', (['[12]'], {}), '([12])\n', (15074, 15080), True, 'import numpy as np\n'), ((15118, 15136), 'numpy.array', 'np.array', (['[12, 12]'], {}), '([12, 12])\n', (15126, 15136), True, 'import numpy as np\n'), ((15180, 15198), 'numpy.array', 'np.array', (['[12, 12]'], {}), '([12, 12])\n', (15188, 15198), True, 'import numpy as np\n'), ((15236, 15251), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (15244, 15251), True, 'import numpy as np\n'), ((16227, 16265), 'numpy.transpose', 'np.transpose', (['expect_res', '(0, 2, 3, 1)'], {}), '(expect_res, (0, 2, 3, 1))\n', (16239, 16265), True, 'import numpy as np\n'), ((16338, 16377), 'numpy.allclose', 'np.allclose', (['results[i + 1]', 'expect_res'], {}), '(results[i + 1], expect_res)\n', (16349, 16377), True, 'import numpy as np\n'), ((11323, 11345), 'numpy.array', 'np.array', (['[self.scale]'], {}), '([self.scale])\n', (11331, 11345), True, 'import numpy as np\n'), ((15652, 15686), 'numpy.transpose', 'np.transpose', (['x_data', '(0, 2, 3, 1)'], {}), '(x_data, (0, 2, 3, 1))\n', (15664, 15686), True, 'import numpy as np\n'), ((11933, 11943), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (11940, 11943), True, 'import numpy as np\n')]
|
import numpy as np
import pytest
def test_camera_display_create():
from ctapipe.visualization.bokeh import CameraDisplay
CameraDisplay()
def test_camera_geom(example_event, example_subarray):
from ctapipe.visualization.bokeh import CameraDisplay
t = list(example_event.r0.tel.keys())[0]
geom = example_subarray.tel[t].camera.geometry
c_display = CameraDisplay(geom)
assert (c_display.cdsource.data["x"] == geom.pix_x.value).all()
assert (c_display.cdsource.data["y"] == geom.pix_y.value).all()
t = list(example_event.r0.tel.keys())[1]
geom = example_subarray.tel[t].camera.geometry
c_display.geom = geom
assert (c_display.cdsource.data["x"] == geom.pix_x.value).all()
assert (c_display.cdsource.data["y"] == geom.pix_y.value).all()
def test_camera_image(example_event, example_subarray):
from ctapipe.visualization.bokeh import CameraDisplay, intensity_to_hex
t = list(example_event.r0.tel.keys())[0]
geom = example_subarray.tel[t].camera.geometry
n_pixels = geom.pix_x.value.size
image = np.ones(n_pixels)
colors = intensity_to_hex(image)
with pytest.raises(ValueError):
CameraDisplay(None, image)
c_display = CameraDisplay(geom, image)
assert (c_display.cdsource.data["image"] == colors).all()
assert c_display.image_min == 0
assert c_display.image_max == 2
image[5] = 5
colors = intensity_to_hex(image)
c_display.image = image
assert (c_display.cdsource.data["image"] == colors).all()
assert c_display.image_min == image.min()
assert c_display.image_max == image.max()
def test_camera_enable_pixel_picker(example_event, example_subarray):
from ctapipe.visualization.bokeh import CameraDisplay
t = list(example_event.r0.tel.keys())[0]
geom = example_subarray.tel[t].camera.geometry
n_pixels = geom.pix_x.value.size
image = np.ones(n_pixels)
c_display = CameraDisplay(geom, image)
c_display.enable_pixel_picker(2)
assert len(c_display.active_pixels) == 2
c_display.enable_pixel_picker(3)
assert len(c_display.active_pixels) == 3
def test_fast_camera_display_create(example_event, example_subarray):
from ctapipe.visualization.bokeh import FastCameraDisplay
t = list(example_event.r0.tel.keys())[0]
geom = example_subarray.tel[t].camera.geometry
x = geom.pix_x.value
y = geom.pix_y.value
area = geom.pix_area.value
size = np.sqrt(area)
FastCameraDisplay(x, y, size)
def test_fast_camera_image(example_event, example_subarray):
from ctapipe.visualization.bokeh import FastCameraDisplay, intensity_to_hex
t = list(example_event.r0.tel.keys())[0]
geom = example_subarray.tel[t].camera.geometry
x = geom.pix_x.value
y = geom.pix_y.value
area = geom.pix_area.value
size = np.sqrt(area)
c_display = FastCameraDisplay(x, y, size)
image = np.ones(x.size)
colors = intensity_to_hex(image)
c_display.image = colors
assert (c_display.cdsource.data["image"] == colors).all()
def test_waveform_display_create():
from ctapipe.visualization.bokeh import WaveformDisplay
WaveformDisplay()
def test_waveform_values():
from ctapipe.visualization.bokeh import WaveformDisplay
wf = np.ones(30)
w_display = WaveformDisplay(wf)
assert (w_display.cdsource.data["samples"] == wf).all()
assert (w_display.cdsource.data["t"] == np.arange(wf.size)).all()
wf[5] = 5
w_display.waveform = wf
assert (w_display.cdsource.data["samples"] == wf).all()
assert (w_display.cdsource.data["t"] == np.arange(wf.size)).all()
def test_span():
from ctapipe.visualization.bokeh import WaveformDisplay
wf = np.ones(30)
w_display = WaveformDisplay(wf)
w_display.enable_time_picker()
w_display.active_time = 4
assert w_display.span.location == 4
w_display.active_time = -3
assert w_display.active_time == 0
assert w_display.span.location == 0
w_display.active_time = wf.size + 10
assert w_display.active_time == wf.size - 1
assert w_display.span.location == wf.size - 1
|
[
"numpy.sqrt",
"numpy.ones",
"ctapipe.visualization.bokeh.CameraDisplay",
"ctapipe.visualization.bokeh.FastCameraDisplay",
"pytest.raises",
"ctapipe.visualization.bokeh.intensity_to_hex",
"ctapipe.visualization.bokeh.WaveformDisplay",
"numpy.arange"
] |
[((132, 147), 'ctapipe.visualization.bokeh.CameraDisplay', 'CameraDisplay', ([], {}), '()\n', (145, 147), False, 'from ctapipe.visualization.bokeh import CameraDisplay\n'), ((376, 395), 'ctapipe.visualization.bokeh.CameraDisplay', 'CameraDisplay', (['geom'], {}), '(geom)\n', (389, 395), False, 'from ctapipe.visualization.bokeh import CameraDisplay\n'), ((1072, 1089), 'numpy.ones', 'np.ones', (['n_pixels'], {}), '(n_pixels)\n', (1079, 1089), True, 'import numpy as np\n'), ((1103, 1126), 'ctapipe.visualization.bokeh.intensity_to_hex', 'intensity_to_hex', (['image'], {}), '(image)\n', (1119, 1126), False, 'from ctapipe.visualization.bokeh import FastCameraDisplay, intensity_to_hex\n'), ((1216, 1242), 'ctapipe.visualization.bokeh.CameraDisplay', 'CameraDisplay', (['geom', 'image'], {}), '(geom, image)\n', (1229, 1242), False, 'from ctapipe.visualization.bokeh import CameraDisplay\n'), ((1408, 1431), 'ctapipe.visualization.bokeh.intensity_to_hex', 'intensity_to_hex', (['image'], {}), '(image)\n', (1424, 1431), False, 'from ctapipe.visualization.bokeh import FastCameraDisplay, intensity_to_hex\n'), ((1890, 1907), 'numpy.ones', 'np.ones', (['n_pixels'], {}), '(n_pixels)\n', (1897, 1907), True, 'import numpy as np\n'), ((1924, 1950), 'ctapipe.visualization.bokeh.CameraDisplay', 'CameraDisplay', (['geom', 'image'], {}), '(geom, image)\n', (1937, 1950), False, 'from ctapipe.visualization.bokeh import CameraDisplay\n'), ((2441, 2454), 'numpy.sqrt', 'np.sqrt', (['area'], {}), '(area)\n', (2448, 2454), True, 'import numpy as np\n'), ((2460, 2489), 'ctapipe.visualization.bokeh.FastCameraDisplay', 'FastCameraDisplay', (['x', 'y', 'size'], {}), '(x, y, size)\n', (2477, 2489), False, 'from ctapipe.visualization.bokeh import FastCameraDisplay, intensity_to_hex\n'), ((2823, 2836), 'numpy.sqrt', 'np.sqrt', (['area'], {}), '(area)\n', (2830, 2836), True, 'import numpy as np\n'), ((2854, 2883), 'ctapipe.visualization.bokeh.FastCameraDisplay', 'FastCameraDisplay', (['x', 'y', 'size'], {}), '(x, y, size)\n', (2871, 2883), False, 'from ctapipe.visualization.bokeh import FastCameraDisplay, intensity_to_hex\n'), ((2897, 2912), 'numpy.ones', 'np.ones', (['x.size'], {}), '(x.size)\n', (2904, 2912), True, 'import numpy as np\n'), ((2926, 2949), 'ctapipe.visualization.bokeh.intensity_to_hex', 'intensity_to_hex', (['image'], {}), '(image)\n', (2942, 2949), False, 'from ctapipe.visualization.bokeh import FastCameraDisplay, intensity_to_hex\n'), ((3145, 3162), 'ctapipe.visualization.bokeh.WaveformDisplay', 'WaveformDisplay', ([], {}), '()\n', (3160, 3162), False, 'from ctapipe.visualization.bokeh import WaveformDisplay\n'), ((3263, 3274), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3270, 3274), True, 'import numpy as np\n'), ((3291, 3310), 'ctapipe.visualization.bokeh.WaveformDisplay', 'WaveformDisplay', (['wf'], {}), '(wf)\n', (3306, 3310), False, 'from ctapipe.visualization.bokeh import WaveformDisplay\n'), ((3705, 3716), 'numpy.ones', 'np.ones', (['(30)'], {}), '(30)\n', (3712, 3716), True, 'import numpy as np\n'), ((3733, 3752), 'ctapipe.visualization.bokeh.WaveformDisplay', 'WaveformDisplay', (['wf'], {}), '(wf)\n', (3748, 3752), False, 'from ctapipe.visualization.bokeh import WaveformDisplay\n'), ((1137, 1162), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1150, 1162), False, 'import pytest\n'), ((1172, 1198), 'ctapipe.visualization.bokeh.CameraDisplay', 'CameraDisplay', (['None', 'image'], {}), '(None, image)\n', (1185, 1198), False, 'from ctapipe.visualization.bokeh import 
CameraDisplay\n'), ((3416, 3434), 'numpy.arange', 'np.arange', (['wf.size'], {}), '(wf.size)\n', (3425, 3434), True, 'import numpy as np\n'), ((3590, 3608), 'numpy.arange', 'np.arange', (['wf.size'], {}), '(wf.size)\n', (3599, 3608), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 30 16:23:35 2020
@author: elizabeth_mckenzie
"""
import numpy as np
from scipy.ndimage.interpolation import rotate as ndrotate
import tensorflow as tf
from skimage.transform import resize
import matplotlib.pyplot as plt
order = 0
# input_shape = (128, 128, 128, 1)
input_shape = (32, 256, 256, 1)
def first_rot(ubermask, angle_xy):
ubermask = ndrotate(ubermask.numpy(), angle_xy.numpy(), axes=(0, 1), reshape=False, order=order)
return ubermask
def second_rot(ubermask, angle_xz):
    ubermask = ndrotate(ubermask.numpy(), angle_xz.numpy(), axes=(0, 2), reshape=False, order=order)
return ubermask
def third_rot(ubermask, angle_yz):
    ubermask = ndrotate(ubermask.numpy(), angle_yz.numpy(), axes=(1, 2), reshape=False, order=order)
return ubermask
def crop_the_mask(crop_mask, crop_amount, top):
# generate mask for cropping and crop to zeros
crop_mask = crop_mask.numpy()
if top:
crop_top = crop_amount
crop_mask[0:crop_top, :, :] = 0.0
else:
crop_bottom = crop_amount
crop_mask[-crop_bottom:, :, :] = 0.0
return crop_mask
def rotate_and_crop(x, crop_amount, top=True):
x = tf.cast(x, dtype=tf.float64)
# generate random angles for rotation. Most is in chin up/down direction
# angle_xy = tf.random.uniform([], minval=-45, maxval=0, dtype=tf.dtypes.int32)
# angle_xz = tf.random.uniform([], minval=-5, maxval=5, dtype=tf.dtypes.int32)
# angle_yz = tf.random.uniform([], minval=-5, maxval=5, dtype=tf.dtypes.int32)
angle_xy = tf.constant(0, dtype=tf.dtypes.int32)
angle_xz = tf.constant(0, dtype=tf.dtypes.int32)
angle_yz = tf.constant(0, dtype=tf.dtypes.int32)
ubermask = tf.ones_like(x, dtype=tf.float64) # mask for the mask
# rotate mask's mask
ubermask = tf.py_function(func=first_rot, inp=[ubermask, angle_xy], Tout=tf.float64)
ubermask = tf.py_function(func=second_rot, inp=[ubermask, angle_xz], Tout=tf.float64)
ubermask = tf.py_function(func=third_rot, inp=[ubermask, angle_yz], Tout=tf.float64)
# generate mask for cropping and crop to zeros
crop_mask = tf.ones_like(ubermask, dtype=tf.float64)
crop_mask = tf.py_function(func=crop_the_mask, inp=[crop_mask, crop_amount, top], Tout=tf.float64)
# crop image
cropped_mask = ubermask * crop_mask
# rotate back
cropped_mask = tf.py_function(func=third_rot, inp=[cropped_mask, -angle_yz], Tout=tf.float64)
cropped_mask = tf.py_function(func=second_rot, inp=[cropped_mask, -angle_xz], Tout=tf.float64)
cropped_mask = tf.py_function(func=first_rot, inp=[cropped_mask, -angle_xy], Tout=tf.float64)
output_img = x * cropped_mask
output_img = tf.cast(output_img, dtype=tf.float16)
cropped_mask = tf.cast(cropped_mask, dtype=tf.float16)
return (output_img, cropped_mask)
def circlemask_cropped(x):
D, H, W, _ = x.shape
x, y = np.ogrid[:H, :W]
cx, cy = H / 2, W / 2
radius = int(np.random.uniform(0.7, 0.7) * H / 2)
r2 = (x - cx) * (x - cx) + (y - cy) * (y - cy)
circmask = r2 <= radius * radius
mask = np.expand_dims(circmask, axis=[0,-1]).repeat([D, ], axis=0)
return mask
# def crop_data(x):
# x.set_shape(input_shape) # hardcoded in here to get .map(tffunction) to work
#
# # generate cropping amounts
# crop_bottom = 50 # tf.random.uniform([], minval=40, maxval=70, dtype=tf.dtypes.int32)
# top_max = (x.shape[1] - crop_bottom) - 35 # (128 -70) = 58, - 35 = 23 as max
# # crop_top = tf.random.uniform([], minval=10, maxval=top_max,
# # dtype=tf.dtypes.int32) # want to guarentee there is something left
# crop_top = tf.constant(30, dtype=tf.dtypes.int32)
#
# (cropped_top_image, mask_Top) = rotate_and_crop(x, crop_top, top=True)
# (cropped_bottom_image, mask_Bottom) = rotate_and_crop(cropped_top_image, crop_bottom, top=False)
#
# final_cropped_image = cropped_bottom_image
# final_cropped_mask = mask_Top * mask_Bottom
# return (final_cropped_image, x, final_cropped_mask)
def crop_data(x):
x = tf.cast(x, dtype=tf.float16)
x.set_shape(input_shape) # hardcoded in here to get .map(tffunction) to work
cropped_mask = tf.py_function(func=circlemask_cropped, inp=[x], Tout=tf.float16)
cropped_image = x * cropped_mask
cropped_image = tf.cast(cropped_image, dtype=tf.float16)
cropped_mask = tf.cast(cropped_mask, dtype=tf.float16)
return (cropped_image, x, cropped_mask)
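# Illustrative usage sketch (dataset name and shapes are hypothetical):
# crop_data is intended to be mapped over a tf.data pipeline of single-channel
# volumes shaped like `input_shape` above, e.g.
#   ds = tf.data.Dataset.from_tensor_slices(volumes)  # volumes: (N, 32, 256, 256, 1)
#   ds = ds.map(crop_data)
#   cropped, original, mask = next(iter(ds))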
npy_name = 'dataset/train/Y170109.npz'
img_files = np.load(npy_name)
img = img_files['vol_data']
img = resize(img, (32,256,256))
# img = np.expand_dims(img, axis=-1)
# x = tf.convert_to_tensor(img)
# y = crop_data(x)
# plt.imshow(final_cropped_mask.numpy()[64,:,:].astype(np.float64), cmap='gray'); plt.show()
|
[
"tensorflow.py_function",
"numpy.random.uniform",
"tensorflow.constant",
"numpy.expand_dims",
"tensorflow.ones_like",
"tensorflow.cast",
"skimage.transform.resize",
"numpy.load"
] |
[((4569, 4586), 'numpy.load', 'np.load', (['npy_name'], {}), '(npy_name)\n', (4576, 4586), True, 'import numpy as np\n'), ((4621, 4648), 'skimage.transform.resize', 'resize', (['img', '(32, 256, 256)'], {}), '(img, (32, 256, 256))\n', (4627, 4648), False, 'from skimage.transform import resize\n'), ((1210, 1238), 'tensorflow.cast', 'tf.cast', (['x'], {'dtype': 'tf.float64'}), '(x, dtype=tf.float64)\n', (1217, 1238), True, 'import tensorflow as tf\n'), ((1582, 1619), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.dtypes.int32'}), '(0, dtype=tf.dtypes.int32)\n', (1593, 1619), True, 'import tensorflow as tf\n'), ((1635, 1672), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.dtypes.int32'}), '(0, dtype=tf.dtypes.int32)\n', (1646, 1672), True, 'import tensorflow as tf\n'), ((1688, 1725), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.dtypes.int32'}), '(0, dtype=tf.dtypes.int32)\n', (1699, 1725), True, 'import tensorflow as tf\n'), ((1742, 1775), 'tensorflow.ones_like', 'tf.ones_like', (['x'], {'dtype': 'tf.float64'}), '(x, dtype=tf.float64)\n', (1754, 1775), True, 'import tensorflow as tf\n'), ((1838, 1911), 'tensorflow.py_function', 'tf.py_function', ([], {'func': 'first_rot', 'inp': '[ubermask, angle_xy]', 'Tout': 'tf.float64'}), '(func=first_rot, inp=[ubermask, angle_xy], Tout=tf.float64)\n', (1852, 1911), True, 'import tensorflow as tf\n'), ((1927, 2001), 'tensorflow.py_function', 'tf.py_function', ([], {'func': 'second_rot', 'inp': '[ubermask, angle_xz]', 'Tout': 'tf.float64'}), '(func=second_rot, inp=[ubermask, angle_xz], Tout=tf.float64)\n', (1941, 2001), True, 'import tensorflow as tf\n'), ((2017, 2090), 'tensorflow.py_function', 'tf.py_function', ([], {'func': 'third_rot', 'inp': '[ubermask, angle_yz]', 'Tout': 'tf.float64'}), '(func=third_rot, inp=[ubermask, angle_yz], Tout=tf.float64)\n', (2031, 2090), True, 'import tensorflow as tf\n'), ((2159, 2199), 'tensorflow.ones_like', 'tf.ones_like', (['ubermask'], {'dtype': 'tf.float64'}), '(ubermask, dtype=tf.float64)\n', (2171, 2199), True, 'import tensorflow as tf\n'), ((2217, 2308), 'tensorflow.py_function', 'tf.py_function', ([], {'func': 'crop_the_mask', 'inp': '[crop_mask, crop_amount, top]', 'Tout': 'tf.float64'}), '(func=crop_the_mask, inp=[crop_mask, crop_amount, top], Tout=\n tf.float64)\n', (2231, 2308), True, 'import tensorflow as tf\n'), ((2400, 2478), 'tensorflow.py_function', 'tf.py_function', ([], {'func': 'third_rot', 'inp': '[cropped_mask, -angle_yz]', 'Tout': 'tf.float64'}), '(func=third_rot, inp=[cropped_mask, -angle_yz], Tout=tf.float64)\n', (2414, 2478), True, 'import tensorflow as tf\n'), ((2498, 2577), 'tensorflow.py_function', 'tf.py_function', ([], {'func': 'second_rot', 'inp': '[cropped_mask, -angle_xz]', 'Tout': 'tf.float64'}), '(func=second_rot, inp=[cropped_mask, -angle_xz], Tout=tf.float64)\n', (2512, 2577), True, 'import tensorflow as tf\n'), ((2597, 2675), 'tensorflow.py_function', 'tf.py_function', ([], {'func': 'first_rot', 'inp': '[cropped_mask, -angle_xy]', 'Tout': 'tf.float64'}), '(func=first_rot, inp=[cropped_mask, -angle_xy], Tout=tf.float64)\n', (2611, 2675), True, 'import tensorflow as tf\n'), ((2729, 2766), 'tensorflow.cast', 'tf.cast', (['output_img'], {'dtype': 'tf.float16'}), '(output_img, dtype=tf.float16)\n', (2736, 2766), True, 'import tensorflow as tf\n'), ((2786, 2825), 'tensorflow.cast', 'tf.cast', (['cropped_mask'], {'dtype': 'tf.float16'}), '(cropped_mask, dtype=tf.float16)\n', (2793, 2825), True, 'import tensorflow as tf\n'), ((4119, 4147), 
'tensorflow.cast', 'tf.cast', (['x'], {'dtype': 'tf.float16'}), '(x, dtype=tf.float16)\n', (4126, 4147), True, 'import tensorflow as tf\n'), ((4249, 4314), 'tensorflow.py_function', 'tf.py_function', ([], {'func': 'circlemask_cropped', 'inp': '[x]', 'Tout': 'tf.float16'}), '(func=circlemask_cropped, inp=[x], Tout=tf.float16)\n', (4263, 4314), True, 'import tensorflow as tf\n'), ((4372, 4412), 'tensorflow.cast', 'tf.cast', (['cropped_image'], {'dtype': 'tf.float16'}), '(cropped_image, dtype=tf.float16)\n', (4379, 4412), True, 'import tensorflow as tf\n'), ((4432, 4471), 'tensorflow.cast', 'tf.cast', (['cropped_mask'], {'dtype': 'tf.float16'}), '(cropped_mask, dtype=tf.float16)\n', (4439, 4471), True, 'import tensorflow as tf\n'), ((3126, 3164), 'numpy.expand_dims', 'np.expand_dims', (['circmask'], {'axis': '[0, -1]'}), '(circmask, axis=[0, -1])\n', (3140, 3164), True, 'import numpy as np\n'), ((2990, 3017), 'numpy.random.uniform', 'np.random.uniform', (['(0.7)', '(0.7)'], {}), '(0.7, 0.7)\n', (3007, 3017), True, 'import numpy as np\n')]
|
import collections
import itertools
import logging
import operator
from typing import Any, Callable, DefaultDict, Dict, List, Optional, Sequence, Tuple, Union
import networkx as nx
import numpy as np
from cytoolz import itertoolz
from spacy.tokens import Span, Token
from . import utils as ke_utils
LOGGER = logging.getLogger(__name__)
def build_graph_from_terms(
terms: Union[Sequence[str], Sequence[Token], Sequence[Span]],
*,
normalize: Optional[Union[str, Callable[[Token], str]]] = "lemma",
window_size: int = 10,
edge_weighting: str = "count",
) -> nx.Graph:
"""
Transform an ordered list of non-overlapping terms into a graph,
where each term is represented by a node with weighted edges linking it to
other terms that co-occur within ``window_size`` terms of itself.
Args:
terms
normalize: If "lemma", lemmatize terms; if "lower", lowercase terms;
if falsy, use the form of terms as they appear in ``terms``;
if a callable, must accept a ``Token`` and return a str,
e.g. :func:`textacy.spacier.utils.get_normalized_text()`.
.. note:: This is applied to the elements of ``terms`` *only* if
it's a list of ``Token`` or ``Span``.
window_size: Size of sliding window over ``terms`` that determines
which are said to co-occur. If 2, only immediately adjacent terms
have edges in the returned network.
edge_weighting ({"count", "binary"}): If "count", the nodes for
all co-occurring terms are connected by edges with weight equal to
the number of times they co-occurred within a sliding window;
if "binary", all such edges have weight = 1.
Returns:
Networkx Graph whose nodes correspond to individual terms;
those that co-occur are connected by edges with weights determined
by ``edge_weighting``.
"""
if window_size < 2:
raise ValueError(
"window_size = {} is invalid; value must be >= 2".format(window_size)
)
if not terms:
LOGGER.warning("input `terms` is empty, so output graph is also empty")
return nx.Graph()
# if len(terms) < window_size, cytoolz throws a StopIteration error; prevent it
if len(terms) < window_size:
LOGGER.info(
"`terms` has fewer items (%s) than `window_size` (%s); "
"setting window width to %s",
len(terms),
window_size,
len(terms),
)
window_size = len(terms)
first_term, terms = itertoolz.peek(terms)
if isinstance(first_term, str):
windows = itertoolz.sliding_window(window_size, terms)
elif isinstance(first_term, (Span, Token)):
windows = itertoolz.sliding_window(
window_size, ke_utils.normalize_terms(terms, normalize))
else:
raise TypeError(
"items in `terms` must be strings or spacy tokens, not {}".format(
type(first_term)
)
)
graph = nx.Graph()
if edge_weighting == "count":
cooc_mat = collections.Counter(
w1_w2
for window in windows
for w1_w2 in itertools.combinations(sorted(window), 2)
)
graph.add_edges_from(
(w1, w2, {"weight": weight})
for (w1, w2), weight in cooc_mat.items()
)
elif edge_weighting == "binary":
graph.add_edges_from(
w1_w2 for window in windows for w1_w2 in itertools.combinations(window, 2)
)
else:
raise ValueError(
"edge_weighting = {} is invalid; must be one of {}".format(
edge_weighting, {"count", "binary"})
)
return graph
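# Illustrative usage sketch (made-up terms, not from the library docs):
#   terms = ["graph", "rank", "node", "graph", "edge"]
#   g = build_graph_from_terms(terms, window_size=2, edge_weighting="count")
#   # g.nodes() holds the unique terms; each edge weight counts how often the
#   # two endpoint terms co-occurred within the sliding window.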
def rank_nodes_by_pagerank(
graph: nx.Graph,
weight: str = "weight",
**kwargs,
) -> Dict[Any, float]:
"""
    Rank nodes in ``graph`` using the Pagerank algorithm.
    Args:
        graph
        weight: Key in edge data that holds weights.
        **kwargs: Additional keyword arguments passed on to ``networkx.pagerank_scipy()``.
    Returns:
        Dict[object, float]: Mapping of node to Pagerank score.
"""
return nx.pagerank_scipy(graph, weight=weight, **kwargs)
def rank_nodes_by_bestcoverage(
graph: nx.Graph,
k: int,
c: int = 1,
alpha: float = 1.0,
weight: str = "weight",
) -> Dict[Any, float]:
"""
Rank nodes in a network using the BestCoverage algorithm that attempts to
balance between node centrality and diversity.
Args:
graph
k: Number of results to return for top-k search.
c: *l* parameter for *l*-step expansion; best if 1 or 2
alpha: Float in [0.0, 1.0] specifying how much of central vertex's score
to remove from its *l*-step neighbors; smaller value puts more emphasis
on centrality, larger value puts more emphasis on diversity
weight: Key in edge data that holds weights.
Returns:
Top ``k`` nodes as ranked by bestcoverage algorithm; keys as node
identifiers, values as corresponding ranking scores
References:
<NAME>., <NAME>., <NAME>., & <NAME>. (2013, May).
Diversified recommendation on graphs: pitfalls, measures, and algorithms.
In Proceedings of the 22nd international conference on World Wide Web
(pp. 715-726). International World Wide Web Conferences Steering Committee.
http://www2013.wwwconference.org/proceedings/p715.pdf
"""
alpha = float(alpha)
nodes_list = [node for node in graph]
if len(nodes_list) == 0:
LOGGER.warning("`graph` is empty")
return {}
# ranks: array of PageRank values, summing up to 1
ranks = nx.pagerank_scipy(
graph, alpha=0.85, max_iter=100, tol=1e-08, weight=weight
)
sorted_ranks = sorted(ranks.items(), key=operator.itemgetter(1), reverse=True)
avg_degree = sum(dict(graph.degree()).values()) / len(nodes_list)
# relaxation parameter, k' in the paper
k_prime = int(k * avg_degree * c)
top_k_sorted_ranks = sorted_ranks[:k_prime]
def get_l_step_expanded_set(vertices, l):
"""
Args:
vertices (iterable[str]): vertices to be expanded
l (int): how many steps to expand vertices set
Returns:
set: the l-step expanded set of vertices
"""
# add vertices to s
s = set(vertices)
# s.update(vertices)
# for each step
for _ in range(l):
# for each node
next_vertices = []
for vertex in vertices:
# add its neighbors to the next list
neighbors = graph.neighbors(vertex)
next_vertices.extend(neighbors)
s.update(neighbors)
vertices = set(next_vertices)
return s
top_k_exp_vertices = get_l_step_expanded_set(
[item[0] for item in top_k_sorted_ranks], c
)
# compute initial exprel contribution
taken: DefaultDict = collections.defaultdict(bool)
contrib = {}
for vertex in nodes_list:
# get l-step expanded set
s = get_l_step_expanded_set([vertex], c)
# sum up neighbors ranks, i.e. l-step expanded relevance
contrib[vertex] = sum(ranks[v] for v in s)
sum_contrib = 0.0
results = {}
# greedily select to maximize exprel metric
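    # contrib[v] is the "expanded relevance" exprel(v): the sum of PageRank
    # scores over v's l-step neighborhood. Each of the k rounds picks the vertex
    # with the largest remaining exprel; then, for every vertex u in the
    # winner's neighborhood, alpha * ranks[u] is subtracted from the
    # contribution of u's own l-step neighbors, penalizing candidates that
    # overlap the current selection.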
for _ in range(k):
if not contrib:
break
# find word with highest l-step expanded relevance score
max_word_score = sorted(
contrib.items(), key=operator.itemgetter(1), reverse=True
)[0]
sum_contrib += max_word_score[1] # contrib[max_word[0]]
results[max_word_score[0]] = max_word_score[1]
# find its l-step expanded set
l_step_expanded_set = get_l_step_expanded_set([max_word_score[0]], c)
# for each vertex found
for vertex in l_step_expanded_set:
# already removed its contribution from neighbors
if taken[vertex] is True:
continue
# remove the contribution of vertex (or some fraction) from its l-step neighbors
s1 = get_l_step_expanded_set([vertex], c)
for w in s1:
try:
contrib[w] -= alpha * ranks[vertex]
except KeyError:
LOGGER.error(
"Word %s not in contrib dict! We're approximating...", w
)
taken[vertex] = True
contrib[max_word_score[0]] = 0
return results
def rank_nodes_by_divrank(
graph: nx.Graph,
r: Optional[np.ndarray] = None,
lambda_: float = 0.5,
alpha: float = 0.5,
) -> Dict[str, float]:
"""
Rank nodes in a network using the DivRank algorithm that attempts to
balance between node centrality and diversity.
Args:
graph
r: The "personalization vector"; by default, ``r = ones(1, n)/n``
lambda_: Float in [0.0, 1.0]
alpha: Float in [0.0, 1.0] that controls the strength of self-links.
Returns:
Mapping of node to score ordered by descending divrank score
References:
<NAME>., <NAME>., & <NAME>. (2010, July). Divrank: the interplay of
prestige and diversity in information networks. In Proceedings of the
16th ACM SIGKDD international conference on Knowledge discovery and data
mining (pp. 1009-1018). ACM.
http://clair.si.umich.edu/~radev/papers/SIGKDD2010.pdf
"""
# check function arguments
if len(graph) == 0:
LOGGER.warning("`graph` is empty")
return {}
# specify the order of nodes to use in creating the matrix
# and then later recovering the values from the order index
nodes_list = [node for node in graph]
# create adjacency matrix, i.e.
# n x n matrix where entry W_ij is the weight of the edge from V_i to V_j
W = nx.to_numpy_matrix(graph, nodelist=nodes_list, weight="weight").A
n = W.shape[1]
# create flat prior personalization vector if none given
if r is None:
r = np.array([n * [1 / float(n)]])
# Specify some constants
max_iter = 1000
diff = 1e10
tol = 1e-3
pr = np.array([n * [1 / float(n)]])
# Get p0(v -> u), i.e. transition probability prior to reinforcement
tmp = np.reshape(np.sum(W, axis=1), (n, 1))
idx_nan = np.flatnonzero(tmp == 0)
W0 = W / np.tile(tmp, (1, n))
W0[idx_nan, :] = 0
del W
# DivRank algorithm
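    # Each iteration scales the organic transition matrix W0 column-wise by the
    # current visit distribution pr, adds self-links weighted by
    # (1 - alpha) * pr on the diagonal, renormalizes the rows, mixes the result
    # with the prior r via lambda_, and updates pr <- pr @ P until the relative
    # change falls below tol.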
i = 0
while i < max_iter and diff > tol:
W1 = alpha * W0 * np.tile(pr, (n, 1))
W1 = W1 - np.diag(W1[:, 0]) + (1 - alpha) * np.diag(pr[0, :])
tmp1 = np.reshape(np.sum(W1, axis=1), (n, 1))
P = W1 / np.tile(tmp1, (1, n))
P = ((1 - lambda_) * P) + (lambda_ * np.tile(r, (n, 1)))
pr_new = np.dot(pr, P)
i += 1
diff = np.sum(np.abs(pr_new - pr)) / np.sum(pr)
pr = pr_new
# sort nodes by divrank score
results = sorted(
((i, score) for i, score in enumerate(pr.flatten().tolist())),
key=operator.itemgetter(1),
reverse=True,
)
# replace node number by node value
divranks = {nodes_list[result[0]]: result[1] for result in results}
return divranks
|
[
"logging.getLogger",
"numpy.tile",
"numpy.abs",
"networkx.pagerank_scipy",
"numpy.flatnonzero",
"networkx.Graph",
"cytoolz.itertoolz.sliding_window",
"numpy.diag",
"itertools.combinations",
"numpy.sum",
"numpy.dot",
"cytoolz.itertoolz.peek",
"collections.defaultdict",
"operator.itemgetter",
"networkx.to_numpy_matrix"
] |
[((311, 338), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (328, 338), False, 'import logging\n'), ((2597, 2618), 'cytoolz.itertoolz.peek', 'itertoolz.peek', (['terms'], {}), '(terms)\n', (2611, 2618), False, 'from cytoolz import itertoolz\n'), ((3063, 3073), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3071, 3073), True, 'import networkx as nx\n'), ((4061, 4110), 'networkx.pagerank_scipy', 'nx.pagerank_scipy', (['graph'], {'weight': 'weight'}), '(graph, weight=weight, **kwargs)\n', (4078, 4110), True, 'import networkx as nx\n'), ((5605, 5681), 'networkx.pagerank_scipy', 'nx.pagerank_scipy', (['graph'], {'alpha': '(0.85)', 'max_iter': '(100)', 'tol': '(1e-08)', 'weight': 'weight'}), '(graph, alpha=0.85, max_iter=100, tol=1e-08, weight=weight)\n', (5622, 5681), True, 'import networkx as nx\n'), ((6911, 6940), 'collections.defaultdict', 'collections.defaultdict', (['bool'], {}), '(bool)\n', (6934, 6940), False, 'import collections\n'), ((10286, 10310), 'numpy.flatnonzero', 'np.flatnonzero', (['(tmp == 0)'], {}), '(tmp == 0)\n', (10300, 10310), True, 'import numpy as np\n'), ((2195, 2205), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2203, 2205), True, 'import networkx as nx\n'), ((2673, 2717), 'cytoolz.itertoolz.sliding_window', 'itertoolz.sliding_window', (['window_size', 'terms'], {}), '(window_size, terms)\n', (2697, 2717), False, 'from cytoolz import itertoolz\n'), ((9824, 9887), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['graph'], {'nodelist': 'nodes_list', 'weight': '"""weight"""'}), "(graph, nodelist=nodes_list, weight='weight')\n", (9842, 9887), True, 'import networkx as nx\n'), ((10245, 10262), 'numpy.sum', 'np.sum', (['W'], {'axis': '(1)'}), '(W, axis=1)\n', (10251, 10262), True, 'import numpy as np\n'), ((10324, 10344), 'numpy.tile', 'np.tile', (['tmp', '(1, n)'], {}), '(tmp, (1, n))\n', (10331, 10344), True, 'import numpy as np\n'), ((10743, 10756), 'numpy.dot', 'np.dot', (['pr', 'P'], {}), '(pr, P)\n', (10749, 10756), True, 'import numpy as np\n'), ((5741, 5763), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (5760, 5763), False, 'import operator\n'), ((10478, 10497), 'numpy.tile', 'np.tile', (['pr', '(n, 1)'], {}), '(pr, (n, 1))\n', (10485, 10497), True, 'import numpy as np\n'), ((10594, 10612), 'numpy.sum', 'np.sum', (['W1'], {'axis': '(1)'}), '(W1, axis=1)\n', (10600, 10612), True, 'import numpy as np\n'), ((10639, 10660), 'numpy.tile', 'np.tile', (['tmp1', '(1, n)'], {}), '(tmp1, (1, n))\n', (10646, 10660), True, 'import numpy as np\n'), ((10817, 10827), 'numpy.sum', 'np.sum', (['pr'], {}), '(pr)\n', (10823, 10827), True, 'import numpy as np\n'), ((10988, 11010), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (11007, 11010), False, 'import operator\n'), ((10516, 10533), 'numpy.diag', 'np.diag', (['W1[:, 0]'], {}), '(W1[:, 0])\n', (10523, 10533), True, 'import numpy as np\n'), ((10550, 10567), 'numpy.diag', 'np.diag', (['pr[0, :]'], {}), '(pr[0, :])\n', (10557, 10567), True, 'import numpy as np\n'), ((10706, 10724), 'numpy.tile', 'np.tile', (['r', '(n, 1)'], {}), '(r, (n, 1))\n', (10713, 10724), True, 'import numpy as np\n'), ((10794, 10813), 'numpy.abs', 'np.abs', (['(pr_new - pr)'], {}), '(pr_new - pr)\n', (10800, 10813), True, 'import numpy as np\n'), ((7471, 7493), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (7490, 7493), False, 'import operator\n'), ((3531, 3564), 'itertools.combinations', 'itertools.combinations', (['window', '(2)'], {}), '(window, 2)\n', (3553, 3564), False, 'import itertools\n')]
|
"""
Python implementation of the LiNGAM algorithms.
The LiNGAM Project: https://sites.google.com/site/sshimizu06/lingam
"""
import itertools
import numbers
import warnings
import numpy as np
from sklearn.utils import check_array, resample
from .bootstrap import BootstrapResult
from .direct_lingam import DirectLiNGAM
from .hsic import hsic_test_gamma
from .utils import predict_adaptive_lasso
class MultiGroupDirectLiNGAM(DirectLiNGAM):
"""Implementation of DirectLiNGAM Algorithm with multiple groups [1]_
References
----------
.. [1] <NAME>. Joint estimation of linear non-Gaussian acyclic models. Neurocomputing, 81: 104-107, 2012.
"""
def __init__(self, random_state=None, prior_knowledge=None, apply_prior_knowledge_softly=False):
"""Construct a model.
Parameters
----------
random_state : int, optional (default=None)
``random_state`` is the seed used by the random number generator.
prior_knowledge : array-like, shape (n_features, n_features), optional (default=None)
            Prior knowledge used for causal discovery, where ``n_features`` is the number of features.
            The elements of the prior knowledge matrix are defined as follows [1]_:
* ``0`` : :math:`x_i` does not have a directed path to :math:`x_j`
* ``1`` : :math:`x_i` has a directed path to :math:`x_j`
            * ``-1`` : No prior knowledge is available to know if either of the two cases above (0 or 1) is true.
apply_prior_knowledge_softly : boolean, optional (default=False)
            If True, apply prior knowledge softly.
"""
super().__init__(random_state, prior_knowledge, apply_prior_knowledge_softly)
def fit(self, X_list):
"""Fit the model to multiple datasets.
Parameters
----------
X_list : list, shape [X, ...]
            Multiple datasets for training, where ``X`` is a dataset.
            The shape of ``X`` is (n_samples, n_features),
where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
# Check parameters
X_list = self._check_X_list(X_list)
if self._Aknw is not None:
if (self._n_features, self._n_features) != self._Aknw.shape:
raise ValueError(
                    'The shape of prior knowledge must be (n_features, n_features)')
# Causal discovery
U = np.arange(self._n_features)
K = []
X_list_ = [np.copy(X) for X in X_list]
for _ in range(self._n_features):
m = self._search_causal_order(X_list_, U)
for i in U:
if i != m:
for d in range(len(X_list_)):
X_list_[d][:, i] = self._residual(
X_list_[d][:, i], X_list_[d][:, m])
K.append(m)
U = U[U != m]
if (self._Aknw is not None) and (not self._apply_prior_knowledge_softly):
self._partial_orders = self._partial_orders[self._partial_orders[:, 0] != m]
self._causal_order = K
self._adjacency_matrices = []
for X in X_list:
self._estimate_adjacency_matrix(X, prior_knowledge=self._Aknw)
self._adjacency_matrices.append(self._adjacency_matrix)
return self
def bootstrap(self, X_list, n_sampling):
"""Evaluate the statistical reliability of DAG based on the bootstrapping.
Parameters
----------
X_list : array-like, shape (X, ...)
            Multiple datasets for training, where ``X`` is a dataset.
            The shape of ``X`` is (n_samples, n_features),
where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
n_sampling : int
Number of bootstrapping samples.
Returns
-------
results : array-like, shape (BootstrapResult, ...)
Returns the results of bootstrapping for multiple datasets.
"""
# Check parameters
X_list = self._check_X_list(X_list)
if isinstance(n_sampling, (numbers.Integral, np.integer)):
if not 0 < n_sampling:
raise ValueError(
'n_sampling must be an integer greater than 0.')
else:
raise ValueError('n_sampling must be an integer greater than 0.')
# Bootstrapping
adjacency_matrices_list = np.zeros(
[len(X_list), n_sampling, self._n_features, self._n_features])
total_effects_list = np.zeros(
[len(X_list), n_sampling, self._n_features, self._n_features])
for n in range(n_sampling):
resampled_X_list = [resample(X) for X in X_list]
self.fit(resampled_X_list)
for i, am in enumerate(self._adjacency_matrices):
adjacency_matrices_list[i][n] = am
# Calculate total effects
for c, from_ in enumerate(self._causal_order):
for to in self._causal_order[c + 1:]:
effects = self.estimate_total_effect(
resampled_X_list, from_, to)
for i, effect in enumerate(effects):
total_effects_list[i, n, to, from_] = effect
result_list = []
for am, te in zip(adjacency_matrices_list, total_effects_list):
result_list.append(BootstrapResult(am, te))
return result_list
def estimate_total_effect(self, X_list, from_index, to_index):
"""Estimate total effect using causal model.
Parameters
----------
X_list : array-like, shape (X, ...)
            Multiple datasets for training, where ``X`` is a dataset.
            The shape of ``X`` is (n_samples, n_features),
where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
from_index :
Index of source variable to estimate total effect.
to_index :
Index of destination variable to estimate total effect.
Returns
-------
total_effect : float
Estimated total effect.
"""
# Check parameters
X_list = self._check_X_list(X_list)
# Check from/to causal order
from_order = self._causal_order.index(from_index)
to_order = self._causal_order.index(to_index)
if from_order > to_order:
warnings.warn(f'The estimated causal effect may be incorrect because '
f'the causal order of the destination variable (to_index={to_index}) '
f'is earlier than the source variable (from_index={from_index}).')
effects = []
for X, am in zip(X_list, self._adjacency_matrices):
# from_index + parents indices
parents = np.where(np.abs(am[from_index]) > 0)[0]
predictors = [from_index]
predictors.extend(parents)
# Estimate total effect
coefs = predict_adaptive_lasso(X, predictors, to_index)
effects.append(coefs[0])
return effects
def get_error_independence_p_values(self, X_list):
"""Calculate the p-value matrix of independence between error variables.
Parameters
----------
X_list : array-like, shape (X, ...)
            Multiple datasets for training, where ``X`` is a dataset.
            The shape of ``X`` is (n_samples, n_features),
where ``n_samples`` is the number of samples and ``n_features`` is the number of features.
Returns
-------
independence_p_values : array-like, shape (n_datasets, n_features, n_features)
p-value matrix of independence between error variables.
"""
# Check parameters
X_list = self._check_X_list(X_list)
p_values = np.zeros([len(X_list), self._n_features, self._n_features])
for d, (X, am) in enumerate(zip(X_list, self._adjacency_matrices)):
n_samples = X.shape[0]
E = X - np.dot(am, X.T).T
for i, j in itertools.combinations(range(self._n_features), 2):
_, p_value = hsic_test_gamma(np.reshape(E[:, i], [n_samples, 1]),
np.reshape(E[:, j], [n_samples, 1]))
p_values[d, i, j] = p_value
p_values[d, j, i] = p_value
return p_values
def _check_X_list(self, X_list):
"""Check input X list."""
if not isinstance(X_list, list):
raise ValueError('X_list must be a list.')
if len(X_list) < 2:
raise ValueError(
'X_list must be a list containing at least two items')
self._n_features = check_array(X_list[0]).shape[1]
X_list_ = []
for X in X_list:
X_ = check_array(X)
if X_.shape[1] != self._n_features:
raise ValueError(
'X_list must be a list with the same number of features')
X_list_.append(X_)
return np.array(X_list_)
def _search_causal_order(self, X_list, U):
"""Search the causal ordering."""
Uc, Vj = self._search_candidate(U)
if len(Uc) == 1:
return Uc[0]
total_size = 0
for X in X_list:
total_size += len(X)
MG_list = []
for i in Uc:
MG = 0
for X in X_list:
M = 0
for j in U:
if i != j:
xi_std = (X[:, i] - np.mean(X[:, i])) / np.std(X[:, i])
xj_std = (X[:, j] - np.mean(X[:, j])) / np.std(X[:, j])
ri_j = xi_std if i in Vj and j in Uc else self._residual(
xi_std, xj_std)
rj_i = xj_std if j in Vj and i in Uc else self._residual(
xj_std, xi_std)
M += np.min([0, self._diff_mutual_info(xi_std,
xj_std, ri_j, rj_i)]) ** 2
MG += M * (len(X) / total_size)
MG_list.append(-1.0 * MG)
return Uc[np.argmax(MG_list)]
@property
def adjacency_matrices_(self):
"""Estimated adjacency matrices.
Returns
-------
adjacency_matrices_ : array-like, shape (B, ...)
The list of adjacency matrix B for multiple datasets.
The shape of B is (n_features, n_features), where
n_features is the number of features.
"""
return self._adjacency_matrices
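if __name__ == "__main__":
    # Illustrative usage sketch: fit the multi-group model on two toy datasets
    # that share the causal structure x0 -> x1 but differ in connection
    # strength. Dataset sizes, coefficients, and helper names here are
    # arbitrary choices, not part of the library above.
    rng = np.random.RandomState(0)
    def _make_group(coef, n=1000):
        x0 = rng.uniform(size=n)
        x1 = coef * x0 + rng.uniform(size=n)
        return np.column_stack([x0, x1])
    model = MultiGroupDirectLiNGAM(random_state=0)
    model.fit([_make_group(3.0), _make_group(-2.0)])
    for B in model.adjacency_matrices_:  # one adjacency matrix per dataset
        print(B)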
|
[
"numpy.copy",
"numpy.abs",
"numpy.mean",
"numpy.reshape",
"numpy.argmax",
"numpy.array",
"sklearn.utils.resample",
"numpy.dot",
"sklearn.utils.check_array",
"numpy.std",
"warnings.warn",
"numpy.arange"
] |
[((2618, 2645), 'numpy.arange', 'np.arange', (['self._n_features'], {}), '(self._n_features)\n', (2627, 2645), True, 'import numpy as np\n'), ((9286, 9303), 'numpy.array', 'np.array', (['X_list_'], {}), '(X_list_)\n', (9294, 9303), True, 'import numpy as np\n'), ((2680, 2690), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (2687, 2690), True, 'import numpy as np\n'), ((6644, 6854), 'warnings.warn', 'warnings.warn', (['f"""The estimated causal effect may be incorrect because the causal order of the destination variable (to_index={to_index}) is earlier than the source variable (from_index={from_index})."""'], {}), "(\n f'The estimated causal effect may be incorrect because the causal order of the destination variable (to_index={to_index}) is earlier than the source variable (from_index={from_index}).'\n )\n", (6657, 6854), False, 'import warnings\n'), ((9064, 9078), 'sklearn.utils.check_array', 'check_array', (['X'], {}), '(X)\n', (9075, 9078), False, 'from sklearn.utils import check_array, resample\n'), ((10418, 10436), 'numpy.argmax', 'np.argmax', (['MG_list'], {}), '(MG_list)\n', (10427, 10436), True, 'import numpy as np\n'), ((4903, 4914), 'sklearn.utils.resample', 'resample', (['X'], {}), '(X)\n', (4911, 4914), False, 'from sklearn.utils import check_array, resample\n'), ((8969, 8991), 'sklearn.utils.check_array', 'check_array', (['X_list[0]'], {}), '(X_list[0])\n', (8980, 8991), False, 'from sklearn.utils import check_array, resample\n'), ((8272, 8287), 'numpy.dot', 'np.dot', (['am', 'X.T'], {}), '(am, X.T)\n', (8278, 8287), True, 'import numpy as np\n'), ((8411, 8446), 'numpy.reshape', 'np.reshape', (['E[:, i]', '[n_samples, 1]'], {}), '(E[:, i], [n_samples, 1])\n', (8421, 8446), True, 'import numpy as np\n'), ((8493, 8528), 'numpy.reshape', 'np.reshape', (['E[:, j]', '[n_samples, 1]'], {}), '(E[:, j], [n_samples, 1])\n', (8503, 8528), True, 'import numpy as np\n'), ((7061, 7083), 'numpy.abs', 'np.abs', (['am[from_index]'], {}), '(am[from_index])\n', (7067, 7083), True, 'import numpy as np\n'), ((9805, 9820), 'numpy.std', 'np.std', (['X[:, i]'], {}), '(X[:, i])\n', (9811, 9820), True, 'import numpy as np\n'), ((9885, 9900), 'numpy.std', 'np.std', (['X[:, j]'], {}), '(X[:, j])\n', (9891, 9900), True, 'import numpy as np\n'), ((9785, 9801), 'numpy.mean', 'np.mean', (['X[:, i]'], {}), '(X[:, i])\n', (9792, 9801), True, 'import numpy as np\n'), ((9865, 9881), 'numpy.mean', 'np.mean', (['X[:, j]'], {}), '(X[:, j])\n', (9872, 9881), True, 'import numpy as np\n')]
|
"""
This file contains the solution to the problem
2.8.4 of the book `Nonlinear Dynamics and Chaos` by <NAME>.
"""
import numpy as np
import matplotlib.pyplot as plt
def exact_solution(t, x0=1.0):
"""
The implementation of the exact solution to the differential equation
x' = -x
with the initial condition x(t=0)=`x0` (default x0=1, as in the problem).
"""
return x0 * np.exp(-t)
def improved_euler_method(f, t0: float, x0: float, timestep: float, end: float, exact_solution=None):
"""
    Implementation of the improved Euler method to numerically compute the solution
to the differential equation
x'=f(x)
Parameters
----------
f: function
The implementation of the function `f` appearing in the differential
equation.
t0: float
The initial time.
x0: float
The initial condition to the differential equation, i.e. the value
of x(t=t0).
timestep: float
The timestep to employ for the numerical solution of the differential
equation.
end: float
        The maximal time up to which to compute the solution.
exact_solution: function
        The exact solution. If the value is different from `None`, the exact
        solution will be evaluated at each time step and the corresponding
        values will be returned, so that the convergence of the numerical
        solution can be checked.
"""
if end < t0:
raise ValueError("Initial time is larger than the end time!")
# Store the time steps
time_steps = [t0]
# Store the value at each time step
values = [x0]
# Store the exact values of the solutions at each time step, if the exact
# solution is provided
if exact_solution:
exact_values = [exact_solution(t0)]
# Now start solving the differential equation numerically
t = t0
x = x0
while t < end:
t = t + timestep
time_steps.append(t)
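        # Improved Euler (Heun) step: take a trial forward-Euler step to get
        # x_tilde, then advance x using the average of the slopes f(x) and
        # f(x_tilde) over the same timestep.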
x_tilde = x + f(x) * timestep
x = x + 0.5 * (f(x) + f(x_tilde)) * timestep
values.append(x)
if exact_solution:
exact_values.append(exact_solution(t))
return time_steps, values, None if not exact_solution else exact_values
if __name__ == "__main__":
x0 = 1
t0 = 0
stop = 1.0
# Part a) of the exercise
print(f"\nPart a):")
print(f"The exact value of x(t) is 1/e, or approximately 1/e = {np.exp(-1)}")
# Part b) of the exercise
print(f"\nPart b):")
time_steps, values, exact_values = improved_euler_method(lambda x: -x, t0, x0, 1.0, stop, exact_solution)
print(f"time steps : {time_steps}")
print(f"values : {values}")
print(f"exact values: {exact_values}")
step_size_solutions = {}
for n in [1, 2, 3, 4]:
time_steps, values, exact_values = improved_euler_method(lambda x: -x, t0, x0, 10**(-n), stop, exact_solution)
step_size_solutions[n] = {"time_steps": time_steps, "values": values, "exact": exact_values}
# Plot the different solutions and compare
# Exact
exact_time = np.linspace(0, 1, 100)
exact = exact_solution(exact_time)
plt.plot(exact_time, exact, label='exact')
for n in [1, 2, 3, 4]:
plt.plot(step_size_solutions[n]['time_steps'], step_size_solutions[n]['values'], label=f"n={n}")
plt.title(r'Numerical vs. exact solution for different time steps ($10^{-n}$)')
plt.xlabel(r'$t$')
plt.ylabel(r'$x(t)$')
plt.legend()
plt.show()
    # Part c) of the exercise
print(f"\nPart c):")
print(f"Plotting error vs. step size.")
errors = []
for n in [1, 2, 3, 4]:
error = np.abs(step_size_solutions[n]['values'][-1] - step_size_solutions[n]['exact'][-1])
errors.append(error)
plt.plot([1, 2, 3, 4], errors)
plt.title(r'Error vs step size $10^{-n}$')
plt.xlabel(r'$n$')
plt.ylabel(r'$|\hat{x}(1)-x(1)|$')
plt.tight_layout()
plt.show()
# plt.savefig('error_vs_stepsize.pdf')
# Plot ln(E) vs ln(t)
print("Plotting error evolution with time")
for n in [1, 2, 3, 4]:
t = step_size_solutions[n]['time_steps']
values = np.array(step_size_solutions[n]['values'])
exact = np.array(step_size_solutions[n]['exact'])
errors = np.abs(values-exact)
plt.plot(np.log(t), np.log(errors), label=f"n={n}")
plt.title(r'Error evolution with time for different step sizes ($10^{-n}$).')
plt.xlabel(r'$\ln{(t)}$')
plt.ylabel(r'$\ln{(|\hat{x}(t)-x(t)|)}$')
plt.legend()
plt.minorticks_on()
plt.grid(True, which='both')
plt.show()
# plt.savefig('error_evolution_in_time_log_scale.pdf')
|
[
"numpy.abs",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"matplotlib.pyplot.minorticks_on",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((3112, 3134), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (3123, 3134), True, 'import numpy as np\n'), ((3178, 3220), 'matplotlib.pyplot.plot', 'plt.plot', (['exact_time', 'exact'], {'label': '"""exact"""'}), "(exact_time, exact, label='exact')\n", (3186, 3220), True, 'import matplotlib.pyplot as plt\n'), ((3362, 3440), 'matplotlib.pyplot.title', 'plt.title', (['"""Numerical vs. exact solution for different time steps ($10^{-n}$)"""'], {}), "('Numerical vs. exact solution for different time steps ($10^{-n}$)')\n", (3371, 3440), True, 'import matplotlib.pyplot as plt\n'), ((3446, 3463), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t$"""'], {}), "('$t$')\n", (3456, 3463), True, 'import matplotlib.pyplot as plt\n'), ((3469, 3489), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$x(t)$"""'], {}), "('$x(t)$')\n", (3479, 3489), True, 'import matplotlib.pyplot as plt\n'), ((3495, 3507), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3505, 3507), True, 'import matplotlib.pyplot as plt\n'), ((3512, 3522), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3520, 3522), True, 'import matplotlib.pyplot as plt\n'), ((3799, 3829), 'matplotlib.pyplot.plot', 'plt.plot', (['[1, 2, 3, 4]', 'errors'], {}), '([1, 2, 3, 4], errors)\n', (3807, 3829), True, 'import matplotlib.pyplot as plt\n'), ((3834, 3875), 'matplotlib.pyplot.title', 'plt.title', (['"""Error vs step size $10^{-n}$"""'], {}), "('Error vs step size $10^{-n}$')\n", (3843, 3875), True, 'import matplotlib.pyplot as plt\n'), ((3881, 3898), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$n$"""'], {}), "('$n$')\n", (3891, 3898), True, 'import matplotlib.pyplot as plt\n'), ((3904, 3938), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$|\\\\hat{x}(1)-x(1)|$"""'], {}), "('$|\\\\hat{x}(1)-x(1)|$')\n", (3914, 3938), True, 'import matplotlib.pyplot as plt\n'), ((3943, 3961), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3959, 3961), True, 'import matplotlib.pyplot as plt\n'), ((3966, 3976), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3974, 3976), True, 'import matplotlib.pyplot as plt\n'), ((4396, 4472), 'matplotlib.pyplot.title', 'plt.title', (['"""Error evolution with time for different step sizes ($10^{-n}$)."""'], {}), "('Error evolution with time for different step sizes ($10^{-n}$).')\n", (4405, 4472), True, 'import matplotlib.pyplot as plt\n'), ((4478, 4503), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\ln{(t)}$"""'], {}), "('$\\\\ln{(t)}$')\n", (4488, 4503), True, 'import matplotlib.pyplot as plt\n'), ((4508, 4550), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\ln{(|\\\\hat{x}(t)-x(t)|)}$"""'], {}), "('$\\\\ln{(|\\\\hat{x}(t)-x(t)|)}$')\n", (4518, 4550), True, 'import matplotlib.pyplot as plt\n'), ((4554, 4566), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4564, 4566), True, 'import matplotlib.pyplot as plt\n'), ((4571, 4590), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (4588, 4590), True, 'import matplotlib.pyplot as plt\n'), ((4595, 4623), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'which': '"""both"""'}), "(True, which='both')\n", (4603, 4623), True, 'import matplotlib.pyplot as plt\n'), ((4628, 4638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4636, 4638), True, 'import matplotlib.pyplot as plt\n'), ((395, 405), 'numpy.exp', 'np.exp', (['(-t)'], {}), '(-t)\n', (401, 405), True, 'import numpy as np\n'), ((3256, 3357), 'matplotlib.pyplot.plot', 'plt.plot', (["step_size_solutions[n]['time_steps']", "step_size_solutions[n]['values']"], {'label': 'f"""n={n}"""'}), "(step_size_solutions[n]['time_steps'], step_size_solutions[n][\n 'values'], label=f'n={n}')\n", (3264, 3357), True, 'import matplotlib.pyplot as plt\n'), ((3682, 3769), 'numpy.abs', 'np.abs', (["(step_size_solutions[n]['values'][-1] - step_size_solutions[n]['exact'][-1])"], {}), "(step_size_solutions[n]['values'][-1] - step_size_solutions[n][\n 'exact'][-1])\n", (3688, 3769), True, 'import numpy as np\n'), ((4192, 4234), 'numpy.array', 'np.array', (["step_size_solutions[n]['values']"], {}), "(step_size_solutions[n]['values'])\n", (4200, 4234), True, 'import numpy as np\n'), ((4251, 4292), 'numpy.array', 'np.array', (["step_size_solutions[n]['exact']"], {}), "(step_size_solutions[n]['exact'])\n", (4259, 4292), True, 'import numpy as np\n'), ((4310, 4332), 'numpy.abs', 'np.abs', (['(values - exact)'], {}), '(values - exact)\n', (4316, 4332), True, 'import numpy as np\n'), ((4348, 4357), 'numpy.log', 'np.log', (['t'], {}), '(t)\n', (4354, 4357), True, 'import numpy as np\n'), ((4359, 4373), 'numpy.log', 'np.log', (['errors'], {}), '(errors)\n', (4365, 4373), True, 'import numpy as np\n'), ((2449, 2459), 'numpy.exp', 'np.exp', (['(-1)'], {}), '(-1)\n', (2455, 2461), True, 'import numpy as np\n')]
|
import os
import time
import IPython
import numpy as np
import scipy.stats as st
from sklearn.metrics import confusion_matrix
import gym
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def get_action(actions, env):
if type(env.action_space) is gym.spaces.Discrete:
# Get index
action = actions.max(1)[1].data.numpy()[0]
elif type(env.action_space) is gym.spaces.Box:
# Get values
action = actions.data.numpy().flatten()
if np.prod(action.shape) == 1:
# Index into array
action = action[0]
return action
def gym_rollout(model, env, random_seed, mseed=None, silent=False, collect_inputs=False, do_cuda=False, max_episode_length=int(1e6), **kwargs):
"""
    Function to do rollouts of a policy defined by `model` in a given environment
"""
# Reset environment
if mseed is not None:
# Seed environment identically across workers
env.seed(mseed)
else:
# Random init
env.seed(np.random.randint(0, 10**16))
state = env.reset()
state = Variable(torch.from_numpy(state).float(), requires_grad=True).unsqueeze(0)
retrn = 0
n_observations = 0
done = False
if collect_inputs:
# Collect `collect_inputs` observations
prealdim = (int(collect_inputs),)
for d in state.size()[1:]:
prealdim = prealdim + (d,)
inputs = torch.zeros(prealdim)
# Rollout
while not done and n_observations < max_episode_length:
# Collect states as batch inputs
if collect_inputs and collect_inputs > n_observations:
inputs[n_observations,] = state.data
# Choose action
actions = model(state)
action = get_action(actions, env)
# Step
state, reward, done, _ = env.step(action)
retrn += reward
n_observations += 1
# Cast state
state = Variable(torch.from_numpy(state).float(), requires_grad=True).unsqueeze(0)
out = {'seed': random_seed, 'return': float(retrn), 'observations': n_observations}
if collect_inputs:
if collect_inputs is not True and n_observations < collect_inputs:
            # collect_inputs is a number and smaller than the number of observations seen
inputs = inputs[:n_observations,]
out['inputs'] = inputs.numpy()
queue = kwargs.get('return_queue')
if queue:
queue.put(out)
return out
def gym_render(model, env, max_episode_length):
"""
Renders the learned model on the environment for testing.
"""
try:
while True:
# Reset environment
state = env.reset()
state = Variable(torch.from_numpy(state).float(), volatile=True).unsqueeze(0)
this_model_return = 0
this_model_num_steps = 0
done = False
# Rollout
while not done and this_model_num_steps < max_episode_length:
# Choose action
actions = model(state)
action = get_action(actions, env)
# Step
state, reward, done, _ = env.step(action)
this_model_return += reward
this_model_num_steps += 1
# Cast state
state = Variable(torch.from_numpy(state).float(), volatile=True).unsqueeze(0)
env.render()
print('Reward: %f' % this_model_return)
except KeyboardInterrupt:
print("\nEnded test session by keyboard interrupt")
def gym_test(model, env, max_episode_length, n_episodes, chkpt_dir=None, **kwargs):
"""
Tests the learned model on the environment.
"""
returns = [0]*n_episodes
for i_episode in range(n_episodes):
print('Episode {:d}/{:d}'.format(i_episode, n_episodes))
# Reset environment
state = env.reset()
state = Variable(torch.from_numpy(state).float(), volatile=True).unsqueeze(0)
this_model_num_steps = 0
done = False
# Rollout
while not done and this_model_num_steps < max_episode_length:
# Choose action
actions = model(state)
action = get_action(actions, env)
# Step
state, reward, done, _ = env.step(action)
returns[i_episode] += reward
this_model_num_steps += 1
# Cast state
state = Variable(torch.from_numpy(state).float(), volatile=True).unsqueeze(0)
mean = np.mean(returns) # Mean return
sem = st.sem(returns) # Standard error of mean
s = ''
for conf in [0.9, 0.95, 0.975, 0.99]:
interval = st.norm.interval(conf, loc=mean, scale=sem)
half_width = (interval[1] - interval[0])/2
s += "{:2d}% CI = {:5.2f} +/- {:<5.2f}, [{:>5.2f}, {:<5.2f}]\n".format(int(conf*100), mean, half_width, interval[0], interval[1])
if chkpt_dir is not None:
with open(os.path.join(chkpt_dir, 'test.log'), 'w') as f:
f.write("Confidence intervals computed on " + str(n_episodes) + " episodes.")
f.write(s)
print(s)
def supervised_eval(model, train_loader, random_seed, mseed=None, silent=False, collect_inputs=False, do_cuda=False, **kwargs):
"""
Function to evaluate the fitness of a supervised model.
For supervised training, the training data set loader is viewed as the "environment"
and is passed in the env variable (train_loader).
"""
if mseed is not None:
# Use common random numbers
torch.manual_seed(mseed)
(data, target) = next(iter(train_loader))
else:
# Sample unique batch
(data, target) = next(iter(train_loader))
data, target = Variable(data), Variable(target)
if do_cuda:
data, target = data.cuda(), target.cuda()
output = model(data)
retrn = -F.nll_loss(output, target)
if do_cuda:
retrn = retrn.cpu()
retrn = retrn.data.numpy()[0]
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
accuracy = pred.eq(target.data.view_as(pred)).sum()/target.data.size()[0]
out = {'seed': random_seed, 'return': retrn, 'observations': data.data.size()[0], 'accuracy': accuracy}
if collect_inputs:
# NOTE It is necessary to convert the torch.autograd.Variable to numpy array
# in order to correctly transfer this data from the worker thread to the main thread.
# This is an unfortunate result of how Python pickling handles sending file descriptors.
# Torch sends tensors via shared memory instead of writing the values to the queue.
# The steps are roughly:
    # 1. Background process sends a token via mp.Queue.
# 2. When the main process reads the token, it opens a unix socket to the background process.
# 3. The background process sends the file descriptor via the unix socket.
out['inputs'] = data.data.numpy()
# Also print correct prediction ratio
queue = kwargs.get('return_queue')
if queue:
queue.put(out)
return out
def supervised_test(model, test_loader, cuda=False, chkpt_dir=None):
"""
Function to test the performance of a supervised classification model
"""
model.eval()
test_loss = 0
correct = 0
predictions = []
targets = []
for data, target in test_loader:
if cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target, size_average=False).data[0] # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
predictions.extend(pred.cpu().numpy().flatten())
targets.extend(target.cpu().data.numpy().flatten())
test_loss /= len(test_loader.dataset)
s = 'Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset))
cm = confusion_matrix(targets, predictions)
if chkpt_dir is not None:
with open(os.path.join(chkpt_dir, 'test.log'), 'w') as f:
f.write(s)
f.write(str(cm))
print(s)
print(cm)
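if __name__ == "__main__":
    # Illustrative sketch: roll out a minimal policy once. It assumes the
    # classic gym API used above (env.seed(), 4-tuple env.step()) and an
    # old-style PyTorch model; "CartPole-v1" is only an example environment id.
    import torch.nn as nn
    env = gym.make("CartPole-v1")
    policy = nn.Linear(env.observation_space.shape[0], env.action_space.n)
    stats = gym_rollout(policy, env, random_seed=0, max_episode_length=500)
    print("return: {}, steps: {}".format(stats["return"], stats["observations"]))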
|
[
"numpy.mean",
"torch.manual_seed",
"numpy.prod",
"torch.nn.functional.nll_loss",
"os.path.join",
"scipy.stats.norm.interval",
"torch.from_numpy",
"numpy.random.randint",
"scipy.stats.sem",
"torch.autograd.Variable",
"torch.zeros",
"sklearn.metrics.confusion_matrix"
] |
[((4493, 4509), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (4500, 4509), True, 'import numpy as np\n'), ((4535, 4550), 'scipy.stats.sem', 'st.sem', (['returns'], {}), '(returns)\n', (4541, 4550), True, 'import scipy.stats as st\n'), ((8138, 8176), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['targets', 'predictions'], {}), '(targets, predictions)\n', (8154, 8176), False, 'from sklearn.metrics import confusion_matrix\n'), ((1434, 1455), 'torch.zeros', 'torch.zeros', (['prealdim'], {}), '(prealdim)\n', (1445, 1455), False, 'import torch\n'), ((4651, 4694), 'scipy.stats.norm.interval', 'st.norm.interval', (['conf'], {'loc': 'mean', 'scale': 'sem'}), '(conf, loc=mean, scale=sem)\n', (4667, 4694), True, 'import scipy.stats as st\n'), ((5527, 5551), 'torch.manual_seed', 'torch.manual_seed', (['mseed'], {}), '(mseed)\n', (5544, 5551), False, 'import torch\n'), ((5711, 5725), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (5719, 5725), False, 'from torch.autograd import Variable\n'), ((5727, 5743), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (5735, 5743), False, 'from torch.autograd import Variable\n'), ((5848, 5874), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {}), '(output, target)\n', (5858, 5874), True, 'import torch.nn.functional as F\n'), ((1035, 1065), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10 ** 16)'], {}), '(0, 10 ** 16)\n', (1052, 1065), True, 'import numpy as np\n'), ((7461, 7490), 'torch.autograd.Variable', 'Variable', (['data'], {'volatile': '(True)'}), '(data, volatile=True)\n', (7469, 7490), False, 'from torch.autograd import Variable\n'), ((7492, 7508), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (7500, 7508), False, 'from torch.autograd import Variable\n'), ((508, 529), 'numpy.prod', 'np.prod', (['action.shape'], {}), '(action.shape)\n', (515, 529), True, 'import numpy as np\n'), ((4933, 4968), 'os.path.join', 'os.path.join', (['chkpt_dir', '"""test.log"""'], {}), "(chkpt_dir, 'test.log')\n", (4945, 4968), False, 'import os\n'), ((7559, 7605), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {'size_average': '(False)'}), '(output, target, size_average=False)\n', (7569, 7605), True, 'import torch.nn.functional as F\n'), ((8225, 8260), 'os.path.join', 'os.path.join', (['chkpt_dir', '"""test.log"""'], {}), "(chkpt_dir, 'test.log')\n", (8237, 8260), False, 'import os\n'), ((1110, 1133), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (1126, 1133), False, 'import torch\n'), ((1943, 1966), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (1959, 1966), False, 'import torch\n'), ((3898, 3921), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (3914, 3921), False, 'import torch\n'), ((2698, 2721), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (2714, 2721), False, 'import torch\n'), ((4416, 4439), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (4432, 4439), False, 'import torch\n'), ((3301, 3324), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (3317, 3324), False, 'import torch\n')]
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from skbio.sequence import NucleotideSequence
from skbio.util import classproperty
class ExampleNucleotideSequence(NucleotideSequence):
@classproperty
def degenerate_map(cls):
return {"X": set("AB"), "Y": set("BC"), "Z": set("AC")}
@classproperty
def nondegenerate_chars(cls):
return set("ABC")
@classproperty
def complement_map(cls):
comp_map = {
'A': 'C', 'C': 'A',
'B': 'B',
'X': 'Y', 'Y': 'X',
'Z': 'Z'
}
comp_map.update({c: c for c in cls.gap_chars})
return comp_map
class ExampleNucleotideSequenceSubclass(ExampleNucleotideSequence):
pass
class TestNucleotideSequence(unittest.TestCase):
def setUp(self):
self.sequence_kinds = frozenset([
str,
ExampleNucleotideSequence,
lambda s: np.fromstring(s, dtype='|S1'),
lambda s: np.fromstring(s, dtype=np.uint8)])
def test_instantiation_with_no_implementation(self):
class NucleotideSequenceSubclassNoImplementation(NucleotideSequence):
pass
with self.assertRaises(TypeError) as cm:
NucleotideSequenceSubclassNoImplementation()
self.assertIn("abstract class", str(cm.exception))
self.assertIn("nondegenerate_chars", str(cm.exception))
self.assertIn("degenerate_map", str(cm.exception))
self.assertIn("complement_map", str(cm.exception))
def test_complement_map(self):
expected = {
'A': 'C', 'C': 'A',
'B': 'B',
'X': 'Y', 'Y': 'X',
'Z': 'Z',
'.': '.',
'-': '-'
}
self.assertEqual(ExampleNucleotideSequence.complement_map, expected)
ExampleNucleotideSequence.complement_map['W'] = 'X'
ExampleNucleotideSequence.complement_map['X'] = 'W'
self.assertEqual(ExampleNucleotideSequence.complement_map, expected)
self.assertEqual(ExampleNucleotideSequence('').complement_map,
expected)
with self.assertRaises(AttributeError):
ExampleNucleotideSequence('').complement_map = {'W': 'X'}
def test_complement_without_reverse_empty(self):
# without optional attributes
comp = ExampleNucleotideSequence('').complement()
self.assertEqual(comp, ExampleNucleotideSequence(''))
# with optional attributes
comp = ExampleNucleotideSequence(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': []}).complement()
self.assertEqual(
comp,
ExampleNucleotideSequence(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': []}))
def test_complement_without_reverse_non_empty(self):
comp = ExampleNucleotideSequence('ABCXYZ.-BBZ').complement()
self.assertEqual(comp, ExampleNucleotideSequence('CBAYXZ.-BBZ'))
comp = ExampleNucleotideSequence(
'ABCXYZ.-BBZ',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': range(11)}).complement()
self.assertEqual(
comp,
ExampleNucleotideSequence(
'CBAYXZ.-BBZ',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': range(11)}))
def test_complement_with_reverse_empty(self):
rc = ExampleNucleotideSequence('').complement(reverse=True)
self.assertEqual(rc, ExampleNucleotideSequence(''))
rc = ExampleNucleotideSequence(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': []}).complement(reverse=True)
self.assertEqual(
rc,
ExampleNucleotideSequence(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': []}))
def test_complement_with_reverse_non_empty(self):
rc = ExampleNucleotideSequence('ABCXYZ.-BBZ').complement(reverse=True)
self.assertEqual(rc, ExampleNucleotideSequence('ZBB-.ZXYABC'))
rc = ExampleNucleotideSequence(
'ABCXYZ.-BBZ',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality':
range(11)}).complement(reverse=True)
self.assertEqual(
rc,
ExampleNucleotideSequence(
'ZBB-.ZXYABC',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': list(range(11))[::-1]}))
def test_reverse_complement(self):
# light tests because this just calls
# NucleotideSequence.complement(reverse=True), which is tested more
# extensively
rc = ExampleNucleotideSequence(
'ABCXYZ.-BBZ',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': range(11)}).reverse_complement()
self.assertEqual(
rc,
ExampleNucleotideSequence(
'ZBB-.ZXYABC',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': list(range(11))[::-1]}))
def test_is_reverse_complement_varied_types(self):
tested = 0
for constructor in self.sequence_kinds:
tested += 1
seq1 = ExampleNucleotideSequence('ABCXYZ.-BBZ')
seq2 = constructor('ZBB-.ZXYABC')
self.assertTrue(seq1.is_reverse_complement(seq2))
self.assertEqual(tested, 4)
def test_is_reverse_complement_empty(self):
seq1 = ExampleNucleotideSequence('')
self.assertTrue(seq1.is_reverse_complement(seq1))
# optional attributes are ignored, only the sequence is compared
seq2 = ExampleNucleotideSequence(
'',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality':
np.array([], dtype=np.int64)})
self.assertTrue(seq2.is_reverse_complement(seq2))
self.assertTrue(seq1.is_reverse_complement(seq2))
self.assertTrue(seq2.is_reverse_complement(seq1))
def test_is_reverse_complement_metadata_ignored(self):
seq1 = ExampleNucleotideSequence('ABCXYZ.-BBZ')
seq2 = ExampleNucleotideSequence(
'ZBB-.ZXYABC',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': range(11)})
self.assertFalse(seq1.is_reverse_complement(seq1))
self.assertFalse(seq2.is_reverse_complement(seq2))
self.assertTrue(seq1.is_reverse_complement(seq2))
self.assertTrue(seq2.is_reverse_complement(seq1))
def test_is_reverse_complement_non_reverse_complements(self):
# same length
seq1 = ExampleNucleotideSequence('AABC')
seq2 = ExampleNucleotideSequence('ABCX')
self.assertFalse(seq1.is_reverse_complement(seq1))
self.assertFalse(seq2.is_reverse_complement(seq2))
self.assertFalse(seq1.is_reverse_complement(seq2))
self.assertFalse(seq2.is_reverse_complement(seq1))
# different length
seq1 = ExampleNucleotideSequence('AABC')
seq2 = ExampleNucleotideSequence('ABCXZ')
self.assertFalse(seq1.is_reverse_complement(seq1))
self.assertFalse(seq2.is_reverse_complement(seq2))
self.assertFalse(seq1.is_reverse_complement(seq2))
self.assertFalse(seq2.is_reverse_complement(seq1))
def test_is_reverse_complement_type_mismatch(self):
seq1 = ExampleNucleotideSequence('ABC')
seq2 = ExampleNucleotideSequenceSubclass('ABC')
with self.assertRaises(TypeError):
seq1.is_reverse_complement(seq2)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"numpy.array",
"numpy.fromstring"
] |
[((8409, 8424), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8422, 8424), False, 'import unittest\n'), ((1325, 1354), 'numpy.fromstring', 'np.fromstring', (['s'], {'dtype': '"""|S1"""'}), "(s, dtype='|S1')\n", (1338, 1354), True, 'import numpy as np\n'), ((1378, 1410), 'numpy.fromstring', 'np.fromstring', (['s'], {'dtype': 'np.uint8'}), '(s, dtype=np.uint8)\n', (1391, 1410), True, 'import numpy as np\n'), ((6596, 6624), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (6604, 6624), True, 'import numpy as np\n')]
|
import copy
import logging
import dask
import numpy as np
import xarray as xr
from numcodecs.compat import ensure_ndarray
from xarray.backends.zarr import (
DIMENSION_KEY,
encode_zarr_attr_value,
encode_zarr_variable,
extract_zarr_variable_encoding,
)
from zarr.meta import encode_fill_value
from zarr.storage import array_meta_key, attrs_key, default_compressor, group_meta_key
from zarr.util import normalize_shape
from .api import DATASET_ID_ATTR_KEY
dask_array_type = (dask.array.Array,)
zarr_format = 2
zarr_consolidated_format = 1
zarr_metadata_key = '.zmetadata'
logger = logging.getLogger('api')
def _extract_dataset_zattrs(dataset: xr.Dataset):
"""helper function to create zattrs dictionary from Dataset global attrs"""
zattrs = {}
for k, v in dataset.attrs.items():
zattrs[k] = encode_zarr_attr_value(v)
# remove xpublish internal attribute
zattrs.pop(DATASET_ID_ATTR_KEY, None)
return zattrs
def _extract_dataarray_zattrs(da):
"""helper function to extract zattrs dictionary from DataArray"""
zattrs = {}
for k, v in da.attrs.items():
zattrs[k] = encode_zarr_attr_value(v)
zattrs[DIMENSION_KEY] = list(da.dims)
# We don't want `_FillValue` in `.zattrs`
# It should go in `fill_value` section of `.zarray`
_ = zattrs.pop('_FillValue', None)
return zattrs
def _extract_fill_value(da, dtype):
"""helper function to extract fill value from DataArray."""
fill_value = da.attrs.pop('_FillValue', None)
return encode_fill_value(fill_value, dtype)
def _extract_zarray(da, encoding, dtype):
"""helper function to extract zarr array metadata."""
meta = {
'compressor': encoding.get('compressor', da.encoding.get('compressor', default_compressor)),
'filters': encoding.get('filters', da.encoding.get('filters', None)),
'chunks': encoding.get('chunks', None),
'dtype': dtype.str,
'fill_value': _extract_fill_value(da, dtype),
'order': 'C',
'shape': list(normalize_shape(da.shape)),
'zarr_format': zarr_format,
}
if meta['chunks'] is None:
meta['chunks'] = da.shape
# validate chunks
if isinstance(da.data, dask_array_type):
var_chunks = tuple([c[0] for c in da.data.chunks])
else:
var_chunks = da.shape
if not var_chunks == tuple(meta['chunks']):
raise ValueError('Encoding chunks do not match inferred chunks')
meta['chunks'] = list(meta['chunks']) # return chunks as a list
return meta
def create_zvariables(dataset):
"""Helper function to create a dictionary of zarr encoded variables."""
zvariables = {}
for key, da in dataset.variables.items():
encoded_da = encode_zarr_variable(da, name=key)
zvariables[key] = encoded_da
return zvariables
def create_zmetadata(dataset):
"""Helper function to create a consolidated zmetadata dictionary."""
zmeta = {'zarr_consolidated_format': zarr_consolidated_format, 'metadata': {}}
zmeta['metadata'][group_meta_key] = {'zarr_format': zarr_format}
zmeta['metadata'][attrs_key] = _extract_dataset_zattrs(dataset)
for key, da in dataset.variables.items():
encoded_da = encode_zarr_variable(da, name=key)
encoding = extract_zarr_variable_encoding(da)
zmeta['metadata'][f'{key}/{attrs_key}'] = _extract_dataarray_zattrs(encoded_da)
zmeta['metadata'][f'{key}/{array_meta_key}'] = _extract_zarray(
encoded_da, encoding, encoded_da.dtype
)
return zmeta
def jsonify_zmetadata(dataset: xr.Dataset, zmetadata: dict) -> dict:
"""Helper function to convert zmetadata dictionary to a json
compatible dictionary.
"""
zjson = copy.deepcopy(zmetadata)
for key in list(dataset.variables):
# convert compressor to dict
compressor = zjson['metadata'][f'{key}/{array_meta_key}']['compressor']
if compressor is not None:
compressor_config = zjson['metadata'][f'{key}/{array_meta_key}'][
'compressor'
].get_config()
zjson['metadata'][f'{key}/{array_meta_key}']['compressor'] = compressor_config
return zjson
def encode_chunk(chunk, filters=None, compressor=None):
"""helper function largely copied from zarr.Array"""
# apply filters
if filters:
for f in filters:
chunk = f.encode(chunk)
# check object encoding
if ensure_ndarray(chunk).dtype == object:
raise RuntimeError('cannot write object array without object codec')
# compress
if compressor:
cdata = compressor.encode(chunk)
else:
cdata = chunk
return cdata
def get_data_chunk(da, chunk_id, out_shape):
"""Get one chunk of data from this DataArray (da).
If this is an incomplete edge chunk, pad the returned array to match out_shape.
"""
ikeys = tuple(map(int, chunk_id.split('.')))
if isinstance(da, dask_array_type):
chunk_data = da.blocks[ikeys]
else:
if ikeys != ((0,) * da.ndim):
raise ValueError(
'Invalid chunk_id for numpy array: %s. Should have been: %s'
% (chunk_id, ((0,) * da.ndim))
)
chunk_data = np.asarray(da)
logger.debug('checking chunk output size, %s == %s' % (chunk_data.shape, out_shape))
if isinstance(chunk_data, dask_array_type):
chunk_data = chunk_data.compute()
# zarr expects full edge chunks, contents out of bounds for the array are undefined
if chunk_data.shape != tuple(out_shape):
new_chunk = np.empty_like(chunk_data, shape=out_shape)
write_slice = tuple([slice(0, s) for s in chunk_data.shape])
new_chunk[write_slice] = chunk_data
return new_chunk
else:
return chunk_data
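if __name__ == "__main__":
    # Illustrative sketch: build consolidated zarr metadata for a tiny
    # in-memory dataset and JSON-encode it. Assumes xarray/zarr are installed
    # and that the package-relative imports above resolve; the variable name
    # "temperature" is arbitrary.
    import json
    ds = xr.Dataset({"temperature": (("x",), np.arange(4.0))})
    zmeta = create_zmetadata(ds)
    print(json.dumps(jsonify_zmetadata(ds, zmeta), indent=2))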
|
[
"logging.getLogger",
"zarr.meta.encode_fill_value",
"xarray.backends.zarr.encode_zarr_attr_value",
"numpy.asarray",
"numcodecs.compat.ensure_ndarray",
"numpy.empty_like",
"zarr.util.normalize_shape",
"xarray.backends.zarr.encode_zarr_variable",
"copy.deepcopy",
"xarray.backends.zarr.extract_zarr_variable_encoding"
] |
[((599, 623), 'logging.getLogger', 'logging.getLogger', (['"""api"""'], {}), "('api')\n", (616, 623), False, 'import logging\n'), ((1529, 1565), 'zarr.meta.encode_fill_value', 'encode_fill_value', (['fill_value', 'dtype'], {}), '(fill_value, dtype)\n', (1546, 1565), False, 'from zarr.meta import encode_fill_value\n'), ((3745, 3769), 'copy.deepcopy', 'copy.deepcopy', (['zmetadata'], {}), '(zmetadata)\n', (3758, 3769), False, 'import copy\n'), ((831, 856), 'xarray.backends.zarr.encode_zarr_attr_value', 'encode_zarr_attr_value', (['v'], {}), '(v)\n', (853, 856), False, 'from xarray.backends.zarr import DIMENSION_KEY, encode_zarr_attr_value, encode_zarr_variable, extract_zarr_variable_encoding\n'), ((1137, 1162), 'xarray.backends.zarr.encode_zarr_attr_value', 'encode_zarr_attr_value', (['v'], {}), '(v)\n', (1159, 1162), False, 'from xarray.backends.zarr import DIMENSION_KEY, encode_zarr_attr_value, encode_zarr_variable, extract_zarr_variable_encoding\n'), ((2743, 2777), 'xarray.backends.zarr.encode_zarr_variable', 'encode_zarr_variable', (['da'], {'name': 'key'}), '(da, name=key)\n', (2763, 2777), False, 'from xarray.backends.zarr import DIMENSION_KEY, encode_zarr_attr_value, encode_zarr_variable, extract_zarr_variable_encoding\n'), ((3233, 3267), 'xarray.backends.zarr.encode_zarr_variable', 'encode_zarr_variable', (['da'], {'name': 'key'}), '(da, name=key)\n', (3253, 3267), False, 'from xarray.backends.zarr import DIMENSION_KEY, encode_zarr_attr_value, encode_zarr_variable, extract_zarr_variable_encoding\n'), ((3287, 3321), 'xarray.backends.zarr.extract_zarr_variable_encoding', 'extract_zarr_variable_encoding', (['da'], {}), '(da)\n', (3317, 3321), False, 'from xarray.backends.zarr import DIMENSION_KEY, encode_zarr_attr_value, encode_zarr_variable, extract_zarr_variable_encoding\n'), ((5256, 5270), 'numpy.asarray', 'np.asarray', (['da'], {}), '(da)\n', (5266, 5270), True, 'import numpy as np\n'), ((5606, 5648), 'numpy.empty_like', 'np.empty_like', (['chunk_data'], {'shape': 'out_shape'}), '(chunk_data, shape=out_shape)\n', (5619, 5648), True, 'import numpy as np\n'), ((2034, 2059), 'zarr.util.normalize_shape', 'normalize_shape', (['da.shape'], {}), '(da.shape)\n', (2049, 2059), False, 'from zarr.util import normalize_shape\n'), ((4455, 4476), 'numcodecs.compat.ensure_ndarray', 'ensure_ndarray', (['chunk'], {}), '(chunk)\n', (4469, 4476), False, 'from numcodecs.compat import ensure_ndarray\n')]
|
import numpy as np
import dynet as dy
from xnmt import logger
import xnmt.batcher
from xnmt.events import register_xnmt_handler, handle_xnmt_event
from xnmt.expression_sequence import ExpressionSequence, LazyNumpyExpressionSequence
from xnmt.linear import Linear
from xnmt.param_collection import ParamManager
from xnmt.param_init import GlorotInitializer, ZeroInitializer
from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare
class Embedder(object):
"""
An embedder takes in word IDs and outputs continuous vectors.
This can be done on a word-by-word basis, or over a sequence.
"""
def embed(self, word):
"""Embed a single word.
Args:
word: This will generally be an integer word ID, but could also be something like a string. It could
also be batched, in which case the input will be a :class:`xnmt.batcher.Batch` of integers or other things.
Returns:
A DyNet Expression corresponding to the embedding of the word(s), possibly batched using :class:`xnmt.batcher.Batch`.
"""
raise NotImplementedError('embed must be implemented in Embedder subclasses')
def embed_sent(self, sent):
"""Embed a full sentence worth of words. By default, just do a for loop.
Args:
sent: This will generally be a list of word IDs, but could also be a list of strings or some other format.
It could also be batched, in which case it will be a (possibly masked) :class:`xnmt.batcher.Batch` object
Returns:
xnmt.expression_sequence.ExpressionSequence: An expression sequence representing vectors of each word in the input.
"""
# single mode
if not xnmt.batcher.is_batched(sent):
embeddings = [self.embed(word) for word in sent]
# minibatch mode
else:
embeddings = []
seq_len = len(sent[0])
for single_sent in sent: assert len(single_sent)==seq_len
for word_i in range(seq_len):
batch = xnmt.batcher.mark_as_batch([single_sent[word_i] for single_sent in sent])
embeddings.append(self.embed(batch))
return ExpressionSequence(expr_list=embeddings, mask=sent.mask if xnmt.batcher.is_batched(sent) else None)
def choose_vocab(self, vocab, yaml_path, src_reader, trg_reader):
    Choose the vocab for the embedder based on the passed arguments
This is done in order of priority of vocab, model+yaml_path
Args:
vocab (Vocab): If None, try to obtain from ``src_reader`` or ``trg_reader``, depending on the ``yaml_path``
yaml_path (Path): Path of this embedder in the component hierarchy. Automatically determined when deserializing the YAML model.
src_reader (InputReader): Model's src_reader, if exists and unambiguous.
trg_reader (InputReader): Model's trg_reader, if exists and unambiguous.
Returns:
xnmt.vocab.Vocab: chosen vocab
"""
if vocab is not None:
return len(vocab)
elif "src_embedder" in yaml_path:
if src_reader is None or src_reader.vocab is None:
raise ValueError("Could not determine src_embedder's vocabulary. Please set its vocab member explicitly, or specify the vocabulary of src_reader ahead of time.")
return len(src_reader.vocab)
elif "trg_embedder" in yaml_path or "output_projector" in yaml_path:
if trg_reader is None or trg_reader.vocab is None:
raise ValueError("Could not determine trg_embedder's vocabulary. Please set its vocab member explicitly, or specify the vocabulary of trg_reader ahead of time.")
return len(trg_reader.vocab)
else:
raise ValueError("Attempted to determine vocab size of {} (path: {}), but path was not src_embedder, trg_embedder, or output_projector, so it could not determine what part of the model to use. Please set vocab_size or vocab explicitly.".format(self.__class__, yaml_path))
def choose_vocab_size(self, vocab_size, vocab, yaml_path, src_reader, trg_reader):
    Choose the vocab size for the embedder based on the passed arguments
This is done in order of priority of vocab_size, vocab, model+yaml_path
Args:
vocab_size (int): vocab size or None
vocab (Vocab): vocab or None
yaml_path (Path): Path of this embedder in the component hierarchy. Automatically determined when deserializing the YAML model.
src_reader (InputReader): Model's src_reader, if exists and unambiguous.
trg_reader (InputReader): Model's trg_reader, if exists and unambiguous.
Returns:
int: chosen vocab size
"""
if vocab_size is not None:
return vocab_size
elif vocab is not None:
return len(vocab)
elif "src_embedder" in yaml_path:
if src_reader is None or src_reader.vocab is None:
raise ValueError("Could not determine src_embedder's size. Please set its vocab_size or vocab member explicitly, or specify the vocabulary of src_reader ahead of time.")
return len(src_reader.vocab)
elif "trg_embedder" in yaml_path or "output_projector" in yaml_path:
if trg_reader is None or trg_reader.vocab is None:
raise ValueError("Could not determine target embedder's size. Please set its vocab_size or vocab member explicitly, or specify the vocabulary of trg_reader ahead of time.")
return len(trg_reader.vocab)
else:
raise ValueError("Attempted to determine vocab size of {} (path: {}), but path was not src_embedder, trg_embedder, or output_projector, so it could not determine what part of the model to use. Please set vocab_size or vocab explicitly.".format(self.__class__, yaml_path))
class DenseWordEmbedder(Embedder, Linear, Serializable):
"""
Word embeddings via full matrix.
Args:
emb_dim (int): embedding dimension
weight_noise (float): apply Gaussian noise with given standard deviation to embeddings
word_dropout (float): drop out word types with a certain probability, sampling word types on a per-sentence level, see https://arxiv.org/abs/1512.05287
fix_norm (float): fix the norm of word vectors to be radius r, see https://arxiv.org/abs/1710.01329
param_init (ParamInitializer): how to initialize weight matrices
bias_init (ParamInitializer): how to initialize bias vectors
vocab_size (int): vocab size or None
vocab (Vocab): vocab or None
yaml_path (Path): Path of this embedder in the component hierarchy. Automatically set by the YAML deserializer.
src_reader (InputReader): A reader for the source side. Automatically set by the YAML deserializer.
trg_reader (InputReader): A reader for the target side. Automatically set by the YAML deserializer.
"""
yaml_tag = "!DenseWordEmbedder"
@register_xnmt_handler
@serializable_init
def __init__(self,
emb_dim=Ref("exp_global.default_layer_dim"),
weight_noise=Ref("exp_global.weight_noise", default=0.0),
word_dropout=0.0,
fix_norm=None,
param_init=Ref("exp_global.param_init", default=bare(GlorotInitializer)),
bias_init=Ref("exp_global.bias_init", default=bare(ZeroInitializer)),
vocab_size=None,
vocab=None,
yaml_path=None,
src_reader=Ref("model.src_reader", default=None),
trg_reader=Ref("model.trg_reader", default=None)):
self.fix_norm = fix_norm
self.weight_noise = weight_noise
self.word_dropout = word_dropout
self.emb_dim = emb_dim
param_collection = ParamManager.my_params(self)
self.vocab_size = self.choose_vocab_size(vocab_size, vocab, yaml_path, src_reader, trg_reader)
self.save_processed_arg("vocab_size", self.vocab_size)
self.embeddings = param_collection.add_parameters((self.vocab_size, self.emb_dim), init=param_init.initializer((self.vocab_size, self.emb_dim), is_lookup=True))
self.bias = param_collection.add_parameters((self.vocab_size,), init=bias_init.initializer((self.vocab_size,)))
@handle_xnmt_event
def on_start_sent(self, src):
self.word_id_mask = None
@handle_xnmt_event
def on_set_train(self, val):
self.train = val
def embed(self, x):
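    # Word-type dropout (https://arxiv.org/abs/1512.05287): on the first call
    # after a new sentence/batch starts, sample for each batch element a fixed
    # set of word ids to drop; the same set is reused for every position of
    # that sentence, so a dropped word type stays dropped throughout.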
if self.train and self.word_dropout > 0.0 and self.word_id_mask is None:
batch_size = len(x) if xnmt.batcher.is_batched(x) else 1
self.word_id_mask = [set(np.random.choice(self.vocab_size, int(self.vocab_size * self.word_dropout), replace=False)) for _ in range(batch_size)]
emb_e = dy.parameter(self.embeddings)
# single mode
if not xnmt.batcher.is_batched(x):
if self.train and self.word_id_mask and x in self.word_id_mask[0]:
ret = dy.zeros((self.emb_dim,))
else:
ret = dy.pick(emb_e, index=x)
if self.fix_norm is not None:
ret = dy.cdiv(ret, dy.l2_norm(ret))
if self.fix_norm != 1:
ret *= self.fix_norm
# minibatch mode
else:
ret = dy.pick_batch(emb_e, x)
if self.fix_norm is not None:
ret = dy.cdiv(ret, dy.l2_norm(ret))
if self.fix_norm != 1:
ret *= self.fix_norm
if self.train and self.word_id_mask and any(x[i] in self.word_id_mask[i] for i in range(len(x))):
dropout_mask = dy.inputTensor(np.transpose([[0.0]*self.emb_dim if x[i] in self.word_id_mask[i] else [1.0]*self.emb_dim for i in range(len(x))]), batched=True)
ret = dy.cmult(ret, dropout_mask)
if self.train and self.weight_noise > 0.0:
ret = dy.noise(ret, self.weight_noise)
return ret
def __call__(self, input_expr):
W1 = dy.parameter(self.embeddings)
b1 = dy.parameter(self.bias)
return dy.affine_transform([b1, W1, input_expr])
class SimpleWordEmbedder(Embedder, Serializable):
"""
Simple word embeddings via lookup.
Args:
emb_dim (int): embedding dimension
weight_noise (float): apply Gaussian noise with given standard deviation to embeddings
word_dropout (float): drop out word types with a certain probability, sampling word types on a per-sentence level, see https://arxiv.org/abs/1512.05287
fix_norm (float): fix the norm of word vectors to be radius r, see https://arxiv.org/abs/1710.01329
param_init (ParamInitializer): how to initialize lookup matrices
vocab_size (int): vocab size or None
vocab (Vocab): vocab or None
yaml_path (Path): Path of this embedder in the component hierarchy. Automatically set by the YAML deserializer.
src_reader (InputReader): A reader for the source side. Automatically set by the YAML deserializer.
trg_reader (InputReader): A reader for the target side. Automatically set by the YAML deserializer.
"""
yaml_tag = '!SimpleWordEmbedder'
@register_xnmt_handler
@serializable_init
def __init__(self,
emb_dim=Ref("exp_global.default_layer_dim"),
weight_noise=Ref("exp_global.weight_noise", default=0.0),
word_dropout=0.0,
fix_norm=None,
param_init=Ref("exp_global.param_init", default=bare(GlorotInitializer)),
vocab_size = None,
vocab = None,
yaml_path = None,
src_reader = Ref("model.src_reader", default=None),
trg_reader = Ref("model.trg_reader", default=None)):
#print(f"embedder received param_init: {param_init}")
self.emb_dim = emb_dim
self.weight_noise = weight_noise
self.word_dropout = word_dropout
self.fix_norm = fix_norm
self.word_id_mask = None
self.train = False
param_collection = ParamManager.my_params(self)
self.vocab_size = self.choose_vocab_size(vocab_size, vocab, yaml_path, src_reader, trg_reader)
self.save_processed_arg("vocab_size", self.vocab_size)
self.embeddings = param_collection.add_lookup_parameters((self.vocab_size, self.emb_dim),
init=param_init.initializer((self.vocab_size, self.emb_dim), is_lookup=True))
@handle_xnmt_event
def on_set_train(self, val):
self.train = val
@handle_xnmt_event
def on_start_sent(self, src):
self.word_id_mask = None
def embed(self, x):
if self.train and self.word_dropout > 0.0 and self.word_id_mask is None:
batch_size = len(x) if xnmt.batcher.is_batched(x) else 1
self.word_id_mask = [set(np.random.choice(self.vocab_size, int(self.vocab_size * self.word_dropout), replace=False)) for _ in range(batch_size)]
# single mode
if not xnmt.batcher.is_batched(x):
if self.train and self.word_id_mask and x in self.word_id_mask[0]:
ret = dy.zeros((self.emb_dim,))
else:
ret = self.embeddings[x]
if self.fix_norm is not None:
ret = dy.cdiv(ret, dy.l2_norm(ret))
if self.fix_norm != 1:
ret *= self.fix_norm
# minibatch mode
else:
ret = self.embeddings.batch(x)
if self.fix_norm is not None:
ret = dy.cdiv(ret, dy.l2_norm(ret))
if self.fix_norm != 1:
ret *= self.fix_norm
if self.train and self.word_id_mask and any(x[i] in self.word_id_mask[i] for i in range(len(x))):
dropout_mask = dy.inputTensor(np.transpose([[0.0]*self.emb_dim if x[i] in self.word_id_mask[i] else [1.0]*self.emb_dim for i in range(len(x))]), batched=True)
ret = dy.cmult(ret, dropout_mask)
if self.train and self.weight_noise > 0.0:
ret = dy.noise(ret, self.weight_noise)
return ret
class NoopEmbedder(Embedder, Serializable):
"""
This embedder performs no lookups but only passes through the inputs.
Normally, the input is an Input object, which is converted to an expression.
Args:
emb_dim (int): Size of the inputs (not required)
"""
yaml_tag = '!NoopEmbedder'
@serializable_init
def __init__(self, emb_dim):
self.emb_dim = emb_dim
def embed(self, x):
return dy.inputTensor(x, batched=xnmt.batcher.is_batched(x))
def embed_sent(self, sent):
# TODO refactor: seems a bit too many special cases that need to be distinguished
batched = xnmt.batcher.is_batched(sent)
first_sent = sent[0] if batched else sent
if hasattr(first_sent, "get_array"):
if not batched:
return LazyNumpyExpressionSequence(lazy_data=sent.get_array())
else:
return LazyNumpyExpressionSequence(lazy_data=xnmt.batcher.mark_as_batch(
map(lambda s: s.get_array(), sent)),
mask=sent.mask)
else:
if not batched:
embeddings = [self.embed(word) for word in sent]
else:
embeddings = []
for word_i in range(len(first_sent)):
embeddings.append(self.embed(xnmt.batcher.mark_as_batch([single_sent[word_i] for single_sent in sent])))
return ExpressionSequence(expr_list=embeddings, mask=sent.mask)
class PretrainedSimpleWordEmbedder(SimpleWordEmbedder, Serializable):
"""
Simple word embeddings via lookup. Initial pretrained embeddings must be supplied in FastText text format.
Args:
filename (str): Filename for the pretrained embeddings
emb_dim (int): embedding dimension; if None, use exp_global.default_layer_dim
weight_noise (float): apply Gaussian noise with given standard deviation to embeddings; if ``None``, use exp_global.weight_noise
word_dropout (float): drop out word types with a certain probability, sampling word types on a per-sentence level, see https://arxiv.org/abs/1512.05287
fix_norm (float): fix the norm of word vectors to be radius r, see https://arxiv.org/abs/1710.01329
vocab (Vocab): vocab or None
yaml_path (Path): Path of this embedder in the component hierarchy. Automatically set by the YAML deserializer.
src_reader (InputReader): A reader for the source side. Automatically set by the YAML deserializer.
trg_reader (InputReader): A reader for the target side. Automatically set by the YAML deserializer.
"""
yaml_tag = '!PretrainedSimpleWordEmbedder'
@register_xnmt_handler
@serializable_init
def __init__(self,
filename,
emb_dim=Ref("exp_global.default_layer_dim"),
weight_noise=Ref("exp_global.weight_noise", default=0.0),
word_dropout=0.0,
fix_norm = None,
vocab = None,
yaml_path = None,
src_reader = Ref("model.src_reader", default=None),
trg_reader = Ref("model.trg_reader", default=None)):
self.emb_dim = emb_dim
self.weight_noise = weight_noise
self.word_dropout = word_dropout
self.word_id_mask = None
self.train = False
self.fix_norm = fix_norm
self.pretrained_filename = filename
param_collection = ParamManager.my_params(self)
self.vocab = self.choose_vocab(vocab, yaml_path, src_reader, trg_reader)
self.vocab_size = len(vocab)
self.save_processed_arg("vocab", self.vocab)
with open(self.pretrained_filename, encoding='utf-8') as embeddings_file:
total_embs, in_vocab, missing, initial_embeddings = self._read_fasttext_embeddings(vocab, embeddings_file)
self.embeddings = param_collection.lookup_parameters_from_numpy(initial_embeddings)
logger.info(f"{in_vocab} vocabulary matches out of {total_embs} total embeddings; "
f"{missing} vocabulary words without a pretrained embedding out of {self.vocab_size}")
def _read_fasttext_embeddings(self, vocab, embeddings_file_handle):
"""
Reads FastText embeddings from a file. Also prints stats about the loaded embeddings for sanity checking.
Args:
vocab: a `Vocab` object containing the vocabulary for the experiment
embeddings_file_handle: A file handle on the embeddings file. The embeddings must be in FastText text
format.
Returns:
tuple: A tuple of (total number of embeddings read, # embeddings that match vocabulary words, # vocabulary words
without a matching embedding, embeddings array).
"""
_, dimension = next(embeddings_file_handle).split()
if int(dimension) != self.emb_dim:
raise Exception(f"An embedding size of {self.emb_dim} was specified, but the pretrained embeddings have size {dimension}")
# Poor man's Glorot initializer for missing embeddings
bound = np.sqrt(6/(self.vocab_size + self.emb_dim))
total_embs = 0
in_vocab = 0
missing = 0
embeddings = np.empty((self.vocab_size, self.emb_dim), dtype='float')
found = np.zeros(self.vocab_size, dtype='bool_')
for line in embeddings_file_handle:
total_embs += 1
word, vals = line.strip().split(' ', 1)
if word in vocab.w2i:
in_vocab += 1
index = vocab.w2i[word]
embeddings[index] = np.fromstring(vals, sep=" ")
found[index] = True
for i in range(self.vocab_size):
if not found[i]:
missing += 1
embeddings[i] = np.random.uniform(-bound, bound, self.emb_dim)
return total_embs, in_vocab, missing, embeddings
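# Illustrative standalone sketch (not part of xnmt): the same FastText text-format
# parsing that _read_fasttext_embeddings performs above, stripped of the class
# plumbing. `w2i` is a hypothetical word-to-index dict standing in for Vocab.w2i.
def load_fasttext_subset(embeddings_file_handle, w2i, emb_dim):
  _, dimension = next(embeddings_file_handle).split()
  assert int(dimension) == emb_dim, "pretrained embedding size mismatch"
  bound = np.sqrt(6 / (len(w2i) + emb_dim))  # poor man's Glorot for missing words
  embeddings = np.random.uniform(-bound, bound, (len(w2i), emb_dim))
  for line in embeddings_file_handle:
    word, vals = line.strip().split(' ', 1)
    if word in w2i:
      embeddings[w2i[word]] = np.fromstring(vals, sep=' ')
  return embeddings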
|
[
"dynet.parameter",
"xnmt.param_collection.ParamManager.my_params",
"numpy.sqrt",
"dynet.affine_transform",
"xnmt.logger.info",
"xnmt.persistence.bare",
"dynet.zeros",
"numpy.zeros",
"numpy.random.uniform",
"numpy.empty",
"dynet.pick_batch",
"dynet.pick",
"xnmt.persistence.Ref",
"dynet.l2_norm",
"xnmt.expression_sequence.ExpressionSequence",
"numpy.fromstring",
"dynet.cmult",
"dynet.noise"
] |
[((6731, 6766), 'xnmt.persistence.Ref', 'Ref', (['"""exp_global.default_layer_dim"""'], {}), "('exp_global.default_layer_dim')\n", (6734, 6766), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((6796, 6839), 'xnmt.persistence.Ref', 'Ref', (['"""exp_global.weight_noise"""'], {'default': '(0.0)'}), "('exp_global.weight_noise', default=0.0)\n", (6799, 6839), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((7194, 7231), 'xnmt.persistence.Ref', 'Ref', (['"""model.src_reader"""'], {'default': 'None'}), "('model.src_reader', default=None)\n", (7197, 7231), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((7259, 7296), 'xnmt.persistence.Ref', 'Ref', (['"""model.trg_reader"""'], {'default': 'None'}), "('model.trg_reader', default=None)\n", (7262, 7296), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((7452, 7480), 'xnmt.param_collection.ParamManager.my_params', 'ParamManager.my_params', (['self'], {}), '(self)\n', (7474, 7480), False, 'from xnmt.param_collection import ParamManager\n'), ((8403, 8432), 'dynet.parameter', 'dy.parameter', (['self.embeddings'], {}), '(self.embeddings)\n', (8415, 8432), True, 'import dynet as dy\n'), ((9476, 9505), 'dynet.parameter', 'dy.parameter', (['self.embeddings'], {}), '(self.embeddings)\n', (9488, 9505), True, 'import dynet as dy\n'), ((9515, 9538), 'dynet.parameter', 'dy.parameter', (['self.bias'], {}), '(self.bias)\n', (9527, 9538), True, 'import dynet as dy\n'), ((9550, 9591), 'dynet.affine_transform', 'dy.affine_transform', (['[b1, W1, input_expr]'], {}), '([b1, W1, input_expr])\n', (9569, 9591), True, 'import dynet as dy\n'), ((10686, 10721), 'xnmt.persistence.Ref', 'Ref', (['"""exp_global.default_layer_dim"""'], {}), "('exp_global.default_layer_dim')\n", (10689, 10721), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((10751, 10794), 'xnmt.persistence.Ref', 'Ref', (['"""exp_global.weight_noise"""'], {'default': '(0.0)'}), "('exp_global.weight_noise', default=0.0)\n", (10754, 10794), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((11072, 11109), 'xnmt.persistence.Ref', 'Ref', (['"""model.src_reader"""'], {'default': 'None'}), "('model.src_reader', default=None)\n", (11075, 11109), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((11139, 11176), 'xnmt.persistence.Ref', 'Ref', (['"""model.trg_reader"""'], {'default': 'None'}), "('model.trg_reader', default=None)\n", (11142, 11176), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((11442, 11470), 'xnmt.param_collection.ParamManager.my_params', 'ParamManager.my_params', (['self'], {}), '(self)\n', (11464, 11470), False, 'from xnmt.param_collection import ParamManager\n'), ((15951, 15986), 'xnmt.persistence.Ref', 'Ref', (['"""exp_global.default_layer_dim"""'], {}), "('exp_global.default_layer_dim')\n", (15954, 15986), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((16016, 16059), 'xnmt.persistence.Ref', 'Ref', (['"""exp_global.weight_noise"""'], {'default': '(0.0)'}), "('exp_global.weight_noise', default=0.0)\n", (16019, 16059), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((16216, 16253), 'xnmt.persistence.Ref', 'Ref', (['"""model.src_reader"""'], {'default': 'None'}), 
"('model.src_reader', default=None)\n", (16219, 16253), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((16283, 16320), 'xnmt.persistence.Ref', 'Ref', (['"""model.trg_reader"""'], {'default': 'None'}), "('model.trg_reader', default=None)\n", (16286, 16320), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((16568, 16596), 'xnmt.param_collection.ParamManager.my_params', 'ParamManager.my_params', (['self'], {}), '(self)\n', (16590, 16596), False, 'from xnmt.param_collection import ParamManager\n'), ((17040, 17216), 'xnmt.logger.info', 'logger.info', (['f"""{in_vocab} vocabulary matches out of {total_embs} total embeddings; {missing} vocabulary words without a pretrained embedding out of {self.vocab_size}"""'], {}), "(\n f'{in_vocab} vocabulary matches out of {total_embs} total embeddings; {missing} vocabulary words without a pretrained embedding out of {self.vocab_size}'\n )\n", (17051, 17216), False, 'from xnmt import logger\n'), ((18138, 18183), 'numpy.sqrt', 'np.sqrt', (['(6 / (self.vocab_size + self.emb_dim))'], {}), '(6 / (self.vocab_size + self.emb_dim))\n', (18145, 18183), True, 'import numpy as np\n'), ((18253, 18309), 'numpy.empty', 'np.empty', (['(self.vocab_size, self.emb_dim)'], {'dtype': '"""float"""'}), "((self.vocab_size, self.emb_dim), dtype='float')\n", (18261, 18309), True, 'import numpy as np\n'), ((18322, 18362), 'numpy.zeros', 'np.zeros', (['self.vocab_size'], {'dtype': '"""bool_"""'}), "(self.vocab_size, dtype='bool_')\n", (18330, 18362), True, 'import numpy as np\n'), ((8846, 8869), 'dynet.pick_batch', 'dy.pick_batch', (['emb_e', 'x'], {}), '(emb_e, x)\n', (8859, 8869), True, 'import dynet as dy\n'), ((9384, 9416), 'dynet.noise', 'dy.noise', (['ret', 'self.weight_noise'], {}), '(ret, self.weight_noise)\n', (9392, 9416), True, 'import dynet as dy\n'), ((13248, 13280), 'dynet.noise', 'dy.noise', (['ret', 'self.weight_noise'], {}), '(ret, self.weight_noise)\n', (13256, 13280), True, 'import dynet as dy\n'), ((14639, 14695), 'xnmt.expression_sequence.ExpressionSequence', 'ExpressionSequence', ([], {'expr_list': 'embeddings', 'mask': 'sent.mask'}), '(expr_list=embeddings, mask=sent.mask)\n', (14657, 14695), False, 'from xnmt.expression_sequence import ExpressionSequence, LazyNumpyExpressionSequence\n'), ((6967, 6990), 'xnmt.persistence.bare', 'bare', (['GlorotInitializer'], {}), '(GlorotInitializer)\n', (6971, 6990), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((7054, 7075), 'xnmt.persistence.bare', 'bare', (['ZeroInitializer'], {}), '(ZeroInitializer)\n', (7058, 7075), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((8577, 8602), 'dynet.zeros', 'dy.zeros', (['(self.emb_dim,)'], {}), '((self.emb_dim,))\n', (8585, 8602), True, 'import dynet as dy\n'), ((8629, 8652), 'dynet.pick', 'dy.pick', (['emb_e'], {'index': 'x'}), '(emb_e, index=x)\n', (8636, 8652), True, 'import dynet as dy\n'), ((9297, 9324), 'dynet.cmult', 'dy.cmult', (['ret', 'dropout_mask'], {}), '(ret, dropout_mask)\n', (9305, 9324), True, 'import dynet as dy\n'), ((10922, 10945), 'xnmt.persistence.bare', 'bare', (['GlorotInitializer'], {}), '(GlorotInitializer)\n', (10926, 10945), False, 'from xnmt.persistence import serializable_init, Serializable, Ref, Path, bare\n'), ((12445, 12470), 'dynet.zeros', 'dy.zeros', (['(self.emb_dim,)'], {}), '((self.emb_dim,))\n', (12453, 12470), True, 'import dynet as dy\n'), ((13161, 13188), 'dynet.cmult', 
'dy.cmult', (['ret', 'dropout_mask'], {}), '(ret, dropout_mask)\n', (13169, 13188), True, 'import dynet as dy\n'), ((18582, 18610), 'numpy.fromstring', 'np.fromstring', (['vals'], {'sep': '""" """'}), "(vals, sep=' ')\n", (18595, 18610), True, 'import numpy as np\n'), ((18745, 18791), 'numpy.random.uniform', 'np.random.uniform', (['(-bound)', 'bound', 'self.emb_dim'], {}), '(-bound, bound, self.emb_dim)\n', (18762, 18791), True, 'import numpy as np\n'), ((8933, 8948), 'dynet.l2_norm', 'dy.l2_norm', (['ret'], {}), '(ret)\n', (8943, 8948), True, 'import dynet as dy\n'), ((12797, 12812), 'dynet.l2_norm', 'dy.l2_norm', (['ret'], {}), '(ret)\n', (12807, 12812), True, 'import dynet as dy\n'), ((8720, 8735), 'dynet.l2_norm', 'dy.l2_norm', (['ret'], {}), '(ret)\n', (8730, 8735), True, 'import dynet as dy\n'), ((12583, 12598), 'dynet.l2_norm', 'dy.l2_norm', (['ret'], {}), '(ret)\n', (12593, 12598), True, 'import dynet as dy\n')]
|
import ctypes as ct
import numpy as np
import scipy.io as scio
import matplotlib.pyplot as plt
# Init ctypes types
DOUBLE = ct.c_double
PtrDOUBLE = ct.POINTER(DOUBLE)
PtrPtrDOUBLE = ct.POINTER(PtrDOUBLE)
PtrPtrPtrDOUBLE = ct.POINTER(PtrPtrDOUBLE)
class TestStruct(ct.Structure):
_fields_ = [
("ScanR", ct.c_double), # Radius of the scanning trajectory of x-ray source
("DecFanAng", ct.c_double), # Fan angle coverage of the detector element along the horizontal diretion
("DecHeigh", ct.c_double), # Physical heigth of the detector along the vertical direction
("YL", ct.c_int), # Detector cell number on each row along the horizontal direction
("ZL", ct.c_int), # Detector cell number on each column along the vertical direction
("YOffSet", ct.c_double), # Detector offset along the horizontal direction (pixel, e.g. quarter pixel)
("ZOffSet", ct.c_double), # Detector offset along the vertical direcion (pixel, e.g. quarter pixel)
("AngleNumber", ct.c_int), # Number of view samples on the scanning trajectory
("DistD", ct.c_double), # Distance between the x-ray source and the detector
("Radius", ct.c_double), # Radius of the phantom
("RecSize", ct.c_int), # Reconstructed size
("centerX", ct.c_int), # Reconstructed center on x axis
("centerY", ct.c_int), # Reconstructed center on y axis
("centerZ", ct.c_int), # Reconstructed center on z axis
("FOILength", ct.c_int), # Reconstructed length on x axis
("FOIWidth", ct.c_int), # Reconstructed length on y axis
("FOIHeigh", ct.c_int), # Reconstructed length on z axis
("GF", PtrPtrPtrDOUBLE), # Projection data/ Sinogram data
("RecIm", PtrPtrPtrDOUBLE) # Reconstructed 3D image data
]
def double3darray2pointer(arr):
    # Converts a 3D numpy array to a ctypes 3D array.
arr_dimx = DOUBLE * arr.shape[2]
arr_dimy = PtrDOUBLE * arr.shape[1]
arr_dimz = PtrPtrDOUBLE * arr.shape[0]
arr_ptr = arr_dimz()
for i, row in enumerate(arr):
arr_ptr[i] = arr_dimy()
for j, col in enumerate(row):
arr_ptr[i][j] = arr_dimx()
for k, val in enumerate(col):
arr_ptr[i][j][k] = val
return arr_ptr
def double3dpointer2array(ptr, n, m, o):
# Converts ctypes 3D array into a 3D numpy array.
arr = np.zeros(shape=(n, m, o))
for i in range(n):
for j in range(m):
for k in range(o):
arr[i, j, k] = ptr[i][j][k]
return arr
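# Illustrative self-check (not in the original script): round-trip a small random
# volume through the two helpers above to confirm that the ctypes triple pointer
# mirrors the numpy array element for element.
_demo = np.random.rand(2, 3, 4)
_demo_ptr = double3darray2pointer(_demo)
assert np.allclose(_demo, double3dpointer2array(_demo_ptr, *_demo.shape))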
# Load the compiled library
recon = ct.CDLL("./fdk_equiAngle.dll")
# Define arguments of the C function
recon.fbp.argtypes = [ct.POINTER(TestStruct)]
# Define the return type of the C function
recon.fbp.restype = None
# Load the data
dataFile = './data/FDK_Filtering_curve.mat'
data = scio.loadmat(dataFile)
# init the struct
t = TestStruct()
t.ScanR = data['ScanR']
t.DistD = data['DistD']
t.DecFanAng = data['DecFanAng']
t.DecHeigh = data['DecHeigh']
t.YL = data['YL']
t.ZL = data['ZL']
t.AngleNumber = data['ProjScale']
t.Radius = data['Radius']
# These are flexible parameters.
t.RecSize = 128
t.centerX = 64
t.centerY = 64
t.centerZ = 64
t.FOILength = 128
t.FOIWidth = 128
t.FOIHeigh = 128
# Generate a 2D ctypes array from numpy array
GF = data['GF']
GF_ptr = double3darray2pointer(GF)
t.GF = GF_ptr
# RecIm = np.zeros(shape=(t.RecSize, t.RecSize, t.RecSize))
RecIm = np.zeros(shape=(t.FOILength, t.FOIWidth, t.FOIHeigh))
RecIm_ptr = double3darray2pointer(RecIm)
t.RecIm = RecIm_ptr
# interface with C function
recon.fbp(ct.byref(t))
# Convert ctypes 2D arrays to numpy arrays
RecA = double3dpointer2array(RecIm_ptr, *RecIm.shape)
# save result
dataNew = './data/FDK_RecImage_curve.mat'
scio.savemat(dataNew,
{'Rec': RecA})
plt.figure()
plt.imshow(RecA[:, :, 80], cmap='gray')
plt.show()
|
[
"matplotlib.pyplot.imshow",
"ctypes.byref",
"ctypes.POINTER",
"scipy.io.savemat",
"scipy.io.loadmat",
"numpy.zeros",
"matplotlib.pyplot.figure",
"ctypes.CDLL",
"matplotlib.pyplot.show"
] |
[((149, 167), 'ctypes.POINTER', 'ct.POINTER', (['DOUBLE'], {}), '(DOUBLE)\n', (159, 167), True, 'import ctypes as ct\n'), ((183, 204), 'ctypes.POINTER', 'ct.POINTER', (['PtrDOUBLE'], {}), '(PtrDOUBLE)\n', (193, 204), True, 'import ctypes as ct\n'), ((223, 247), 'ctypes.POINTER', 'ct.POINTER', (['PtrPtrDOUBLE'], {}), '(PtrPtrDOUBLE)\n', (233, 247), True, 'import ctypes as ct\n'), ((2900, 2930), 'ctypes.CDLL', 'ct.CDLL', (['"""./fdk_equiAngle.dll"""'], {}), "('./fdk_equiAngle.dll')\n", (2907, 2930), True, 'import ctypes as ct\n'), ((3151, 3173), 'scipy.io.loadmat', 'scio.loadmat', (['dataFile'], {}), '(dataFile)\n', (3163, 3173), True, 'import scipy.io as scio\n'), ((3747, 3800), 'numpy.zeros', 'np.zeros', ([], {'shape': '(t.FOILength, t.FOIWidth, t.FOIHeigh)'}), '(shape=(t.FOILength, t.FOIWidth, t.FOIHeigh))\n', (3755, 3800), True, 'import numpy as np\n'), ((4069, 4105), 'scipy.io.savemat', 'scio.savemat', (['dataNew', "{'Rec': RecA}"], {}), "(dataNew, {'Rec': RecA})\n", (4081, 4105), True, 'import scipy.io as scio\n'), ((4120, 4132), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4130, 4132), True, 'import matplotlib.pyplot as plt\n'), ((4133, 4172), 'matplotlib.pyplot.imshow', 'plt.imshow', (['RecA[:, :, 80]'], {'cmap': '"""gray"""'}), "(RecA[:, :, 80], cmap='gray')\n", (4143, 4172), True, 'import matplotlib.pyplot as plt\n'), ((4173, 4183), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4181, 4183), True, 'import matplotlib.pyplot as plt\n'), ((2694, 2719), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, m, o)'}), '(shape=(n, m, o))\n', (2702, 2719), True, 'import numpy as np\n'), ((2990, 3012), 'ctypes.POINTER', 'ct.POINTER', (['TestStruct'], {}), '(TestStruct)\n', (3000, 3012), True, 'import ctypes as ct\n'), ((3901, 3912), 'ctypes.byref', 'ct.byref', (['t'], {}), '(t)\n', (3909, 3912), True, 'import ctypes as ct\n')]
|
import argparse
import cv2
import numpy as np
import torch.nn.functional as F
from torchvision.transforms.functional import normalize
from facexlib.matting import init_matting_model
from facexlib.utils import img2tensor
def main(args):
modnet = init_matting_model()
# read image
img = cv2.imread(args.img_path) / 255.
# unify image channels to 3
if len(img.shape) == 2:
img = img[:, :, None]
if img.shape[2] == 1:
img = np.repeat(img, 3, axis=2)
elif img.shape[2] == 4:
img = img[:, :, 0:3]
img_t = img2tensor(img, bgr2rgb=True, float32=True)
normalize(img_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
img_t = img_t.unsqueeze(0).cuda()
# resize image for input
_, _, im_h, im_w = img_t.shape
ref_size = 512
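    # When the image is entirely smaller or entirely larger than ref_size, rescale
    # so that its shorter side becomes ref_size; afterwards both sides are truncated
    # to multiples of 32, presumably the stride the matting network expects.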
if max(im_h, im_w) < ref_size or min(im_h, im_w) > ref_size:
if im_w >= im_h:
im_rh = ref_size
im_rw = int(im_w / im_h * ref_size)
elif im_w < im_h:
im_rw = ref_size
im_rh = int(im_h / im_w * ref_size)
else:
im_rh = im_h
im_rw = im_w
im_rw = im_rw - im_rw % 32
im_rh = im_rh - im_rh % 32
img_t = F.interpolate(img_t, size=(im_rh, im_rw), mode='area')
# inference
_, _, matte = modnet(img_t, True)
# resize and save matte
matte = F.interpolate(matte, size=(im_h, im_w), mode='area')
matte = matte[0][0].data.cpu().numpy()
cv2.imwrite(args.save_path, (matte * 255).astype('uint8'))
# get foreground
matte = matte[:, :, None]
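    # Composite the estimated foreground over a plain white background:
    # output = image * alpha + white * (1 - alpha), so pixels with a low matte
    # value fade towards white.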
foreground = img * matte + np.full(img.shape, 1) * (1 - matte)
cv2.imwrite(args.save_path.replace('.png', '_fg.png'), foreground * 255)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--img_path', type=str, default='assets/test.jpg')
parser.add_argument('--save_path', type=str, default='test_matting.png')
args = parser.parse_args()
main(args)
|
[
"numpy.repeat",
"argparse.ArgumentParser",
"facexlib.utils.img2tensor",
"torch.nn.functional.interpolate",
"numpy.full",
"torchvision.transforms.functional.normalize",
"facexlib.matting.init_matting_model",
"cv2.imread"
] |
[((252, 272), 'facexlib.matting.init_matting_model', 'init_matting_model', ([], {}), '()\n', (270, 272), False, 'from facexlib.matting import init_matting_model\n'), ((560, 603), 'facexlib.utils.img2tensor', 'img2tensor', (['img'], {'bgr2rgb': '(True)', 'float32': '(True)'}), '(img, bgr2rgb=True, float32=True)\n', (570, 603), False, 'from facexlib.utils import img2tensor\n'), ((608, 672), 'torchvision.transforms.functional.normalize', 'normalize', (['img_t', '(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {'inplace': '(True)'}), '(img_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)\n', (617, 672), False, 'from torchvision.transforms.functional import normalize\n'), ((1191, 1245), 'torch.nn.functional.interpolate', 'F.interpolate', (['img_t'], {'size': '(im_rh, im_rw)', 'mode': '"""area"""'}), "(img_t, size=(im_rh, im_rw), mode='area')\n", (1204, 1245), True, 'import torch.nn.functional as F\n'), ((1342, 1394), 'torch.nn.functional.interpolate', 'F.interpolate', (['matte'], {'size': '(im_h, im_w)', 'mode': '"""area"""'}), "(matte, size=(im_h, im_w), mode='area')\n", (1355, 1394), True, 'import torch.nn.functional as F\n'), ((1739, 1764), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1762, 1764), False, 'import argparse\n'), ((301, 326), 'cv2.imread', 'cv2.imread', (['args.img_path'], {}), '(args.img_path)\n', (311, 326), False, 'import cv2\n'), ((464, 489), 'numpy.repeat', 'np.repeat', (['img', '(3)'], {'axis': '(2)'}), '(img, 3, axis=2)\n', (473, 489), True, 'import numpy as np\n'), ((1584, 1605), 'numpy.full', 'np.full', (['img.shape', '(1)'], {}), '(img.shape, 1)\n', (1591, 1605), True, 'import numpy as np\n')]
|
# Image-based testing borrowed from vispy
"""
Procedure for unit-testing with images:
1. Run unit tests at least once; this initializes a git clone of
pyqtgraph/test-data in ~/.pyqtgraph.
2. Run individual test scripts with the PYQTGRAPH_AUDIT environment variable set:
$ PYQTGRAPH_AUDIT=1 python pyqtgraph/graphicsItems/tests/test_PlotCurveItem.py
Any failing tests will
display the test results, standard image, and the differences between the
two. If the test result is bad, then press (f)ail. If the test result is
good, then press (p)ass and the new image will be saved to the test-data
directory.
3. After adding or changing test images, create a new commit:
$ cd ~/.pyqtgraph/test-data
$ git add ...
$ git commit -a
4. Look up the most recent tag name from the `testDataTag` global variable
below. Increment the tag name by 1 and create a new tag in the test-data
repository:
$ git tag test-data-NNN
$ git push --tags origin master
This tag is used to ensure that each pyqtgraph commit is linked to a specific
commit in the test-data repository. This makes it possible to push new
commits to the test-data repository without interfering with existing
tests, and also allows unit tests to continue working on older pyqtgraph
versions.
"""
# This is the name of a tag in the test-data repository that this version of
# pyqtgraph should be tested against. When adding or changing test images,
# create and push a new tag and update this variable.
testDataTag = 'test-data-3'
import time
import os
import sys
import inspect
import base64
import subprocess as sp
import numpy as np
if sys.version[0] >= '3':
import http.client as httplib
import urllib.parse as urllib
else:
import httplib
import urllib
from ..Qt import QtGui, QtCore
from .. import functions as fn
from .. import GraphicsLayoutWidget
from .. import ImageItem, TextItem
tester = None
def getTester():
global tester
if tester is None:
tester = ImageTester()
return tester
def assertImageApproved(image, standardFile, message=None, **kwargs):
"""Check that an image test result matches a pre-approved standard.
If the result does not match, then the user can optionally invoke a GUI
to compare the images and decide whether to fail the test or save the new
image as the standard.
This function will automatically clone the test-data repository into
~/.pyqtgraph/test-data. However, it is up to the user to ensure this repository
is kept up to date and to commit/push new images after they are saved.
Run the test with the environment variable PYQTGRAPH_AUDIT=1 to bring up
the auditing GUI.
Parameters
----------
image : (h, w, 4) ndarray
standardFile : str
The name of the approved test image to check against. This file name
is relative to the root of the pyqtgraph test-data repository and will
be automatically fetched.
message : str
A string description of the image. It is recommended to describe
specific features that an auditor should look for when deciding whether
to fail a test.
Extra keyword arguments are used to set the thresholds for automatic image
comparison (see ``assertImageMatch()``).
"""
if isinstance(image, QtGui.QWidget):
w = image
image = np.zeros((w.height(), w.width(), 4), dtype=np.ubyte)
qimg = fn.makeQImage(image, alpha=True, copy=False, transpose=False)
painter = QtGui.QPainter(qimg)
w.render(painter)
painter.end()
if message is None:
code = inspect.currentframe().f_back.f_code
message = "%s::%s" % (code.co_filename, code.co_name)
# Make sure we have a test data repo available, possibly invoking git
dataPath = getTestDataRepo()
# Read the standard image if it exists
stdFileName = os.path.join(dataPath, standardFile + '.png')
if not os.path.isfile(stdFileName):
stdImage = None
else:
pxm = QtGui.QPixmap()
pxm.load(stdFileName)
stdImage = fn.imageToArray(pxm.toImage(), copy=True, transpose=False)
# If the test image does not match, then we go to audit if requested.
try:
if image.shape[2] != stdImage.shape[2]:
raise Exception("Test result has different channel count than standard image"
"(%d vs %d)" % (image.shape[2], stdImage.shape[2]))
if image.shape != stdImage.shape:
# Allow im1 to be an integer multiple larger than im2 to account
# for high-resolution displays
ims1 = np.array(image.shape).astype(float)
ims2 = np.array(stdImage.shape).astype(float)
sr = ims1 / ims2 if ims1[0] > ims2[0] else ims2 / ims1
if (sr[0] != sr[1] or not np.allclose(sr, np.round(sr)) or
sr[0] < 1):
raise TypeError("Test result shape %s is not an integer factor"
" different than standard image shape %s." %
(ims1, ims2))
sr = np.round(sr).astype(int)
            image = fn.downsample(image, sr[0], axis=(0, 1)).astype(image.dtype)
assertImageMatch(image, stdImage, **kwargs)
except Exception:
if stdFileName in gitStatus(dataPath):
print("\n\nWARNING: unit test failed against modified standard "
"image %s.\nTo revert this file, run `cd %s; git checkout "
"%s`\n" % (stdFileName, dataPath, standardFile))
if os.getenv('PYQTGRAPH_AUDIT') == '1':
sys.excepthook(*sys.exc_info())
getTester().test(image, stdImage, message)
stdPath = os.path.dirname(stdFileName)
print('Saving new standard image to "%s"' % stdFileName)
if not os.path.isdir(stdPath):
os.makedirs(stdPath)
img = fn.makeQImage(image, alpha=True, copy=False, transpose=False)
img.save(stdFileName)
else:
if stdImage is None:
raise Exception("Test standard %s does not exist. Set "
"PYQTGRAPH_AUDIT=1 to add this image." % stdFileName)
else:
if os.getenv('TRAVIS') is not None:
saveFailedTest(image, stdImage, standardFile)
raise
def assertImageMatch(im1, im2, minCorr=None, pxThreshold=50.,
pxCount=0, maxPxDiff=None, avgPxDiff=None,
imgDiff=None):
"""Check that two images match.
Images that differ in shape or dtype will fail unconditionally.
Further tests for similarity depend on the arguments supplied.
By default, images may have no pixels that gave a value difference greater
than 50.
Parameters
----------
im1 : (h, w, 4) ndarray
Test output image
im2 : (h, w, 4) ndarray
Test standard image
minCorr : float or None
Minimum allowed correlation coefficient between corresponding image
values (see numpy.corrcoef)
pxThreshold : float
Minimum value difference at which two pixels are considered different
pxCount : int or None
Maximum number of pixels that may differ
maxPxDiff : float or None
Maximum allowed difference between pixels
avgPxDiff : float or None
Average allowed difference between pixels
imgDiff : float or None
Maximum allowed summed difference between images
"""
assert im1.ndim == 3
assert im1.shape[2] == 4
assert im1.dtype == im2.dtype
diff = im1.astype(float) - im2.astype(float)
if imgDiff is not None:
assert np.abs(diff).sum() <= imgDiff
pxdiff = diff.max(axis=2) # largest value difference per pixel
mask = np.abs(pxdiff) >= pxThreshold
if pxCount is not None:
assert mask.sum() <= pxCount
maskedDiff = diff[mask]
if maxPxDiff is not None and maskedDiff.size > 0:
assert maskedDiff.max() <= maxPxDiff
if avgPxDiff is not None and maskedDiff.size > 0:
assert maskedDiff.mean() <= avgPxDiff
if minCorr is not None:
with np.errstate(invalid='ignore'):
corr = np.corrcoef(im1.ravel(), im2.ravel())[0, 1]
assert corr >= minCorr
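def _exampleAssertImageMatch():
    # Illustrative sketch (not part of the original module): with the default
    # thresholds two identical images pass, because no per-pixel difference
    # reaches pxThreshold and the masked pixel count therefore stays at zero.
    im = np.zeros((2, 2, 4), dtype=np.ubyte)
    assertImageMatch(im, im.copy())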
def saveFailedTest(data, expect, filename):
"""Upload failed test images to web server to allow CI test debugging.
"""
commit, error = runSubprocess(['git', 'rev-parse', 'HEAD'])
name = filename.split('/')
name.insert(-1, commit.strip())
filename = '/'.join(name)
host = 'data.pyqtgraph.org'
# concatenate data, expect, and diff into a single image
ds = data.shape
es = expect.shape
shape = (max(ds[0], es[0]) + 4, ds[1] + es[1] + 8 + max(ds[1], es[1]), 4)
img = np.empty(shape, dtype=np.ubyte)
img[..., :3] = 100
img[..., 3] = 255
img[2:2+ds[0], 2:2+ds[1], :ds[2]] = data
img[2:2+es[0], ds[1]+4:ds[1]+4+es[1], :es[2]] = expect
diff = makeDiffImage(data, expect)
img[2:2+diff.shape[0], -diff.shape[1]-2:-2] = diff
png = makePng(img)
conn = httplib.HTTPConnection(host)
req = urllib.urlencode({'name': filename,
'data': base64.b64encode(png)})
conn.request('POST', '/upload.py', req)
response = conn.getresponse().read()
conn.close()
print("\nImage comparison failed. Test result: %s %s Expected result: "
"%s %s" % (data.shape, data.dtype, expect.shape, expect.dtype))
print("Uploaded to: \nhttp://%s/data/%s" % (host, filename))
if not response.startswith(b'OK'):
print("WARNING: Error uploading data to %s" % host)
print(response)
def makePng(img):
"""Given an array like (H, W, 4), return a PNG-encoded byte string.
"""
io = QtCore.QBuffer()
qim = fn.makeQImage(img, alpha=False)
qim.save(io, format='png')
png = io.data().data().encode()
return png
def makeDiffImage(im1, im2):
"""Return image array showing the differences between im1 and im2.
Handles images of different shape. Alpha channels are not compared.
"""
ds = im1.shape
es = im2.shape
diff = np.empty((max(ds[0], es[0]), max(ds[1], es[1]), 4), dtype=int)
diff[..., :3] = 128
diff[..., 3] = 255
diff[:ds[0], :ds[1], :min(ds[2], 3)] += im1[..., :3]
diff[:es[0], :es[1], :min(es[2], 3)] -= im2[..., :3]
diff = np.clip(diff, 0, 255).astype(np.ubyte)
return diff
class ImageTester(QtGui.QWidget):
"""Graphical interface for auditing image comparison tests.
"""
def __init__(self):
self.lastKey = None
QtGui.QWidget.__init__(self)
self.resize(1200, 800)
self.showFullScreen()
self.layout = QtGui.QGridLayout()
self.setLayout(self.layout)
self.view = GraphicsLayoutWidget()
self.layout.addWidget(self.view, 0, 0, 1, 2)
self.label = QtGui.QLabel()
self.layout.addWidget(self.label, 1, 0, 1, 2)
self.label.setWordWrap(True)
font = QtGui.QFont("monospace", 14, QtGui.QFont.Bold)
self.label.setFont(font)
self.passBtn = QtGui.QPushButton('Pass')
self.failBtn = QtGui.QPushButton('Fail')
self.layout.addWidget(self.passBtn, 2, 0)
self.layout.addWidget(self.failBtn, 2, 1)
self.views = (self.view.addViewBox(row=0, col=0),
self.view.addViewBox(row=0, col=1),
self.view.addViewBox(row=0, col=2))
labelText = ['test output', 'standard', 'diff']
for i, v in enumerate(self.views):
v.setAspectLocked(1)
v.invertY()
v.image = ImageItem()
v.image.setAutoDownsample(True)
v.addItem(v.image)
v.label = TextItem(labelText[i])
v.setBackgroundColor(0.5)
self.views[1].setXLink(self.views[0])
self.views[1].setYLink(self.views[0])
self.views[2].setXLink(self.views[0])
self.views[2].setYLink(self.views[0])
def test(self, im1, im2, message):
"""Ask the user to decide whether an image test passes or fails.
This method displays the test image, reference image, and the difference
between the two. It then blocks until the user selects the test output
by clicking a pass/fail button or typing p/f. If the user fails the test,
then an exception is raised.
"""
self.show()
if im2 is None:
message += '\nImage1: %s %s Image2: [no standard]' % (im1.shape, im1.dtype)
im2 = np.zeros((1, 1, 3), dtype=np.ubyte)
else:
message += '\nImage1: %s %s Image2: %s %s' % (im1.shape, im1.dtype, im2.shape, im2.dtype)
self.label.setText(message)
self.views[0].image.setImage(im1.transpose(1, 0, 2))
self.views[1].image.setImage(im2.transpose(1, 0, 2))
diff = makeDiffImage(im1, im2).transpose(1, 0, 2)
self.views[2].image.setImage(diff)
self.views[0].autoRange()
while True:
QtGui.QApplication.processEvents()
lastKey = self.lastKey
self.lastKey = None
if lastKey in ('f', 'esc') or not self.isVisible():
raise Exception("User rejected test result.")
elif lastKey == 'p':
break
time.sleep(0.03)
for v in self.views:
v.image.setImage(np.zeros((1, 1, 3), dtype=np.ubyte))
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Escape:
self.lastKey = 'esc'
else:
self.lastKey = str(event.text()).lower()
def getTestDataRepo():
"""Return the path to a git repository with the required commit checked
out.
If the repository does not exist, then it is cloned from
https://github.com/pyqtgraph/test-data. If the repository already exists
then the required commit is checked out.
"""
global testDataTag
dataPath = os.path.join(os.path.expanduser('~'), '.pyqtgraph', 'test-data')
gitPath = 'https://github.com/pyqtgraph/test-data'
gitbase = gitCmdBase(dataPath)
if os.path.isdir(dataPath):
# Already have a test-data repository to work with.
# Get the commit ID of testDataTag. Do a fetch if necessary.
try:
tagCommit = gitCommitId(dataPath, testDataTag)
except NameError:
cmd = gitbase + ['fetch', '--tags', 'origin']
print(' '.join(cmd))
sp.check_call(cmd)
try:
tagCommit = gitCommitId(dataPath, testDataTag)
except NameError:
raise Exception("Could not find tag '%s' in test-data repo at"
" %s" % (testDataTag, dataPath))
except Exception:
if not os.path.exists(os.path.join(dataPath, '.git')):
raise Exception("Directory '%s' does not appear to be a git "
"repository. Please remove this directory." %
dataPath)
else:
raise
# If HEAD is not the correct commit, then do a checkout
if gitCommitId(dataPath, 'HEAD') != tagCommit:
print("Checking out test-data tag '%s'" % testDataTag)
sp.check_call(gitbase + ['checkout', testDataTag])
else:
print("Attempting to create git clone of test data repo in %s.." %
dataPath)
parentPath = os.path.split(dataPath)[0]
if not os.path.isdir(parentPath):
os.makedirs(parentPath)
if os.getenv('TRAVIS') is not None:
# Create a shallow clone of the test-data repository (to avoid
# downloading more data than is necessary)
os.makedirs(dataPath)
cmds = [
gitbase + ['init'],
gitbase + ['remote', 'add', 'origin', gitPath],
gitbase + ['fetch', '--tags', 'origin', testDataTag,
'--depth=1'],
gitbase + ['checkout', '-b', 'master', 'FETCH_HEAD'],
]
else:
# Create a full clone
cmds = [['git', 'clone', gitPath, dataPath]]
for cmd in cmds:
print(' '.join(cmd))
rval = sp.check_call(cmd)
if rval == 0:
continue
raise RuntimeError("Test data path '%s' does not exist and could "
"not be created with git. Please create a git "
"clone of %s at this path." %
(dataPath, gitPath))
return dataPath
def gitCmdBase(path):
return ['git', '--git-dir=%s/.git' % path, '--work-tree=%s' % path]
def gitStatus(path):
"""Return a string listing all changes to the working tree in a git
repository.
"""
cmd = gitCmdBase(path) + ['status', '--porcelain']
return runSubprocess(cmd, stderr=None, universal_newlines=True)
def gitCommitId(path, ref):
"""Return the commit id of *ref* in the git repository at *path*.
"""
cmd = gitCmdBase(path) + ['show', ref]
try:
output = runSubprocess(cmd, stderr=None, universal_newlines=True)
except sp.CalledProcessError:
print(cmd)
raise NameError("Unknown git reference '%s'" % ref)
commit = output.split('\n')[0]
assert commit[:7] == 'commit '
return commit[7:]
def runSubprocess(command, return_code=False, **kwargs):
"""Run command using subprocess.Popen
Similar to subprocess.check_output(), which is not available in 2.6.
Run command and wait for command to complete. If the return code was zero
then return, otherwise raise CalledProcessError.
    By default, this will also add stdout= and stderr=subprocess.PIPE
to the call to Popen to suppress printing to the terminal.
Parameters
----------
command : list of str
Command to run as subprocess (see subprocess.Popen documentation).
**kwargs : dict
Additional kwargs to pass to ``subprocess.Popen``.
Returns
-------
stdout : str
Stdout returned by the process.
"""
# code adapted with permission from mne-python
use_kwargs = dict(stderr=None, stdout=sp.PIPE)
use_kwargs.update(kwargs)
p = sp.Popen(command, **use_kwargs)
output = p.communicate()[0]
# communicate() may return bytes, str, or None depending on the kwargs
# passed to Popen(). Convert all to unicode str:
output = '' if output is None else output
output = output.decode('utf-8') if isinstance(output, bytes) else output
if p.returncode != 0:
print(output)
err_fun = sp.CalledProcessError.__init__
if 'output' in inspect.getargspec(err_fun).args:
raise sp.CalledProcessError(p.returncode, command, output)
else:
raise sp.CalledProcessError(p.returncode, command)
return output
|
[
"numpy.clip",
"base64.b64encode",
"time.sleep",
"numpy.array",
"sys.exc_info",
"httplib.HTTPConnection",
"subprocess.Popen",
"subprocess.CalledProcessError",
"os.path.split",
"os.path.isdir",
"numpy.empty",
"os.path.expanduser",
"numpy.round",
"numpy.abs",
"subprocess.check_call",
"os.path.isfile",
"os.path.dirname",
"os.makedirs",
"os.getenv",
"inspect.currentframe",
"os.path.join",
"inspect.getargspec",
"numpy.errstate",
"numpy.zeros"
] |
[((3948, 3993), 'os.path.join', 'os.path.join', (['dataPath', "(standardFile + '.png')"], {}), "(dataPath, standardFile + '.png')\n", (3960, 3993), False, 'import os\n'), ((8885, 8916), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'np.ubyte'}), '(shape, dtype=np.ubyte)\n', (8893, 8916), True, 'import numpy as np\n'), ((9202, 9230), 'httplib.HTTPConnection', 'httplib.HTTPConnection', (['host'], {}), '(host)\n', (9224, 9230), False, 'import httplib\n'), ((14306, 14329), 'os.path.isdir', 'os.path.isdir', (['dataPath'], {}), '(dataPath)\n', (14319, 14329), False, 'import os\n'), ((18484, 18515), 'subprocess.Popen', 'sp.Popen', (['command'], {}), '(command, **use_kwargs)\n', (18492, 18515), True, 'import subprocess as sp\n'), ((4005, 4032), 'os.path.isfile', 'os.path.isfile', (['stdFileName'], {}), '(stdFileName)\n', (4019, 4032), False, 'import os\n'), ((7879, 7893), 'numpy.abs', 'np.abs', (['pxdiff'], {}), '(pxdiff)\n', (7885, 7893), True, 'import numpy as np\n'), ((14156, 14179), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (14174, 14179), False, 'import os\n'), ((8244, 8273), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (8255, 8273), True, 'import numpy as np\n'), ((9313, 9334), 'base64.b64encode', 'base64.b64encode', (['png'], {}), '(png)\n', (9329, 9334), False, 'import base64\n'), ((10497, 10518), 'numpy.clip', 'np.clip', (['diff', '(0)', '(255)'], {}), '(diff, 0, 255)\n', (10504, 10518), True, 'import numpy as np\n'), ((12706, 12741), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.ubyte'}), '((1, 1, 3), dtype=np.ubyte)\n', (12714, 12741), True, 'import numpy as np\n'), ((13504, 13520), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (13514, 13520), False, 'import time\n'), ((15465, 15515), 'subprocess.check_call', 'sp.check_call', (["(gitbase + ['checkout', testDataTag])"], {}), "(gitbase + ['checkout', testDataTag])\n", (15478, 15515), True, 'import subprocess as sp\n'), ((15648, 15671), 'os.path.split', 'os.path.split', (['dataPath'], {}), '(dataPath)\n', (15661, 15671), False, 'import os\n'), ((15690, 15715), 'os.path.isdir', 'os.path.isdir', (['parentPath'], {}), '(parentPath)\n', (15703, 15715), False, 'import os\n'), ((15729, 15752), 'os.makedirs', 'os.makedirs', (['parentPath'], {}), '(parentPath)\n', (15740, 15752), False, 'import os\n'), ((15765, 15784), 'os.getenv', 'os.getenv', (['"""TRAVIS"""'], {}), "('TRAVIS')\n", (15774, 15784), False, 'import os\n'), ((15940, 15961), 'os.makedirs', 'os.makedirs', (['dataPath'], {}), '(dataPath)\n', (15951, 15961), False, 'import os\n'), ((16460, 16478), 'subprocess.check_call', 'sp.check_call', (['cmd'], {}), '(cmd)\n', (16473, 16478), True, 'import subprocess as sp\n'), ((18973, 19025), 'subprocess.CalledProcessError', 'sp.CalledProcessError', (['p.returncode', 'command', 'output'], {}), '(p.returncode, command, output)\n', (18994, 19025), True, 'import subprocess as sp\n'), ((19058, 19102), 'subprocess.CalledProcessError', 'sp.CalledProcessError', (['p.returncode', 'command'], {}), '(p.returncode, command)\n', (19079, 19102), True, 'import subprocess as sp\n'), ((3679, 3701), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (3699, 3701), False, 'import inspect\n'), ((5630, 5658), 'os.getenv', 'os.getenv', (['"""PYQTGRAPH_AUDIT"""'], {}), "('PYQTGRAPH_AUDIT')\n", (5639, 5658), False, 'import os\n'), ((5788, 5816), 'os.path.dirname', 'os.path.dirname', (['stdFileName'], {}), '(stdFileName)\n', (5803, 5816), False, 'import 
os\n'), ((13580, 13615), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.ubyte'}), '((1, 1, 3), dtype=np.ubyte)\n', (13588, 13615), True, 'import numpy as np\n'), ((14662, 14680), 'subprocess.check_call', 'sp.check_call', (['cmd'], {}), '(cmd)\n', (14675, 14680), True, 'import subprocess as sp\n'), ((18921, 18948), 'inspect.getargspec', 'inspect.getargspec', (['err_fun'], {}), '(err_fun)\n', (18939, 18948), False, 'import inspect\n'), ((4689, 4710), 'numpy.array', 'np.array', (['image.shape'], {}), '(image.shape)\n', (4697, 4710), True, 'import numpy as np\n'), ((4744, 4768), 'numpy.array', 'np.array', (['stdImage.shape'], {}), '(stdImage.shape)\n', (4752, 4768), True, 'import numpy as np\n'), ((5172, 5184), 'numpy.round', 'np.round', (['sr'], {}), '(sr)\n', (5180, 5184), True, 'import numpy as np\n'), ((5905, 5927), 'os.path.isdir', 'os.path.isdir', (['stdPath'], {}), '(stdPath)\n', (5918, 5927), False, 'import os\n'), ((5945, 5965), 'os.makedirs', 'os.makedirs', (['stdPath'], {}), '(stdPath)\n', (5956, 5965), False, 'import os\n'), ((7769, 7781), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (7775, 7781), True, 'import numpy as np\n'), ((4904, 4916), 'numpy.round', 'np.round', (['sr'], {}), '(sr)\n', (4912, 4916), True, 'import numpy as np\n'), ((5695, 5709), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (5707, 5709), False, 'import sys\n'), ((6322, 6341), 'os.getenv', 'os.getenv', (['"""TRAVIS"""'], {}), "('TRAVIS')\n", (6331, 6341), False, 'import os\n'), ((14995, 15025), 'os.path.join', 'os.path.join', (['dataPath', '""".git"""'], {}), "(dataPath, '.git')\n", (15007, 15025), False, 'import os\n')]
|
import pandas as pd
import numpy as np
import csv
csv_file_loc = './/data//for-full-text-annotation.csv'
"""
Read in the CSV file and get the required data from it. Format the data.
"""
def get_file_description():
data = {}
all_rows = pd.read_csv(csv_file_loc)
all_rows = np.asarray(all_rows)
labels = get_labels()
labels[0] = "id"
for i in range(1, len(all_rows)):
row = all_rows[i]
name = row[labels.index('pmid')] # the name of the PMC file
if name in data:
data[name].append(gen_row_dictionary(labels, row))
else:
data[name] = [gen_row_dictionary(labels, row)]
return data
"""
Get the labels/headers for each column.
"""
def get_labels():
with open(csv_file_loc, newline = '') as csvfile:
for row in csv.reader(csvfile, delimiter = ",", quotechar='|'):
return row
"""
Take a row and put all the data into a dictionary.
@param labels represents the name of that column and what type of data it is.
@param row represents all the data in that row.
"""
def gen_row_dictionary(labels, row):
data = {}
for i in range(len(labels)):
data[labels[i]] = row[i]
return data
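# e.g. gen_row_dictionary(['id', 'pmid'], ['3', '12345']) -> {'id': '3', 'pmid': '12345'}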
#get_file_description()
|
[
"numpy.asarray",
"pandas.read_csv",
"csv.reader"
] |
[((246, 271), 'pandas.read_csv', 'pd.read_csv', (['csv_file_loc'], {}), '(csv_file_loc)\n', (257, 271), True, 'import pandas as pd\n'), ((287, 307), 'numpy.asarray', 'np.asarray', (['all_rows'], {}), '(all_rows)\n', (297, 307), True, 'import numpy as np\n'), ((815, 864), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(csvfile, delimiter=',', quotechar='|')\n", (825, 864), False, 'import csv\n')]
|
# Minimal example showing how to reuse the exported c-code with
# different time-steps.
#
# There are two use-cases demonstrated here. One use-case is to change
# the length of the time-steps vector (this results in a different
# N). Another use-case is to change the final time but keep the number
# of shooting nodes identical. Reusing the exported code with varying
# N can be useful, especially in a C-only application where the process
# of code-generation should only be done once.
#
# This example is an extension of the 'minimal_example_ocp.py' example.
#
# Copyright 2021 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import os
import sys
sys.path.insert(0, '../common')
from acados_template import AcadosOcp, AcadosOcpSolver
from pendulum_model import export_pendulum_ode_model
import numpy as np
import scipy.linalg
from utils import plot_pendulum
print('This example demonstrates 2 use-cases for reuse of the code export.')
# create ocp object to formulate the OCP
ocp = AcadosOcp()
# set model
model = export_pendulum_ode_model()
ocp.model = model
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu
ny_e = nx
# define the different options for the use-case demonstration
N0 = 20 # original number of shooting nodes
N12 = 15 # change the number of shooting nodes for use-cases 1 and 2
Tf_01 = 1.0 # original final time and for use-case 1
Tf_2 = Tf_01 * 0.7 # change final time for use-case 2 (but keep N identical)
# set dimensions
ocp.dims.N = N0
# set cost
Q = 2 * np.diag([1e3, 1e3, 1e-2, 1e-2])
R = 2 * np.diag([1e-2])
ocp.cost.W_e = Q
ocp.cost.W = scipy.linalg.block_diag(Q, R)
ocp.cost.cost_type = 'LINEAR_LS'
ocp.cost.cost_type_e = 'LINEAR_LS'
ocp.cost.Vx = np.zeros((ny, nx))
ocp.cost.Vx[:nx, :nx] = np.eye(nx)
Vu = np.zeros((ny, nu))
Vu[4, 0] = 1.0
ocp.cost.Vu = Vu
ocp.cost.Vx_e = np.eye(nx)
ocp.cost.yref = np.zeros((ny,))
ocp.cost.yref_e = np.zeros((ny_e,))
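# Note: with the LINEAR_LS cost module the stage residual is (roughly)
# y = Vx @ x + Vu @ u - yref, penalized by the weight W, so the Vx/Vu matrices
# above simply stack the full state and the input into the output vector y.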
# set constraints
Fmax = 80
ocp.constraints.lbu = np.array([-Fmax])
ocp.constraints.ubu = np.array([+Fmax])
ocp.constraints.idxbu = np.array([0])
ocp.constraints.x0 = np.array([0.0, np.pi, 0.0, 0.0])
# set options
ocp.solver_options.qp_solver = 'PARTIAL_CONDENSING_HPIPM' # FULL_CONDENSING_QPOASES
# PARTIAL_CONDENSING_HPIPM, FULL_CONDENSING_QPOASES, FULL_CONDENSING_HPIPM,
# PARTIAL_CONDENSING_QPDUNES, PARTIAL_CONDENSING_OSQP
ocp.solver_options.hessian_approx = 'GAUSS_NEWTON'
ocp.solver_options.integrator_type = 'ERK'
# ocp.solver_options.print_level = 1
ocp.solver_options.nlp_solver_type = 'SQP' # SQP_RTI, SQP
# set prediction horizon
ocp.solver_options.tf = Tf_01
print(80*'-')
print('generate code and compile...')
ocp_solver = AcadosOcpSolver(ocp, json_file='acados_ocp.json')
# --------------------------------------------------------------------------------
# 0) solve the problem defined here (original from code export), analog to 'minimal_example_ocp.py'
simX0 = np.ndarray((N0 + 1, nx))
simU0 = np.ndarray((N0, nu))
print(80*'-')
print(f'solve original code with N = {N0} and Tf = {Tf_01} s:')
status = ocp_solver.solve()
if status != 0:
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
raise Exception('acados returned status {}. Exiting.'.format(status))
# get solution
for i in range(N0):
simX0[i, :] = ocp_solver.get(i, "x")
simU0[i, :] = ocp_solver.get(i, "u")
simX0[N0, :] = ocp_solver.get(N0, "x")
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
# plot but don't halt
plot_pendulum(np.linspace(0, Tf_01, N0 + 1), Fmax, simU0, simX0, latexify=False, plt_show=False, X_true_label=f'original: N={N0}, Tf={Tf_01}')
# --------------------------------------------------------------------------------
# 1) now reuse the code but set a new time-steps vector, with a new number of elements
dt1 = Tf_01 / N12
new_time_steps1 = np.tile(dt1, (N12,))  # the numpy equivalent of Matlab's repmat
time1 = np.hstack([0, np.cumsum(new_time_steps1)])
simX1 = np.ndarray((N12 + 1, nx))
simU1 = np.ndarray((N12, nu))
ocp_solver.set_new_time_steps(new_time_steps1)
print(80*'-')
print(f'solve use-case 1 with N = {N12} (instead of {N0}) and Tf = {Tf_01} s:')
status = ocp_solver.solve()
if status != 0:
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
raise Exception('acados returned status {}. Exiting.'.format(status))
# get solution
for i in range(N12):
simX1[i, :] = ocp_solver.get(i, "x")
simU1[i, :] = ocp_solver.get(i, "u")
simX1[N12, :] = ocp_solver.get(N12, "x")
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
plot_pendulum(time1, Fmax, simU1, simX1, latexify=False, plt_show=False, X_true_label=f'use-case 1: N={N12}')
# --------------------------------------------------------------------------------
# 2) reuse the code again, set a new time-steps vector, only with a different final time
dt2 = Tf_2 / N12
new_time_steps2 = np.tile(dt2, (N12,))  # the numpy equivalent of Matlab's repmat
time2 = np.hstack([0, np.cumsum(new_time_steps2)])
simX2 = np.ndarray((N12 + 1, nx))
simU2 = np.ndarray((N12, nu))
ocp_solver.set_new_time_steps(new_time_steps2)
print(80*'-')
print(f'solve use-case 2 with N = {N12} and Tf = {Tf_2} s (instead of {Tf_01} s):')
status = ocp_solver.solve()
if status != 0:
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
raise Exception('acados returned status {}. Exiting.'.format(status))
# get solution
for i in range(N12):
simX2[i, :] = ocp_solver.get(i, "x")
simU2[i, :] = ocp_solver.get(i, "u")
simX2[N12, :] = ocp_solver.get(N12, "x")
ocp_solver.print_statistics() # encapsulates: stat = ocp_solver.get_stats("statistics")
plot_pendulum(time2, Fmax, simU2, simX2, latexify=False, plt_show=True, X_true_label=f'use-case 2: Tf={Tf_2} s')
|
[
"numpy.tile",
"numpy.eye",
"sys.path.insert",
"acados_template.AcadosOcpSolver",
"acados_template.AcadosOcp",
"utils.plot_pendulum",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.ndarray",
"pendulum_model.export_pendulum_ode_model",
"numpy.cumsum"
] |
[((2088, 2119), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../common"""'], {}), "(0, '../common')\n", (2103, 2119), False, 'import sys\n'), ((2426, 2437), 'acados_template.AcadosOcp', 'AcadosOcp', ([], {}), '()\n', (2435, 2437), False, 'from acados_template import AcadosOcp, AcadosOcpSolver\n'), ((2459, 2486), 'pendulum_model.export_pendulum_ode_model', 'export_pendulum_ode_model', ([], {}), '()\n', (2484, 2486), False, 'from pendulum_model import export_pendulum_ode_model\n'), ((3140, 3158), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {}), '((ny, nx))\n', (3148, 3158), True, 'import numpy as np\n'), ((3183, 3193), 'numpy.eye', 'np.eye', (['nx'], {}), '(nx)\n', (3189, 3193), True, 'import numpy as np\n'), ((3200, 3218), 'numpy.zeros', 'np.zeros', (['(ny, nu)'], {}), '((ny, nu))\n', (3208, 3218), True, 'import numpy as np\n'), ((3268, 3278), 'numpy.eye', 'np.eye', (['nx'], {}), '(nx)\n', (3274, 3278), True, 'import numpy as np\n'), ((3296, 3311), 'numpy.zeros', 'np.zeros', (['(ny,)'], {}), '((ny,))\n', (3304, 3311), True, 'import numpy as np\n'), ((3330, 3347), 'numpy.zeros', 'np.zeros', (['(ny_e,)'], {}), '((ny_e,))\n', (3338, 3347), True, 'import numpy as np\n'), ((3399, 3416), 'numpy.array', 'np.array', (['[-Fmax]'], {}), '([-Fmax])\n', (3407, 3416), True, 'import numpy as np\n'), ((3439, 3456), 'numpy.array', 'np.array', (['[+Fmax]'], {}), '([+Fmax])\n', (3447, 3456), True, 'import numpy as np\n'), ((3481, 3494), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (3489, 3494), True, 'import numpy as np\n'), ((3517, 3549), 'numpy.array', 'np.array', (['[0.0, np.pi, 0.0, 0.0]'], {}), '([0.0, np.pi, 0.0, 0.0])\n', (3525, 3549), True, 'import numpy as np\n'), ((4092, 4141), 'acados_template.AcadosOcpSolver', 'AcadosOcpSolver', (['ocp'], {'json_file': '"""acados_ocp.json"""'}), "(ocp, json_file='acados_ocp.json')\n", (4107, 4141), False, 'from acados_template import AcadosOcp, AcadosOcpSolver\n'), ((4335, 4359), 'numpy.ndarray', 'np.ndarray', (['(N0 + 1, nx)'], {}), '((N0 + 1, nx))\n', (4345, 4359), True, 'import numpy as np\n'), ((4368, 4388), 'numpy.ndarray', 'np.ndarray', (['(N0, nu)'], {}), '((N0, nu))\n', (4378, 4388), True, 'import numpy as np\n'), ((5301, 5321), 'numpy.tile', 'np.tile', (['dt1', '(N12,)'], {}), '(dt1, (N12,))\n', (5308, 5321), True, 'import numpy as np\n'), ((5415, 5440), 'numpy.ndarray', 'np.ndarray', (['(N12 + 1, nx)'], {}), '((N12 + 1, nx))\n', (5425, 5440), True, 'import numpy as np\n'), ((5449, 5470), 'numpy.ndarray', 'np.ndarray', (['(N12, nu)'], {}), '((N12, nu))\n', (5459, 5470), True, 'import numpy as np\n'), ((6076, 6189), 'utils.plot_pendulum', 'plot_pendulum', (['time1', 'Fmax', 'simU1', 'simX1'], {'latexify': '(False)', 'plt_show': '(False)', 'X_true_label': 'f"""use-case 1: N={N12}"""'}), "(time1, Fmax, simU1, simX1, latexify=False, plt_show=False,\n X_true_label=f'use-case 1: N={N12}')\n", (6089, 6189), False, 'from utils import plot_pendulum\n'), ((6395, 6415), 'numpy.tile', 'np.tile', (['dt2', '(N12,)'], {}), '(dt2, (N12,))\n', (6402, 6415), True, 'import numpy as np\n'), ((6509, 6534), 'numpy.ndarray', 'np.ndarray', (['(N12 + 1, nx)'], {}), '((N12 + 1, nx))\n', (6519, 6534), True, 'import numpy as np\n'), ((6543, 6564), 'numpy.ndarray', 'np.ndarray', (['(N12, nu)'], {}), '((N12, nu))\n', (6553, 6564), True, 'import numpy as np\n'), ((7174, 7290), 'utils.plot_pendulum', 'plot_pendulum', (['time2', 'Fmax', 'simU2', 'simX2'], {'latexify': '(False)', 'plt_show': '(True)', 'X_true_label': 'f"""use-case 2: Tf={Tf_2} s"""'}), "(time2, Fmax, simU2, 
simX2, latexify=False, plt_show=True,\n X_true_label=f'use-case 2: Tf={Tf_2} s')\n", (7187, 7290), False, 'from utils import plot_pendulum\n'), ((2939, 2976), 'numpy.diag', 'np.diag', (['[1000.0, 1000.0, 0.01, 0.01]'], {}), '([1000.0, 1000.0, 0.01, 0.01])\n', (2946, 2976), True, 'import numpy as np\n'), ((2979, 2994), 'numpy.diag', 'np.diag', (['[0.01]'], {}), '([0.01])\n', (2986, 2994), True, 'import numpy as np\n'), ((4964, 4993), 'numpy.linspace', 'np.linspace', (['(0)', 'Tf_01', '(N0 + 1)'], {}), '(0, Tf_01, N0 + 1)\n', (4975, 4993), True, 'import numpy as np\n'), ((5377, 5403), 'numpy.cumsum', 'np.cumsum', (['new_time_steps1'], {}), '(new_time_steps1)\n', (5386, 5403), True, 'import numpy as np\n'), ((6471, 6497), 'numpy.cumsum', 'np.cumsum', (['new_time_steps2'], {}), '(new_time_steps2)\n', (6480, 6497), True, 'import numpy as np\n')]
|
# -------------------------------------------------------------------------------------------------------------------------------------------------------------
# Main: three base classifiers (image - K-Means clustering, text - Logistic Regression, features - Random Forest), evaluated on all white (benign) PDF files
# @Authors: <NAME> and <NAME>
# @Version: 1.0
# @Date 06.2019
# -------------------------------------------------------------------------------------------------------------------------------------------------------------
# libraries
from classes.dataPDF import dataPDF
from classes.createDATA import createDATA
from classes.readPDF import readPDF
import os
import sys
import csv
import argparse
import tempfile
import numpy as np
from numpy import random
from array import *
# machine learning libraries
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn import metrics
from sklearn.pipeline import Pipeline, make_pipeline
# importing K-Means
from sklearn.cluster import KMeans
# import RF
from sklearn.ensemble import RandomForestClassifier
# import LR
from sklearn.linear_model import LogisticRegression
# import AdaBoostClassifier
from sklearn.ensemble import AdaBoostClassifier
# import AdaBoostRegressor
from sklearn.ensemble import AdaBoostRegressor
# import XGBClassifier
from xgboost import XGBClassifier
# import XGBRegressor
from xgboost.sklearn import XGBRegressor
# import RFClassifier
from sklearn.ensemble import RandomForestClassifier
# import RFRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
if __name__ == "__main__":
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
help="path to input dataset")
# arguments for k-means-clustering
ap.add_argument("-c", "--clusters", type = int, default = 16,
help="the number of clusters to form as well as the number of centroids to generate")
ap.add_argument("-j", "--jobs", type = int, default = -1,
help="the number of jobs to use for the computation. ")
args = vars(ap.parse_args())
# define the name of the directory to be created
path_IMAGES = "IMAGES"
path_TEXTS = "TEXTS"
# create folders for images and texts
try:
os.mkdir(path_IMAGES)
os.mkdir(path_TEXTS)
except OSError:
print("[!] Creation of the directories {} or {} failed, maybe the folders are exist".format(path_IMAGES, path_TEXTS))
else:
print(
"[*] Successfully created the directories {} and {} ".format(path_IMAGES, path_TEXTS))
folder_path = os.getcwd()
dataset_path = os.path.join(folder_path, args["dataset"])
# check if a folder of data is exist
if (not os.path.exists(dataset_path)):
print("[!] The {} folder is not exist!\n GOODBYE".format(dataset_path))
sys.exit()
# create csv file
with open("pdfFILES.csv", 'w') as csvFile:
fields = ['File', 'Text']
writer = csv.DictWriter(csvFile, fieldnames = fields)
writer.writeheader()
# start create data
print("+++++++++++++++++++++++++++++++++++ START CREATE DATA +++++++++++++++++++++++++++++++++++")
obj_data = createDATA(folder_path, args["dataset"])
# convert first page of pdf file to image
result = obj_data.convert(dataset_path)
if (result):
print("[*] Succces convert pdf files")
else:
print("[!] Whoops. something wrong dude. enable err var to track it")
sys.exit()
# extract JavaScript from pdf file
result = obj_data.extract(dataset_path)
if (result):
print("[*] Succces extract JavaScript from pdf files")
else:
print("[!] Whoops. something wrong dude. enable err var to track it")
sys.exit()
print("\n+++++++++++++++++++++++++++++++++++++++++ FINISH ++++++++++++++++++++++++++++++++++++++++\n")
# start create vectors
print("++++++++++++++++++++++++++++++++++ START CREATE VECTORS +++++++++++++++++++++++++++++++++")
# dir of folder and filter for pdf files
files = [f for f in os.listdir(dataset_path) if os.path.isfile(os.path.join(dataset_path, f))]
files = list(filter(lambda f: f.endswith(('.pdf', '.PDF')), files))
# variables for print information
cnt_files = len(files)
obj_pdfs = []
labels = []
obj_read = readPDF(obj_data.getDict())
set_white_files = []
# loop over the input pdfs
for (i, pdfFILE) in enumerate(files):
label = -1
if ("mal" == pdfFILE.split(".")[0]):
label = 1
else:
label = 0
set_white_files.append(pdfFILE)
labels.append(label)
# create pdf object
obj_pdf = dataPDF(pdfFILE, folder_path+'/', args["dataset"])
obj_pdf.calculate_histogram_blur()
obj_pdf.calculate_dsurlsjsentropy()
obj_pdf.save_text(obj_read.extractTEXT(obj_pdf.getFilename(), obj_pdf.getImage()))
obj_pdfs.append(obj_pdf)
# show an update every 50 pdfs
if (i > 0 and i % 50 == 0):
print("[INFO] processed {}/{}".format(i, cnt_files))
print("[INFO] processed {}/{}".format(cnt_files, cnt_files))
print("\n+++++++++++++++++++++++++++++++++++++++++ FINISH ++++++++++++++++++++++++++++++++++++++++\n")
# start machine learning
print("+++++++++++++++++++++++++++++++++ START MACHINE LEARNING ++++++++++++++++++++++++++++++++")
labels = np.array(labels)
# partition the data into training and testing splits, using 70%
# of the data for training and the remaining 30% for testing
(trainF, testF, trainLabels, testLabels) = train_test_split(obj_pdfs, labels, test_size = 0.30, random_state = 42)
trainFeat = []
testFeat = []
for pdf in trainF:
trainFeat.append(pdf.getImgHistogram())
for pdf in testF:
testFeat.append(pdf.getImgHistogram())
trainFeat = np.array(trainFeat)
testFeat = np.array(testFeat)
# instantiating kmeans
km = KMeans(algorithm = 'auto', copy_x = True, init = 'k-means++', max_iter = 300, n_clusters = args["clusters"], n_init = 10, n_jobs = args["jobs"])
# training km model
km.fit(trainFeat)
# testing km
predictions1_m = km.predict(testFeat)
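    # km.predict returns the index of the nearest cluster (0 .. n_clusters-1);
    # these cluster ids are later stacked with the other base-model outputs as meta-features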
# creating vector for Random Forest on features
trainFeat = []
testFeat = []
for pdf in trainF:
trainFeat.append(pdf.getFeatVec())
for pdf in testF:
testFeat.append(pdf.getFeatVec())
trainFeat = np.array(trainFeat)
testFeat = np.array(testFeat)
# instantiating Random Forest
ranfor = Pipeline([
('clf', RandomForestClassifier(n_estimators = 30, random_state = 0)),
])
ranfor.fit(trainFeat, trainLabels)
predictions3 = ranfor.predict(testFeat)
# creating vector for Logistic Regression on text
trainFeat = []
testFeat = []
for pdf in trainF:
trainFeat.append(pdf.getText())
for pdf in testF:
testFeat.append(pdf.getText())
# instantiating Logistic Regression Machine
logreg = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=1000, n_jobs=1, C=1e5)),
])
logreg.fit(trainFeat, trainLabels)
predictions2 = logreg.predict(testFeat)
print("\n+++++++++++++++++++++++++++++++++++++++++ FINISH ++++++++++++++++++++++++++++++++++++++++\n")
# start boost
print("+++++++++++++++++++++++++++++++++++++++ START BOOST +++++++++++++++++++++++++++++++++++++")
# creating vectors
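    # each test sample is represented by the outputs of the three base models
    # (k-means cluster id from the image histogram, logistic regression label from the text,
    # random forest label from the static features), giving a 3-dimensional meta-feature vector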
trainFeat = []
for p1, p2, p3 in zip(predictions1_m, predictions2, predictions3):
p_all = [p1, p2, p3]
trainFeat.append(p_all)
trainFeat = np.array(trainFeat)
# partition the data into training and testing splits, using 67% (20% from all set)
# of the data for training and the remaining 33% (10% from all set) for testing
(trainFeat, testFeat, trainLabels, testLabels) = train_test_split(trainFeat, testLabels, test_size = 0.33, random_state = 42)
# instantiating AdaBoostClassifier
abc = AdaBoostClassifier(n_estimators = 100, random_state = 0)
abc.fit(trainFeat, trainLabels)
print("Feature importances for AdaBoostClassifier: ")
print(abc.feature_importances_)
# make predictions for test data
predictions = abc.predict(testFeat)
accuracy = accuracy_score(testLabels, predictions)
print("Accuracy of AdaBoostClassifier: %.2f%%" % (accuracy * 100.0))
# classification_report - precision, recall, f1 table for adaboost classifier
print(classification_report(testLabels, predictions, target_names=["benign", "malicious"]))
cm = confusion_matrix(testLabels, predictions)
# the count of true negatives is A00, false negatives is A10, true positives is A11 and false positives is A01
print('confusion matrix:\n %s' % cm)
# instantiating AdaBoostRegressor (similar to logistic regression)
abr = AdaBoostRegressor(random_state = 0, n_estimators = 100)
abr.fit(trainFeat, trainLabels)
print("Feature importances for AdaBoostRegressor: ")
print(abr.feature_importances_)
# make predictions for test data
predictions = abr.predict(testFeat)
accuracy = accuracy_score(testLabels, predictions.round())
print("Accuracy of AdaBoostRegressor: %.2f%%" % (accuracy * 100.0))
# classification_report - precision, recall, f1 table for adaboost classifier
print(classification_report(testLabels, predictions.round(), target_names=["benign", "malicious"]))
cm = confusion_matrix(testLabels, predictions.round())
# the count of true negatives is A00, false negatives is A10, true positives is A11 and false positives is A01
print('confusion matrix:\n %s' % cm)
# instantiating XGBClassifier
xgbc = XGBClassifier()
xgbc.fit(trainFeat, trainLabels)
print("Feature importances for XGBClassifier: ")
print(xgbc.feature_importances_)
# make predictions for test data
predictions = xgbc.predict(testFeat)
accuracy = accuracy_score(testLabels, predictions)
print("Accuracy of XGBClassifier: %.2f%%" % (accuracy * 100.0))
# classification_report - precision, recall, f1 table for adaboost classifier
print(classification_report(testLabels, predictions, target_names=["benign", "malicious"]))
cm = confusion_matrix(testLabels, predictions)
# the count of true negatives is A00, false negatives is A10, true positives is A11 and false positives is A01
print('confusion matrix:\n %s' % cm)
# instantiating XGBRegressor (similar to linear regression)
xgbr = XGBRegressor(n_estimators = 100, max_depth = 3)
xgbr.fit(trainFeat, trainLabels)
print("Feature importances for XGBRegressor: ")
print(xgbr.feature_importances_)
# make predictions for test data
predictions = xgbr.predict(testFeat)
accuracy = accuracy_score(testLabels, predictions.round())
print("Accuracy of XGBRegressor: %.2f%%" % (accuracy * 100.0))
# classification_report - precision, recall, f1 table for adaboost classifier
print(classification_report(testLabels, predictions.round(), target_names=["benign", "malicious"]))
cm = confusion_matrix(testLabels, predictions.round())
# the count of true negatives is A00, false negatives is A10, true positives is A11 and false positives is A01
print('confusion matrix:\n %s' % cm)
# instantiating Random Forest Classifier
rfclf = RandomForestClassifier(n_estimators = 250)
rfclf.fit(trainFeat, trainLabels)
print("Feature importances for Random Forest: ")
print(rfclf.feature_importances_)
# predictions for test data
cla_pred = rfclf.predict(testFeat)
rf_acc = accuracy_score(testLabels, cla_pred)
print("Random Forest Accuracy: %.2f%%" % (rf_acc * 100.0))
# classification_report - precision, recall, f1 table for adaboost classifier
print(classification_report(testLabels, cla_pred, target_names=["benign", "malicious"]))
# confusion_matrix
cm_rf_cla = confusion_matrix(testLabels, cla_pred)
# the count of true negatives is A00, false negatives is A10, true positives is A11 and false positives is A01
print('confusion matrix:\n %s' % cm_rf_cla)
# instantiating Random Forest Regressor
rfreg = RandomForestRegressor(n_estimators = 250)
rfreg.fit(trainFeat, trainLabels)
print("Feature importances for Random Forest: ")
print(rfreg.feature_importances_)
# predictions for test data
reg_pred = rfreg.predict(testFeat)
rfreg_acc = accuracy_score(testLabels, reg_pred.round())
print("Random Forest Accuracy: %.2f%%" % (rfreg_acc * 100.0))
# classification_report - precision, recall, f1 table for adaboost classifier
print(classification_report(testLabels, reg_pred.round(), target_names=["benign", "malicious"]))
# confusion_matrix
cm_rf_reg = confusion_matrix(testLabels, reg_pred.round())
# the count of true negatives is A00, false negatives is A10, true positives is A11 and false positives is A01
print('confusion matrix:\n %s' % cm_rf_reg)
print("\n+++++++++++++++++++++++++++++++++++++++++ FINISH ++++++++++++++++++++++++++++++++++++++++\n")
# start check all white pdf files
print("+++++++++++++++++++++++++++++++++++++++ START CHECK +++++++++++++++++++++++++++++++++++++")
white_path = 'WHITE'
dataset_path = os.path.join(folder_path, white_path)
# extract JavaScript from white pdf file
result = obj_data.extract(dataset_path)
if (result):
print("[*] Succces extract JavaScript from white pdf files")
else:
print("[!] Whoops. something wrong dude. enable err var to track it")
sys.exit()
# dir of folder and filter for pdf files
files = [f for f in os.listdir(dataset_path) if os.path.isfile(
os.path.join(dataset_path, f))]
files = list(filter(lambda f: f.endswith(('.pdf', '.PDF')), files))
    # per-model [benign, malicious] counters: AdaBoostClassifier, AdaBoostRegressor, XGBClassifier, XGBRegressor, RFClassifier, RFRegressor
answers = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
# loop over the input pdfs
for (i, pdfFILE) in enumerate(files):
if (pdfFILE in set_white_files):
continue
obj_data.convertOnePDF(dataset_path + '/' + pdfFILE)
obj_read = readPDF(obj_data.getDict())
# create pdf object
obj_pdf = dataPDF(pdfFILE, folder_path+'/', white_path)
# Image
vector1 = []
obj_pdf.calculate_histogram_blur()
vector1.append(obj_pdf.getImgHistogram())
vector1 = np.array(vector1)
# 28 features
vector2 = []
obj_pdf.calculate_dsurlsjsentropy()
vector2.append(obj_pdf.getFeatVec())
vector2 = np.array(vector2)
# Text
obj_pdf.save_text(obj_read.extractTEXT(obj_pdf.getFilename(), obj_pdf.getImage()))
vector_text = []
vector_text.append(obj_pdf.getText())
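        # run the three base models on the unseen file and combine their outputs
        # into a single meta-feature row for the stacked ensemble models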
v1 = km.predict(vector1)
v2 = logreg.predict(vector_text)
v3 = ranfor.predict(vector2)
v_all = [[v1[0], v2[0], v3[0]]]
answer = abc.predict(v_all)
if (answer == 0):
answers[0][0] = answers[0][0] + 1
else:
answers[0][1] = answers[0][1] + 1
print("AdoBoostClassifier ",pdfFILE)
answer = abr.predict(v_all)
if (answer.round() == 0):
answers[1][0] = answers[1][0] + 1
else:
answers[1][1] = answers[1][1] + 1
print("AdoBoostRegression ",pdfFILE)
answer = xgbc.predict(v_all)
if (answer == 0):
answers[2][0] = answers[2][0] + 1
else:
answers[2][1] = answers[2][1] + 1
print("XGBClassifier ",pdfFILE)
answer = xgbr.predict(v_all)
if (answer.round() == 0):
answers[3][0] = answers[3][0] + 1
else:
answers[3][1] = answers[3][1] + 1
print("XGBRegression ",pdfFILE)
answer = rfclf.predict(v_all)
if (answer == 0):
answers[4][0] = answers[4][0] + 1
else:
answers[4][1] = answers[4][1] + 1
print("RFClassifier ",pdfFILE)
answer = rfreg.predict(v_all)
if (answer.round() == 0):
answers[5][0] = answers[5][0] + 1
else:
answers[5][1] = answers[5][1] + 1
print("RFRegression ",pdfFILE)
try:
obj_pdf.removeIMAGE()
except:
continue
print(answers)
print("\n+++++++++++++++++++++++++++++++++++++++++ FINISH ++++++++++++++++++++++++++++++++++++++++\n")
|
[
"csv.DictWriter",
"sklearn.feature_extraction.text.TfidfTransformer",
"sklearn.metrics.classification_report",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.ensemble.AdaBoostRegressor",
"numpy.array",
"sys.exit",
"xgboost.sklearn.XGBRegressor",
"os.path.exists",
"sklearn.ensemble.RandomForestRegressor",
"os.listdir",
"argparse.ArgumentParser",
"sklearn.feature_extraction.text.CountVectorizer",
"classes.dataPDF.dataPDF",
"os.mkdir",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.accuracy_score",
"xgboost.XGBClassifier",
"sklearn.cluster.KMeans",
"classes.createDATA.createDATA",
"os.path.join",
"os.getcwd",
"sklearn.linear_model.LogisticRegression"
] |
[((1825, 1850), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1848, 1850), False, 'import argparse\n'), ((2805, 2816), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2814, 2816), False, 'import os\n'), ((2836, 2878), 'os.path.join', 'os.path.join', (['folder_path', "args['dataset']"], {}), "(folder_path, args['dataset'])\n", (2848, 2878), False, 'import os\n'), ((3416, 3456), 'classes.createDATA.createDATA', 'createDATA', (['folder_path', "args['dataset']"], {}), "(folder_path, args['dataset'])\n", (3426, 3456), False, 'from classes.createDATA import createDATA\n'), ((5648, 5664), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5656, 5664), True, 'import numpy as np\n'), ((5846, 5912), 'sklearn.model_selection.train_test_split', 'train_test_split', (['obj_pdfs', 'labels'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(obj_pdfs, labels, test_size=0.3, random_state=42)\n', (5862, 5912), False, 'from sklearn.model_selection import train_test_split\n'), ((6111, 6130), 'numpy.array', 'np.array', (['trainFeat'], {}), '(trainFeat)\n', (6119, 6130), True, 'import numpy as np\n'), ((6146, 6164), 'numpy.array', 'np.array', (['testFeat'], {}), '(testFeat)\n', (6154, 6164), True, 'import numpy as np\n'), ((6201, 6335), 'sklearn.cluster.KMeans', 'KMeans', ([], {'algorithm': '"""auto"""', 'copy_x': '(True)', 'init': '"""k-means++"""', 'max_iter': '(300)', 'n_clusters': "args['clusters']", 'n_init': '(10)', 'n_jobs': "args['jobs']"}), "(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,\n n_clusters=args['clusters'], n_init=10, n_jobs=args['jobs'])\n", (6207, 6335), False, 'from sklearn.cluster import KMeans\n'), ((6688, 6707), 'numpy.array', 'np.array', (['trainFeat'], {}), '(trainFeat)\n', (6696, 6707), True, 'import numpy as np\n'), ((6723, 6741), 'numpy.array', 'np.array', (['testFeat'], {}), '(testFeat)\n', (6731, 6741), True, 'import numpy as np\n'), ((7969, 7988), 'numpy.array', 'np.array', (['trainFeat'], {}), '(trainFeat)\n', (7977, 7988), True, 'import numpy as np\n'), ((8214, 8286), 'sklearn.model_selection.train_test_split', 'train_test_split', (['trainFeat', 'testLabels'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(trainFeat, testLabels, test_size=0.33, random_state=42)\n', (8230, 8286), False, 'from sklearn.model_selection import train_test_split\n'), ((8341, 8393), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'n_estimators': '(100)', 'random_state': '(0)'}), '(n_estimators=100, random_state=0)\n', (8359, 8393), False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((8620, 8659), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['testLabels', 'predictions'], {}), '(testLabels, predictions)\n', (8634, 8659), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((8920, 8961), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['testLabels', 'predictions'], {}), '(testLabels, predictions)\n', (8936, 8961), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((9200, 9251), 'sklearn.ensemble.AdaBoostRegressor', 'AdaBoostRegressor', ([], {'random_state': '(0)', 'n_estimators': '(100)'}), '(random_state=0, n_estimators=100)\n', (9217, 9251), False, 'from sklearn.ensemble import AdaBoostRegressor\n'), ((10044, 10059), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {}), '()\n', (10057, 10059), False, 'from xgboost import XGBClassifier\n'), ((10280, 10319), 'sklearn.metrics.accuracy_score', 'accuracy_score', 
(['testLabels', 'predictions'], {}), '(testLabels, predictions)\n', (10294, 10319), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((10575, 10616), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['testLabels', 'predictions'], {}), '(testLabels, predictions)\n', (10591, 10616), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((10849, 10892), 'xgboost.sklearn.XGBRegressor', 'XGBRegressor', ([], {'n_estimators': '(100)', 'max_depth': '(3)'}), '(n_estimators=100, max_depth=3)\n', (10861, 10892), False, 'from xgboost.sklearn import XGBRegressor\n'), ((11690, 11730), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(250)'}), '(n_estimators=250)\n', (11712, 11730), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((11946, 11982), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['testLabels', 'cla_pred'], {}), '(testLabels, cla_pred)\n', (11960, 11982), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((12260, 12298), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['testLabels', 'cla_pred'], {}), '(testLabels, cla_pred)\n', (12276, 12298), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((12523, 12562), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(250)'}), '(n_estimators=250)\n', (12544, 12562), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((13620, 13657), 'os.path.join', 'os.path.join', (['folder_path', 'white_path'], {}), '(folder_path, white_path)\n', (13632, 13657), False, 'import os\n'), ((2466, 2487), 'os.mkdir', 'os.mkdir', (['path_IMAGES'], {}), '(path_IMAGES)\n', (2474, 2487), False, 'import os\n'), ((2496, 2516), 'os.mkdir', 'os.mkdir', (['path_TEXTS'], {}), '(path_TEXTS)\n', (2504, 2516), False, 'import os\n'), ((2933, 2961), 'os.path.exists', 'os.path.exists', (['dataset_path'], {}), '(dataset_path)\n', (2947, 2961), False, 'import os\n'), ((3055, 3065), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3063, 3065), False, 'import sys\n'), ((3196, 3238), 'csv.DictWriter', 'csv.DictWriter', (['csvFile'], {'fieldnames': 'fields'}), '(csvFile, fieldnames=fields)\n', (3210, 3238), False, 'import csv\n'), ((3708, 3718), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3716, 3718), False, 'import sys\n'), ((3979, 3989), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3987, 3989), False, 'import sys\n'), ((4927, 4979), 'classes.dataPDF.dataPDF', 'dataPDF', (['pdfFILE', "(folder_path + '/')", "args['dataset']"], {}), "(pdfFILE, folder_path + '/', args['dataset'])\n", (4934, 4979), False, 'from classes.dataPDF import dataPDF\n'), ((8825, 8913), 'sklearn.metrics.classification_report', 'classification_report', (['testLabels', 'predictions'], {'target_names': "['benign', 'malicious']"}), "(testLabels, predictions, target_names=['benign',\n 'malicious'])\n", (8846, 8913), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((10480, 10568), 'sklearn.metrics.classification_report', 'classification_report', (['testLabels', 'predictions'], {'target_names': "['benign', 'malicious']"}), "(testLabels, predictions, target_names=['benign',\n 'malicious'])\n", (10501, 10568), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((12138, 12223), 'sklearn.metrics.classification_report', 
'classification_report', (['testLabels', 'cla_pred'], {'target_names': "['benign', 'malicious']"}), "(testLabels, cla_pred, target_names=['benign',\n 'malicious'])\n", (12159, 12223), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((13929, 13939), 'sys.exit', 'sys.exit', ([], {}), '()\n', (13937, 13939), False, 'import sys\n'), ((14579, 14626), 'classes.dataPDF.dataPDF', 'dataPDF', (['pdfFILE', "(folder_path + '/')", 'white_path'], {}), "(pdfFILE, folder_path + '/', white_path)\n", (14586, 14626), False, 'from classes.dataPDF import dataPDF\n'), ((14782, 14799), 'numpy.array', 'np.array', (['vector1'], {}), '(vector1)\n', (14790, 14799), True, 'import numpy as np\n'), ((14959, 14976), 'numpy.array', 'np.array', (['vector2'], {}), '(vector2)\n', (14967, 14976), True, 'import numpy as np\n'), ((4301, 4325), 'os.listdir', 'os.listdir', (['dataset_path'], {}), '(dataset_path)\n', (4311, 4325), False, 'import os\n'), ((14010, 14034), 'os.listdir', 'os.listdir', (['dataset_path'], {}), '(dataset_path)\n', (14020, 14034), False, 'import os\n'), ((4344, 4373), 'os.path.join', 'os.path.join', (['dataset_path', 'f'], {}), '(dataset_path, f)\n', (4356, 4373), False, 'import os\n'), ((6821, 6876), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(30)', 'random_state': '(0)'}), '(n_estimators=30, random_state=0)\n', (6843, 6876), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7269, 7286), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (7284, 7286), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer\n'), ((7315, 7333), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (7331, 7333), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer\n'), ((7360, 7455), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""lbfgs"""', 'multi_class': '"""auto"""', 'max_iter': '(1000)', 'n_jobs': '(1)', 'C': '(100000.0)'}), "(solver='lbfgs', multi_class='auto', max_iter=1000,\n n_jobs=1, C=100000.0)\n", (7378, 7455), False, 'from sklearn.linear_model import LogisticRegression\n'), ((14062, 14091), 'os.path.join', 'os.path.join', (['dataset_path', 'f'], {}), '(dataset_path, f)\n', (14074, 14091), False, 'import os\n')]
|
from __future__ import absolute_import
import numpy as np
import chainer
import tqdm
import glob
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from ..data import load_image # NOQA
class BaseDataset(chainer.dataset.DatasetMixin, metaclass=ABCMeta):
""" Base class of dataset
Args:
root (str): Directory to the dataset
patients (list, optional): List of patient names. Defaults to [].
classes (None or list, optional): List of class names. Defaults to None.
        dtypes (dict, optional): A dictionary of data types. Defaults to {}.
        filenames (dict, optional): A dictionary mapping keys to filename wildcards.
            Each filename can be a format string using '{root}' and '{patient}'. Defaults to {}.
        normalizer (callable, optional): A callable function for normalization. Defaults to None.
        augmentor (callable, optional): A callable function for data augmentation. Defaults to None.
"""
def __init__(self,
root,
patients=[],
classes=None,
dtypes={},
filenames={},
normalizer=None,
augmentor=None):
super(BaseDataset, self).__init__()
assert isinstance(patients, (list, np.ndarray)), \
'please specify the patient names..'
if classes is not None:
if isinstance(classes, list):
classes = np.asarray(classes)
assert isinstance(classes, np.ndarray), \
'class names should be list or np.ndarray..'
assert isinstance(dtypes, dict), \
'please specify the dtype per each file..'
assert isinstance(filenames, dict), \
'please specify the filename per each file..'
if normalizer is not None:
assert callable(normalizer), 'normalizer should be callable..'
if augmentor is not None:
assert callable(augmentor), 'augmentor should be callable..'
# initialize
files = OrderedDict()
file_sizes = []
for key in filenames.keys():
files[key] = []
for p in tqdm.tqdm(patients, desc='Collecting %s files' % key, ncols=80):
files[key].extend(
glob.glob(filenames[key].format(root=root, patient=p)))
if len(files[key]) == 0:
warnings.warn('%s files are not found.. ' % key)
file_sizes.append(len(files[key]))
assert all(file_sizes[0] == s for s in file_sizes), \
'the number of files must be the same..'
self._root = root
self._patients = patients
self._classes = classes
self._dtypes = dtypes
self._filenames = filenames
self._files = files
self._normalizer = normalizer
self._augmentor = augmentor
def __len__(self):
key = list(self._files.keys())[0]
return len(self._files[key])
@property
def classes(self):
return self._classes
@property
def n_classes(self):
if self.classes is None:
return None
return len(self.classes)
@property
def files(self):
return self._files
@property
def dtypes(self):
return self._dtypes
@property
def normalizer(self):
return self._normalizer
@property
def augmentor(self):
return self._augmentor
@augmentor.deleter
def augmentor(self):
self._augmentor = None
@classmethod
@abstractmethod
def normalize(self, **kwargs):
raise NotImplementedError()
@classmethod
@abstractmethod
def denormalize(self, **kwargs):
raise NotImplementedError()
@classmethod
@abstractmethod
def get_example(self, i):
raise NotImplementedError()
@classmethod
@abstractmethod
def __copy__(self):
"""Copy the class instance"""
raise NotImplementedError()
from .volume import VolumeDataset # NOQA
from .image import ImageDataset # NOQA
def train_valid_split(train, valid_ratio):
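    """Randomly split a dataset (or a list/array of samples) into train and validation subsets."""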
if isinstance(train, BaseDataset):
valid = train.__copy__()
n_samples = len(train)
valid_indices = np.random.choice(np.arange(n_samples),
int(valid_ratio * n_samples),
replace=False)
files = train.files
for key in files.keys():
valid._files[key] = np.asarray(files[key])[valid_indices]
train._files[key] = np.delete(
np.asarray(files[key]), valid_indices)
elif isinstance(train, (list, np.ndarray)):
valid = np.asarray(train)
n_samples = len(train)
valid_indices = np.random.choice(np.arange(n_samples),
int(valid_ratio * n_samples),
replace=False)
valid = valid[valid_indices]
train = np.delete(train, valid_indices)
assert len(train) + len(valid) == n_samples
return train, valid
def load_crossval_list(xls_file, index):
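    """Load the train/valid/test patient lists of one cross-validation fold from an Excel sheet."""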
import pandas as pd
from distutils.version import LooseVersion
if LooseVersion(pd.__version__) >= LooseVersion('0.21.0'):
df = pd.read_excel(xls_file, sheet_name=index)
else:
df = pd.read_excel(xls_file, sheetname=index)
train = df['train'].dropna().tolist()
valid = df['valid'].dropna().tolist()
test = df['test'].dropna().tolist()
return {'train': train, 'valid': valid, 'test': test}
|
[
"collections.OrderedDict",
"numpy.delete",
"tqdm.tqdm",
"numpy.asarray",
"pandas.read_excel",
"warnings.warn",
"distutils.version.LooseVersion",
"numpy.arange"
] |
[((2079, 2092), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2090, 2092), False, 'from collections import OrderedDict\n'), ((5263, 5291), 'distutils.version.LooseVersion', 'LooseVersion', (['pd.__version__'], {}), '(pd.__version__)\n', (5275, 5291), False, 'from distutils.version import LooseVersion\n'), ((5295, 5317), 'distutils.version.LooseVersion', 'LooseVersion', (['"""0.21.0"""'], {}), "('0.21.0')\n", (5307, 5317), False, 'from distutils.version import LooseVersion\n'), ((5332, 5373), 'pandas.read_excel', 'pd.read_excel', (['xls_file'], {'sheet_name': 'index'}), '(xls_file, sheet_name=index)\n', (5345, 5373), True, 'import pandas as pd\n'), ((5397, 5437), 'pandas.read_excel', 'pd.read_excel', (['xls_file'], {'sheetname': 'index'}), '(xls_file, sheetname=index)\n', (5410, 5437), True, 'import pandas as pd\n'), ((2205, 2268), 'tqdm.tqdm', 'tqdm.tqdm', (['patients'], {'desc': "('Collecting %s files' % key)", 'ncols': '(80)'}), "(patients, desc='Collecting %s files' % key, ncols=80)\n", (2214, 2268), False, 'import tqdm\n'), ((4295, 4315), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (4304, 4315), True, 'import numpy as np\n'), ((4740, 4757), 'numpy.asarray', 'np.asarray', (['train'], {}), '(train)\n', (4750, 4757), True, 'import numpy as np\n'), ((5035, 5066), 'numpy.delete', 'np.delete', (['train', 'valid_indices'], {}), '(train, valid_indices)\n', (5044, 5066), True, 'import numpy as np\n'), ((1487, 1506), 'numpy.asarray', 'np.asarray', (['classes'], {}), '(classes)\n', (1497, 1506), True, 'import numpy as np\n'), ((2435, 2483), 'warnings.warn', 'warnings.warn', (["('%s files are not found.. ' % key)"], {}), "('%s files are not found.. ' % key)\n", (2448, 2483), False, 'import warnings\n'), ((4538, 4560), 'numpy.asarray', 'np.asarray', (['files[key]'], {}), '(files[key])\n', (4548, 4560), True, 'import numpy as np\n'), ((4635, 4657), 'numpy.asarray', 'np.asarray', (['files[key]'], {}), '(files[key])\n', (4645, 4657), True, 'import numpy as np\n'), ((4832, 4852), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (4841, 4852), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 24 14:46:59 2017
Some personal numpy array filtering and finding intersections with indices
@author: <NAME>
"""
import numpy as np
import glob
import os
import shutil
def idx_filter(idx, *array_list):
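    """ Apply the same index (or boolean mask) to every array in array_list """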
new_array_list = []
for array in array_list:
new_array_list.append(array[idx])
return new_array_list
def intersect(*arrays):
""" This only works if arrays are sorted and unique"""
matched = np.array(list(set(arrays[0]).intersection(*arrays[1:])))
return np.array([np.where(np.in1d(array, matched))[0] for array in arrays])
def copy_dir_diff(dir1, dir2, dirout):
""" Copy files in dir1 that are missingin dir2 into dirout """
print(dir1)
fileList = glob.glob(os.path.join(dir1, '*'))
for curFullFile in fileList:
curFile = os.path.basename(curFullFile)
checkFullFile = os.path.join(dir2, curFile)
if os.path.isfile(checkFullFile):
print('{0} exist'.format(curFile))
else:
print('{0} miss'.format(curFile))
newFullFile = os.path.join(dirout, curFile)
shutil.copyfile(curFullFile, newFullFile)
|
[
"numpy.in1d",
"os.path.join",
"os.path.isfile",
"shutil.copyfile",
"os.path.basename"
] |
[((760, 783), 'os.path.join', 'os.path.join', (['dir1', '"""*"""'], {}), "(dir1, '*')\n", (772, 783), False, 'import os\n'), ((836, 865), 'os.path.basename', 'os.path.basename', (['curFullFile'], {}), '(curFullFile)\n', (852, 865), False, 'import os\n'), ((890, 917), 'os.path.join', 'os.path.join', (['dir2', 'curFile'], {}), '(dir2, curFile)\n', (902, 917), False, 'import os\n'), ((929, 958), 'os.path.isfile', 'os.path.isfile', (['checkFullFile'], {}), '(checkFullFile)\n', (943, 958), False, 'import os\n'), ((1093, 1122), 'os.path.join', 'os.path.join', (['dirout', 'curFile'], {}), '(dirout, curFile)\n', (1105, 1122), False, 'import os\n'), ((1135, 1176), 'shutil.copyfile', 'shutil.copyfile', (['curFullFile', 'newFullFile'], {}), '(curFullFile, newFullFile)\n', (1150, 1176), False, 'import shutil\n'), ((562, 585), 'numpy.in1d', 'np.in1d', (['array', 'matched'], {}), '(array, matched)\n', (569, 585), True, 'import numpy as np\n')]
|
# Written by <NAME>, 2018
import numpy as np
import bisect
class SDR_Classifier:
"""Maximum Likelyhood classifier for SDRs."""
def __init__(self, alpha, input_sdr, num_labels):
"""
Argument alpha is the small constant used by the exponential moving
        average which tracks input-output co-occurrences.
"""
self.alpha = alpha
self.input_sdr = input_sdr
self.num_labels = num_labels
# Don't initialize to zero, touch every input+output pair.
self.stats = np.random.uniform(
0.1 * self.alpha,
0.2 * self.alpha,
size=(self.input_sdr.size, self.num_labels))
def train(self, labels, input_sdr=None):
"""
        Argument labels is an array of floats forming a probability distribution (PDF).
"""
labels = np.array(labels) / np.sum(labels)
self.input_sdr.assign(input_sdr)
inputs = self.input_sdr.flat_index
# Decay.
self.stats[inputs, :] *= (1 - self.alpha)
self.stats[:, np.nonzero(labels)[0]] *= (1 - self.alpha)
# Update.
updates = (labels - self.stats[inputs]) * self.alpha
self.stats[inputs] += updates
def predict(self, input_sdr=None):
"""
        Argument inputs is an ndarray of indices into the input space.
        Returns the probability of each category in the output space.
"""
self.input_sdr.assign(input_sdr)
pdf = self.stats[self.input_sdr.flat_index, :]
pdf = pdf / np.sum(pdf, axis=1, keepdims=True)
if False:
# Combine multiple probabilities into single pdf. Product, not
            # summation, to combine probabilities of independent events. The
            # problem with this is that if a few unexpected bits turn on it
            # multiplies the result by zero, and the test dataset is going to
# have unexpected things in it.
return np.product(pdf, axis=0, keepdims=False)
else:
# Use summation B/C it works well.
return np.sum(pdf, axis=0, keepdims=False)
class RandomOutputClassifier:
"""
This classifier uses the frequency of the trained target outputs to generate
random predictions. It is used to get a baseline performance to compare
against.
"""
def __init__(self, num_labels):
self.stats = np.zeros(num_labels)
def train(self, label):
label = np.array(label) / np.sum(label)
self.stats += label
def predict(self):
"""Returns probability of each catagory in output space."""
cdf = np.cumsum(self.stats)
assert(cdf[-1] > 0) # Classifier must be trained before it can make predictions.
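        # inverse-CDF sampling: draw a random label index according to the trained label frequencies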
return bisect.bisect(cdf, np.random.random() * cdf[-1])
|
[
"numpy.product",
"numpy.random.random",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.nonzero",
"numpy.random.uniform",
"numpy.cumsum"
] |
[((544, 647), 'numpy.random.uniform', 'np.random.uniform', (['(0.1 * self.alpha)', '(0.2 * self.alpha)'], {'size': '(self.input_sdr.size, self.num_labels)'}), '(0.1 * self.alpha, 0.2 * self.alpha, size=(self.input_sdr.\n size, self.num_labels))\n', (561, 647), True, 'import numpy as np\n'), ((2357, 2377), 'numpy.zeros', 'np.zeros', (['num_labels'], {}), '(num_labels)\n', (2365, 2377), True, 'import numpy as np\n'), ((2589, 2610), 'numpy.cumsum', 'np.cumsum', (['self.stats'], {}), '(self.stats)\n', (2598, 2610), True, 'import numpy as np\n'), ((815, 831), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (823, 831), True, 'import numpy as np\n'), ((834, 848), 'numpy.sum', 'np.sum', (['labels'], {}), '(labels)\n', (840, 848), True, 'import numpy as np\n'), ((1507, 1541), 'numpy.sum', 'np.sum', (['pdf'], {'axis': '(1)', 'keepdims': '(True)'}), '(pdf, axis=1, keepdims=True)\n', (1513, 1541), True, 'import numpy as np\n'), ((1926, 1965), 'numpy.product', 'np.product', (['pdf'], {'axis': '(0)', 'keepdims': '(False)'}), '(pdf, axis=0, keepdims=False)\n', (1936, 1965), True, 'import numpy as np\n'), ((2046, 2081), 'numpy.sum', 'np.sum', (['pdf'], {'axis': '(0)', 'keepdims': '(False)'}), '(pdf, axis=0, keepdims=False)\n', (2052, 2081), True, 'import numpy as np\n'), ((2423, 2438), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (2431, 2438), True, 'import numpy as np\n'), ((2441, 2454), 'numpy.sum', 'np.sum', (['label'], {}), '(label)\n', (2447, 2454), True, 'import numpy as np\n'), ((2734, 2752), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2750, 2752), True, 'import numpy as np\n'), ((1037, 1055), 'numpy.nonzero', 'np.nonzero', (['labels'], {}), '(labels)\n', (1047, 1055), True, 'import numpy as np\n')]
|
import os, sys
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=""
import pandas as pd
import numpy as np
eps = 0.004
desired = {
0: 0.36239782,
1: 0.043841336,
2: 0.075268817,
3: 0.059322034,
4: 0.075268817,
5: 0.075268817,
6: 0.043841336,
7: 0.075268817,
8: eps,
9: eps,
10: eps,
11: 0.043841336,
12: 0.043841336,
13: 0.014198783,
14: 0.043841336,
15: eps,
16: 0.028806584,
17: 0.014198783,
18: 0.028806584,
19: 0.059322034,
20: eps,
21: 0.126126126,
22: 0.028806584,
23: 0.075268817,
24: eps,
25: 0.222493888,
26: 0.028806584,
27: eps
}
MODEL_PATH = 'Christof/models/GAPNet/13_ext_512crop/'
pred_ul = np.load(MODEL_PATH + 'pred_ul_40.npy')
pred_ur = np.load(MODEL_PATH + 'pred_ur_40.npy')
pred_mm = np.load(MODEL_PATH + 'pred_mm_40.npy')
pred_bl = np.load(MODEL_PATH + 'pred_bl_40.npy')
pred_br = np.load(MODEL_PATH + 'pred_br_40.npy')
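# stack the five crop predictions (four corners + middle), average over axis 1,
# and reorder to (samples, crop, class) as input for the stacker models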
X = np.stack([pred_ul,pred_ur,pred_mm,pred_bl,pred_br])
X = np.mean(X,axis=1)
X = np.transpose(X, (1, 0, 2))
from keras.models import load_model
preds = np.zeros((X.shape[0],28))
for f_id in range(5):
m = load_model(MODEL_PATH + f'stacker{f_id}_stats.hdf5')
preds += m.predict(X, batch_size = 512)
preds = preds/ 5
desired = {}
best_sub = pd.read_csv('best_sub.csv')
s0 = [s if isinstance(s, str) else '' for s in best_sub.Predicted]
p0 = [s.split() for s in s0]
y0 = np.zeros((best_sub.shape[0], 28)).astype(int)
for i in range(best_sub.shape[0]):
for j in p0[i]: y0[i, int(j)] = 1
for i in range(28):
desired[i] = y0[:,i].mean()
thresholds = np.linspace(0.95, 0.05, 101)
pred = preds.copy()
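# per-class threshold search: lower the threshold until the predicted positive rate
# reaches the class frequency taken from the reference submission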
for j in range(pred.shape[1]):
for t in thresholds:
pred[:, j] = (preds[:, j] > t).astype(int)
prop = np.mean(pred[:, j])
if prop >= desired[j]: break
print(j, '%3.2f' % t, '%6.4f' % desired[j], '%6.4f' % prop, j, )
print(pred[:5].astype(int))
label_predict = [np.arange(28)[score_predict == 1] for score_predict in pred]
str_predict_label = [' '.join(str(l) for l in lp) for lp in label_predict]
submit = pd.read_csv('Christof/assets/sample_submission.csv')
submit['Predicted'] = str_predict_label
# np.save('draw_predict_InceptionV3.npy', score_predict)
#submit.to_csv(MODEL_PATH + 'submission.csv', index=False)
from Christof.utils import f1_sub
best_sub = pd.read_csv('ens56d.csv')
f1_sub(best_sub,submit)
|
[
"numpy.mean",
"keras.models.load_model",
"pandas.read_csv",
"Christof.utils.f1_sub",
"numpy.stack",
"numpy.zeros",
"numpy.linspace",
"numpy.transpose",
"numpy.arange",
"numpy.load"
] |
[((752, 790), 'numpy.load', 'np.load', (["(MODEL_PATH + 'pred_ul_40.npy')"], {}), "(MODEL_PATH + 'pred_ul_40.npy')\n", (759, 790), True, 'import numpy as np\n'), ((801, 839), 'numpy.load', 'np.load', (["(MODEL_PATH + 'pred_ur_40.npy')"], {}), "(MODEL_PATH + 'pred_ur_40.npy')\n", (808, 839), True, 'import numpy as np\n'), ((850, 888), 'numpy.load', 'np.load', (["(MODEL_PATH + 'pred_mm_40.npy')"], {}), "(MODEL_PATH + 'pred_mm_40.npy')\n", (857, 888), True, 'import numpy as np\n'), ((899, 937), 'numpy.load', 'np.load', (["(MODEL_PATH + 'pred_bl_40.npy')"], {}), "(MODEL_PATH + 'pred_bl_40.npy')\n", (906, 937), True, 'import numpy as np\n'), ((948, 986), 'numpy.load', 'np.load', (["(MODEL_PATH + 'pred_br_40.npy')"], {}), "(MODEL_PATH + 'pred_br_40.npy')\n", (955, 986), True, 'import numpy as np\n'), ((992, 1047), 'numpy.stack', 'np.stack', (['[pred_ul, pred_ur, pred_mm, pred_bl, pred_br]'], {}), '([pred_ul, pred_ur, pred_mm, pred_bl, pred_br])\n', (1000, 1047), True, 'import numpy as np\n'), ((1048, 1066), 'numpy.mean', 'np.mean', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (1055, 1066), True, 'import numpy as np\n'), ((1070, 1096), 'numpy.transpose', 'np.transpose', (['X', '(1, 0, 2)'], {}), '(X, (1, 0, 2))\n', (1082, 1096), True, 'import numpy as np\n'), ((1143, 1169), 'numpy.zeros', 'np.zeros', (['(X.shape[0], 28)'], {}), '((X.shape[0], 28))\n', (1151, 1169), True, 'import numpy as np\n'), ((1340, 1367), 'pandas.read_csv', 'pd.read_csv', (['"""best_sub.csv"""'], {}), "('best_sub.csv')\n", (1351, 1367), True, 'import pandas as pd\n'), ((1655, 1683), 'numpy.linspace', 'np.linspace', (['(0.95)', '(0.05)', '(101)'], {}), '(0.95, 0.05, 101)\n', (1666, 1683), True, 'import numpy as np\n'), ((2146, 2198), 'pandas.read_csv', 'pd.read_csv', (['"""Christof/assets/sample_submission.csv"""'], {}), "('Christof/assets/sample_submission.csv')\n", (2157, 2198), True, 'import pandas as pd\n'), ((2402, 2427), 'pandas.read_csv', 'pd.read_csv', (['"""ens56d.csv"""'], {}), "('ens56d.csv')\n", (2413, 2427), True, 'import pandas as pd\n'), ((2428, 2452), 'Christof.utils.f1_sub', 'f1_sub', (['best_sub', 'submit'], {}), '(best_sub, submit)\n', (2434, 2452), False, 'from Christof.utils import f1_sub\n'), ((1199, 1251), 'keras.models.load_model', 'load_model', (["(MODEL_PATH + f'stacker{f_id}_stats.hdf5')"], {}), "(MODEL_PATH + f'stacker{f_id}_stats.hdf5')\n", (1209, 1251), False, 'from keras.models import load_model\n'), ((1469, 1502), 'numpy.zeros', 'np.zeros', (['(best_sub.shape[0], 28)'], {}), '((best_sub.shape[0], 28))\n', (1477, 1502), True, 'import numpy as np\n'), ((1826, 1845), 'numpy.mean', 'np.mean', (['pred[:, j]'], {}), '(pred[:, j])\n', (1833, 1845), True, 'import numpy as np\n'), ((2000, 2013), 'numpy.arange', 'np.arange', (['(28)'], {}), '(28)\n', (2009, 2013), True, 'import numpy as np\n')]
|
import pyPROPOSAL as pp
import numpy as np
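# Generates reference tables (dE/dx, dN/dx, random dN/dx and stochastic losses) for the
# photonuclear parametrizations over several particles, media, energy cuts and shadowing
# models; the results are written as text files into the TestFiles/ directory.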
photo_real = [
pp.parametrization.photonuclear.Zeus,
pp.parametrization.photonuclear.BezrukovBugaev,
pp.parametrization.photonuclear.Rhode,
pp.parametrization.photonuclear.Kokoulin
]
particle_defs = [
pp.particle.MuMinusDef.get(),
pp.particle.TauMinusDef.get()#,
# pp.particle.EMinusDef.get()
]
mediums = [
pp.medium.Ice(1.0),
pp.medium.Hydrogen(1.0),
pp.medium.Uranium(1.0)
]
cuts = [
pp.EnergyCutSettings(-1, -1),
pp.EnergyCutSettings(500, -1),
pp.EnergyCutSettings(-1, 0.05),
pp.EnergyCutSettings(500, 0.05)
]
multiplier = 1.
hard_components = [0, 1]
photo_q2 = [
pp.parametrization.photonuclear.AbramowiczLevinLevyMaor91,
pp.parametrization.photonuclear.AbramowiczLevinLevyMaor97,
pp.parametrization.photonuclear.ButkevichMikhailov,
pp.parametrization.photonuclear.RenoSarcevicSu
]
photo_q2_interpol = [
pp.parametrization.photonuclear.AbramowiczLevinLevyMaor91Interpolant,
pp.parametrization.photonuclear.AbramowiczLevinLevyMaor97Interpolant,
pp.parametrization.photonuclear.ButkevichMikhailovInterpolant,
pp.parametrization.photonuclear.RenoSarcevicSuInterpolant
]
shadows = [
pp.parametrization.photonuclear.ShadowDuttaRenoSarcevicSeckel(),
pp.parametrization.photonuclear.ShadowButkevichMikhailov()
]
energies = np.logspace(4, 13, num=10)
interpoldef = pp.InterpolationDef()
def create_table_dEdx(dir_name, interpolate=False):
with open(dir_name + "Photo_Real_dEdx{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for hard in hard_components:
for parametrization in photo_real:
photo = parametrization(
particle,
medium,
cut,
multiplier,
hard)
if interpolate:
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
dEdx = xsection.calculate_dEdx(energy)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(dEdx))
buf.append(photo.name)
buf.append(str(hard))
buf.append("\n")
file.write("\t".join(buf))
def create_table_dNdx(dir_name, interpolate=False):
with open(dir_name + "Photo_Real_dNdx{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for hard in hard_components:
for parametrization in photo_real:
photo = parametrization(
particle,
medium,
cut,
multiplier,
hard)
if interpolate:
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
dNdx = xsection.calculate_dNdx(energy)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(dNdx))
buf.append(photo.name)
buf.append(str(hard))
buf.append("\n")
file.write("\t".join(buf))
def create_table_dNdx_rnd(dir_name, interpolate=False):
pp.RandomGenerator.get().set_seed(1234)
with open(dir_name + "Photo_Real_dNdx_rnd{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for hard in hard_components:
rnd = pp.RandomGenerator.get().random_double()
for parametrization in photo_real:
photo = parametrization(
particle,
medium,
cut,
multiplier,
hard)
if interpolate:
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
dNdx = xsection.calculate_dNdx_rnd(energy, rnd)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(rnd))
buf.append(str(dNdx))
buf.append(photo.name)
buf.append(str(hard))
buf.append("\n")
file.write("\t".join(buf))
def create_table_stochastic_loss(dir_name, interpolate=False):
pp.RandomGenerator.get().set_seed(1234)
with open(dir_name + "Photo_Real_e{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for hard in hard_components:
for parametrization in photo_real:
photo = parametrization(
particle,
medium,
cut,
multiplier,
hard)
if interpolate:
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
rnd1 = pp.RandomGenerator.get().random_double()
rnd2 = pp.RandomGenerator.get().random_double()
stochastic_loss = xsection.calculate_stochastic_loss(energy, rnd1, rnd2)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(rnd1))
buf.append(str(rnd2))
buf.append(str(stochastic_loss))
buf.append(photo.name)
buf.append(str(hard))
buf.append("\n")
file.write("\t".join(buf))
def create_table_dEdx_Q2(dir_name, interpolate=False):
if interpolate:
q2 = photo_q2_interpol
else:
q2 = photo_q2
with open(dir_name + "Photo_Q2_dEdx{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for shadow in shadows:
for parametrization in q2:
if interpolate:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow,
interpoldef)
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow)
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
dEdx = xsection.calculate_dEdx(energy)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(dEdx))
buf.append(photo.name)
buf.append(shadow.name)
buf.append("\n")
file.write("\t".join(buf))
def create_table_dNdx_Q2(dir_name, interpolate=False):
if interpolate:
q2 = photo_q2_interpol
else:
q2 = photo_q2
with open(dir_name + "Photo_Q2_dNdx{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for shadow in shadows:
for parametrization in q2:
if interpolate:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow,
interpoldef)
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow)
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
dNdx = xsection.calculate_dNdx(energy)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(dNdx))
buf.append(photo.name)
buf.append(shadow.name)
buf.append("\n")
file.write("\t".join(buf))
def create_table_dNdx_rnd_Q2(dir_name, interpolate=False):
pp.RandomGenerator.get().set_seed(1234)
if interpolate:
q2 = photo_q2_interpol
else:
q2 = photo_q2
with open(dir_name + "Photo_Q2_dNdx_rnd{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for shadow in shadows:
rnd = pp.RandomGenerator.get().random_double()
for parametrization in q2:
if interpolate:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow,
interpoldef)
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow)
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
dNdx = xsection.calculate_dNdx_rnd(energy, rnd)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(rnd))
buf.append(str(dNdx))
buf.append(photo.name)
buf.append(shadow.name)
buf.append("\n")
file.write("\t".join(buf))
def create_table_stochastic_loss_Q2(dir_name, interpolate=False):
pp.RandomGenerator.get().set_seed(1234)
if interpolate:
q2 = photo_q2_interpol
else:
q2 = photo_q2
with open(dir_name + "Photo_Q2_e{}.txt".format("_interpol" if interpolate else ""), "a") as file:
for particle in particle_defs:
for medium in mediums:
for cut in cuts:
for shadow in shadows:
for parametrization in q2:
if interpolate:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow,
interpoldef)
xsection = pp.crosssection.PhotoInterpolant(photo, interpoldef)
else:
photo = parametrization(
particle,
medium,
cut,
multiplier,
shadow)
xsection = pp.crosssection.PhotoIntegral(photo)
buf = [""]
for energy in energies:
rnd1 = pp.RandomGenerator.get().random_double()
rnd2 = pp.RandomGenerator.get().random_double()
stochastic_loss = xsection.calculate_stochastic_loss(energy, rnd1, rnd2)
buf.append(particle.name)
buf.append(medium.name)
buf.append(str(cut.ecut))
buf.append(str(cut.vcut))
buf.append(str(multiplier))
buf.append(str(energy))
buf.append(str(rnd1))
buf.append(str(rnd2))
buf.append(str(stochastic_loss))
buf.append(photo.name)
buf.append(shadow.name)
buf.append("\n")
file.write("\t".join(buf))
def main(dir_name):
# Integrate
create_table_dEdx(dir_name)
create_table_dNdx(dir_name)
create_table_dNdx_rnd(dir_name)
create_table_stochastic_loss(dir_name)
create_table_dEdx_Q2(dir_name)
create_table_dNdx_Q2(dir_name)
create_table_dNdx_rnd_Q2(dir_name)
create_table_stochastic_loss_Q2(dir_name)
# Interpolate
create_table_dEdx(dir_name, True)
create_table_dNdx(dir_name, True)
create_table_dNdx_rnd(dir_name, True)
create_table_stochastic_loss(dir_name, True)
create_table_dEdx_Q2(dir_name, True)
create_table_dNdx_Q2(dir_name, True)
create_table_dNdx_rnd_Q2(dir_name, True)
create_table_stochastic_loss_Q2(dir_name, True)
if __name__ == "__main__":
import os
dir_name = "TestFiles/"
if os.path.isdir(dir_name):
print("Directory {} already exists".format(dir_name))
else:
os.makedirs(dir_name)
print("Directory {} created".format(dir_name))
main(dir_name)
|
[
"pyPROPOSAL.particle.TauMinusDef.get",
"pyPROPOSAL.parametrization.photonuclear.ShadowButkevichMikhailov",
"os.makedirs",
"pyPROPOSAL.InterpolationDef",
"pyPROPOSAL.EnergyCutSettings",
"pyPROPOSAL.crosssection.PhotoIntegral",
"pyPROPOSAL.parametrization.photonuclear.ShadowDuttaRenoSarcevicSeckel",
"os.path.isdir",
"pyPROPOSAL.crosssection.PhotoInterpolant",
"pyPROPOSAL.medium.Hydrogen",
"pyPROPOSAL.particle.MuMinusDef.get",
"numpy.logspace",
"pyPROPOSAL.RandomGenerator.get",
"pyPROPOSAL.medium.Ice",
"pyPROPOSAL.medium.Uranium"
] |
[((1369, 1395), 'numpy.logspace', 'np.logspace', (['(4)', '(13)'], {'num': '(10)'}), '(4, 13, num=10)\n', (1380, 1395), True, 'import numpy as np\n'), ((1411, 1432), 'pyPROPOSAL.InterpolationDef', 'pp.InterpolationDef', ([], {}), '()\n', (1430, 1432), True, 'import pyPROPOSAL as pp\n'), ((266, 294), 'pyPROPOSAL.particle.MuMinusDef.get', 'pp.particle.MuMinusDef.get', ([], {}), '()\n', (292, 294), True, 'import pyPROPOSAL as pp\n'), ((300, 329), 'pyPROPOSAL.particle.TauMinusDef.get', 'pp.particle.TauMinusDef.get', ([], {}), '()\n', (327, 329), True, 'import pyPROPOSAL as pp\n'), ((385, 403), 'pyPROPOSAL.medium.Ice', 'pp.medium.Ice', (['(1.0)'], {}), '(1.0)\n', (398, 403), True, 'import pyPROPOSAL as pp\n'), ((409, 432), 'pyPROPOSAL.medium.Hydrogen', 'pp.medium.Hydrogen', (['(1.0)'], {}), '(1.0)\n', (427, 432), True, 'import pyPROPOSAL as pp\n'), ((438, 460), 'pyPROPOSAL.medium.Uranium', 'pp.medium.Uranium', (['(1.0)'], {}), '(1.0)\n', (455, 460), True, 'import pyPROPOSAL as pp\n'), ((477, 505), 'pyPROPOSAL.EnergyCutSettings', 'pp.EnergyCutSettings', (['(-1)', '(-1)'], {}), '(-1, -1)\n', (497, 505), True, 'import pyPROPOSAL as pp\n'), ((511, 540), 'pyPROPOSAL.EnergyCutSettings', 'pp.EnergyCutSettings', (['(500)', '(-1)'], {}), '(500, -1)\n', (531, 540), True, 'import pyPROPOSAL as pp\n'), ((546, 576), 'pyPROPOSAL.EnergyCutSettings', 'pp.EnergyCutSettings', (['(-1)', '(0.05)'], {}), '(-1, 0.05)\n', (566, 576), True, 'import pyPROPOSAL as pp\n'), ((582, 613), 'pyPROPOSAL.EnergyCutSettings', 'pp.EnergyCutSettings', (['(500)', '(0.05)'], {}), '(500, 0.05)\n', (602, 613), True, 'import pyPROPOSAL as pp\n'), ((1227, 1290), 'pyPROPOSAL.parametrization.photonuclear.ShadowDuttaRenoSarcevicSeckel', 'pp.parametrization.photonuclear.ShadowDuttaRenoSarcevicSeckel', ([], {}), '()\n', (1288, 1290), True, 'import pyPROPOSAL as pp\n'), ((1296, 1354), 'pyPROPOSAL.parametrization.photonuclear.ShadowButkevichMikhailov', 'pp.parametrization.photonuclear.ShadowButkevichMikhailov', ([], {}), '()\n', (1352, 1354), True, 'import pyPROPOSAL as pp\n'), ((18375, 18398), 'os.path.isdir', 'os.path.isdir', (['dir_name'], {}), '(dir_name)\n', (18388, 18398), False, 'import os\n'), ((18480, 18501), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (18491, 18501), False, 'import os\n'), ((4832, 4856), 'pyPROPOSAL.RandomGenerator.get', 'pp.RandomGenerator.get', ([], {}), '()\n', (4854, 4856), True, 'import pyPROPOSAL as pp\n'), ((6693, 6717), 'pyPROPOSAL.RandomGenerator.get', 'pp.RandomGenerator.get', ([], {}), '()\n', (6715, 6717), True, 'import pyPROPOSAL as pp\n'), ((12915, 12939), 'pyPROPOSAL.RandomGenerator.get', 'pp.RandomGenerator.get', ([], {}), '()\n', (12937, 12939), True, 'import pyPROPOSAL as pp\n'), ((15204, 15228), 'pyPROPOSAL.RandomGenerator.get', 'pp.RandomGenerator.get', ([], {}), '()\n', (15226, 15228), True, 'import pyPROPOSAL as pp\n'), ((2155, 2207), 'pyPROPOSAL.crosssection.PhotoInterpolant', 'pp.crosssection.PhotoInterpolant', (['photo', 'interpoldef'], {}), '(photo, interpoldef)\n', (2187, 2207), True, 'import pyPROPOSAL as pp\n'), ((2285, 2321), 'pyPROPOSAL.crosssection.PhotoIntegral', 'pp.crosssection.PhotoIntegral', (['photo'], {}), '(photo)\n', (2314, 2321), True, 'import pyPROPOSAL as pp\n'), ((3823, 3875), 'pyPROPOSAL.crosssection.PhotoInterpolant', 'pp.crosssection.PhotoInterpolant', (['photo', 'interpoldef'], {}), '(photo, interpoldef)\n', (3855, 3875), True, 'import pyPROPOSAL as pp\n'), ((3953, 3989), 'pyPROPOSAL.crosssection.PhotoIntegral', 'pp.crosssection.PhotoIntegral', 
(['photo'], {}), '(photo)\n', (3982, 3989), True, 'import pyPROPOSAL as pp\n'), ((5172, 5196), 'pyPROPOSAL.RandomGenerator.get', 'pp.RandomGenerator.get', ([], {}), '()\n', (5194, 5196), True, 'import pyPROPOSAL as pp\n'), ((5615, 5667), 'pyPROPOSAL.crosssection.PhotoInterpolant', 'pp.crosssection.PhotoInterpolant', (['photo', 'interpoldef'], {}), '(photo, interpoldef)\n', (5647, 5667), True, 'import pyPROPOSAL as pp\n'), ((5745, 5781), 'pyPROPOSAL.crosssection.PhotoIntegral', 'pp.crosssection.PhotoIntegral', (['photo'], {}), '(photo)\n', (5774, 5781), True, 'import pyPROPOSAL as pp\n'), ((7398, 7450), 'pyPROPOSAL.crosssection.PhotoInterpolant', 'pp.crosssection.PhotoInterpolant', (['photo', 'interpoldef'], {}), '(photo, interpoldef)\n', (7430, 7450), True, 'import pyPROPOSAL as pp\n'), ((7528, 7564), 'pyPROPOSAL.crosssection.PhotoIntegral', 'pp.crosssection.PhotoIntegral', (['photo'], {}), '(photo)\n', (7557, 7564), True, 'import pyPROPOSAL as pp\n'), ((9524, 9576), 'pyPROPOSAL.crosssection.PhotoInterpolant', 'pp.crosssection.PhotoInterpolant', (['photo', 'interpoldef'], {}), '(photo, interpoldef)\n', (9556, 9576), True, 'import pyPROPOSAL as pp\n'), ((9935, 9971), 'pyPROPOSAL.crosssection.PhotoIntegral', 'pp.crosssection.PhotoIntegral', (['photo'], {}), '(photo)\n', (9964, 9971), True, 'import pyPROPOSAL as pp\n'), ((11620, 11672), 'pyPROPOSAL.crosssection.PhotoInterpolant', 'pp.crosssection.PhotoInterpolant', (['photo', 'interpoldef'], {}), '(photo, interpoldef)\n', (11652, 11672), True, 'import pyPROPOSAL as pp\n'), ((12031, 12067), 'pyPROPOSAL.crosssection.PhotoIntegral', 'pp.crosssection.PhotoIntegral', (['photo'], {}), '(photo)\n', (12060, 12067), True, 'import pyPROPOSAL as pp\n'), ((13330, 13354), 'pyPROPOSAL.RandomGenerator.get', 'pp.RandomGenerator.get', ([], {}), '()\n', (13352, 13354), True, 'import pyPROPOSAL as pp\n'), ((13840, 13892), 'pyPROPOSAL.crosssection.PhotoInterpolant', 'pp.crosssection.PhotoInterpolant', (['photo', 'interpoldef'], {}), '(photo, interpoldef)\n', (13872, 13892), True, 'import pyPROPOSAL as pp\n'), ((14251, 14287), 'pyPROPOSAL.crosssection.PhotoIntegral', 'pp.crosssection.PhotoIntegral', (['photo'], {}), '(photo)\n', (14280, 14287), True, 'import pyPROPOSAL as pp\n'), ((16051, 16103), 'pyPROPOSAL.crosssection.PhotoInterpolant', 'pp.crosssection.PhotoInterpolant', (['photo', 'interpoldef'], {}), '(photo, interpoldef)\n', (16083, 16103), True, 'import pyPROPOSAL as pp\n'), ((16462, 16498), 'pyPROPOSAL.crosssection.PhotoIntegral', 'pp.crosssection.PhotoIntegral', (['photo'], {}), '(photo)\n', (16491, 16498), True, 'import pyPROPOSAL as pp\n'), ((7697, 7721), 'pyPROPOSAL.RandomGenerator.get', 'pp.RandomGenerator.get', ([], {}), '()\n', (7719, 7721), True, 'import pyPROPOSAL as pp\n'), ((7777, 7801), 'pyPROPOSAL.RandomGenerator.get', 'pp.RandomGenerator.get', ([], {}), '()\n', (7799, 7801), True, 'import pyPROPOSAL as pp\n'), ((16631, 16655), 'pyPROPOSAL.RandomGenerator.get', 'pp.RandomGenerator.get', ([], {}), '()\n', (16653, 16655), True, 'import pyPROPOSAL as pp\n'), ((16711, 16735), 'pyPROPOSAL.RandomGenerator.get', 'pp.RandomGenerator.get', ([], {}), '()\n', (16733, 16735), True, 'import pyPROPOSAL as pp\n')]
|
print("################################################################################")
print("# Implementation of a multivariate pattern analysis based on the scikitlearn ")
print("# toolbox (http://scikit-learn.org/stable/). It reads a matlab file containing ")
print("# Xm: a matrix of trials x chans x timepoint. ")
print("# y: a vector indicating the class of each trial ")
print("# The classification algorithm is based on a support vector machine. ")
print("# (c) <NAME> 2012, jeanremi.king [at] gmail.com ")
print("################################################################################")
###############################################################################
print("LIBRARY")
import sys as sys
import numpy as np
from scipy import stats
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold, LeaveOneOut, KFold
from sklearn.feature_selection import SelectPercentile, f_classif
import scipy.io as sio
from sklearn.preprocessing import Scaler
###############################################################################
print("INPUT DATA")
#-- get argument to load specific file
filenameX = str(sys.argv[1])
filenamey = str(sys.argv[2])
print(filenameX)
print(filenamey)
#-- Load data into python
mat = sio.loadmat(filenameX)
Xm_all = mat["Xm"] # data
#-- load classification parameters
mat = sio.loadmat(filenamey)
path = mat["path"][0]
nameX = mat["nameX"][0]
namey = mat["namey"][0]
folding = mat["folding"][0]
n_splits = mat["n_splits"] # number of times the (shuffled) cross-validation is repeated
n_splits = np.reshape(n_splits, n_splits.size)
n_folds = mat["n_folds"] # fold number
n_folds = np.reshape(n_folds, n_folds.size)
svm_C = mat["C"] # svm penalization parameter
svm_C = np.reshape(svm_C, svm_C.size)
compute_probas = mat["compute_probas"] # whether to store predicted class probabilities
compute_probas = np.reshape(compute_probas, compute_probas.size)
compute_predict = mat["compute_predict"] # whether to store predicted classes
compute_predict = np.reshape(compute_predict, compute_predict.size)
fs_n = mat["fs"] # feature selection
fs_n = np.reshape(fs_n, fs_n.size)
dims = mat["dims"] # select time windows to compute
dims = np.reshape(dims, dims.size) - 1 # reshape for skl compatibility
dims_tg = mat["dims_tg"] - 1 # time windows to generalize to (0-based indexing)
y_all = mat["y"] # class used for train and test
y_all = np.reshape(y_all, y_all.size) # reshape for skl compatibility
y2_all = mat["y2"] # class used for sample weights
y2_all = np.reshape(y2_all, y2_all.size) # reshape for skl compatibility
#-- build training and generalizing classes
Xm = Xm_all[y_all > 0, :, :] # training categories
Xmg = Xm_all[y_all < 0, :, :] # generalization categories
y = y_all[y_all > 0]
yg = y_all[y_all < 0]
y2 = y2_all[y_all > 0]
n_samples, n_features, unused = Xm.shape
n_samplesg, unused, unused = Xmg.shape
n_featuresg = n_features
n_dims = dims.shape[0]
n_dimsg = n_dims
n_dims_tg = dims_tg.shape[1]
n_dimsg_tg = dims_tg.shape[1]
n_classes = np.unique(y).shape[0]
#deal with sample_weight
sample_weight = np.ones(y.shape[0])
classes = np.unique(y2)
for c in range(classes.shape[0]):
sample_weight[y2 == classes[c]] = 1. / (np.sum(y2 == classes[c]))
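# each class in y2 thus receives the same total weight: samples are weighted
# by the inverse of their class frequency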
###############################################################################
print("PREPARE CLASSIFICATION")
#--crossvalidation
if folding == 'stratified':
cv = StratifiedKFold(y, k=n_folds)
elif folding == 'kfolding':
cv = KFold(n=y.shape[0], k=n_folds)
elif folding == 'leaveoneout':
n_folds[0] = y.shape[0]
cv = LeaveOneOut(n=y.shape[0])
else:
print("unknown crossvalidation method!")
#-- classifier
clf = svm.SVC(kernel='linear', probability=True, C=svm_C)
#-- normalizer
scaler = Scaler()
#-- feature selection
fs = SelectPercentile(f_classif, percentile=fs_n)
print("INITIALIZE RESULTS")
if compute_predict:
predict = np.zeros([n_splits, n_samples, n_dims, n_dims_tg]) ** np.nan
predictg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_folds]) ** np.nan
else:
predict = []
predictg = []
if compute_probas:
probas = np.zeros([n_splits, n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
probasg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_classes, n_folds]) ** np.nan
else:
probas = []
probasg = []
coef = np.empty([n_splits, n_folds, n_dims, n_classes * (n_classes - 1) / 2, n_features]) ** 0
all_folds = np.zeros([n_splits, n_folds, n_samples]) ** np.nan
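# note: "** 0" turns every entry of np.empty() into 1.0, while "** np.nan" turns
# the zeros into NaN, so coef starts as ones and the result containers start as NaN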
y_shfl = np.copy(y)
Xm_shfl = np.copy(Xm)
sw_shfl = np.copy(sample_weight)
###############################################################################
print("CLASSIFY")
#-- shufflesplit
# repeat the stratified k-folding to get rid of the folding artefacts
for split in range(n_splits):
print(split)
# shuffle order
new_order = np.array(range(y.shape[0]))
if split > 0:
np.random.shuffle(new_order)
y_shfl[new_order] = np.copy(y)
Xm_shfl[new_order, :, :] = np.copy(Xm)
sw_shfl[new_order] = np.copy(sample_weight)
cv = StratifiedKFold(y_shfl, k=n_folds)
# Stratified crossvalidation
for fold, (train, test) in enumerate(cv):
print(fold)
all_folds[split, fold, train] = 1
all_folds[split, fold, test] = 0
for d in range(0, dims.shape[0]):
Xtrain = Xm_shfl[train, :, dims[d]]
ytrain = y_shfl[train]
sw_train = sw_shfl[train]
# (deal with NaN in training)
ytrain = ytrain[~np.isnan(np.nansum(Xtrain, axis=1))]
sw_train = sw_train[~np.isnan(np.nansum(Xtrain, axis=1))]
Xtrain = Xtrain[~np.isnan(np.nansum(Xtrain, axis=1)), :]
if np.unique(ytrain).shape[0] > 1:
# feature selection (find the 50% most discriminative channels)
fs.fit(Xtrain, ytrain) # find
Xtrain = fs.transform(Xtrain) # remove unnecessary channels
# normalization
scaler.fit(Xtrain) # find
Xtrain = scaler.transform(Xtrain) # apply zscore
# SVM fit
clf.fit(Xtrain, ytrain, sample_weight=sw_train)
# retrieve hyperplan feature identification
coef[split, fold, d, :, :] = 0 # initialize
#--- univariate
uni_features = fs.pvalues_ <= stats.scoreatpercentile(fs.pvalues_, fs.percentile)
#--- multivariate
coef[split, fold, d, :, uni_features] = scaler.inverse_transform(clf.coef_).T
# predict cross val (deal with NaN in testing)
# generalize across all time points
for d_tg in range(0, n_dims_tg):
sys.stdout.write("*")
sys.stdout.flush()
# select data
Xtest = Xm_shfl[test, :, dims_tg[d, d_tg]]
# handles NaNs
test_nan = np.isnan(np.nansum(Xtest, axis=1))
Xtest = Xtest[~test_nan, :]
# preproc
Xtest = fs.transform(Xtest)
Xtest = scaler.transform(Xtest)
# predict
if (Xtest.shape[0] - np.sum(test_nan)) > 0:
if compute_predict:
predict[split, test[~test_nan], d, d_tg] = clf.predict(Xtest)
if compute_probas:
probas[split, test[~test_nan], d, d_tg, :] = clf.predict_proba(Xtest)
if np.sum(test_nan) > 0:
probas[split, test[test_nan], d, d_tg, :] = np.nan
# predict cross val on generalization sample (deal with NaN in testing)
# select data
Xtestg = Xmg[:, :, dims_tg[d, d_tg]]
# handles NaNs
test_nan = np.isnan(np.nansum(Xtestg, axis=1))
if (Xtestg.shape[0] - np.sum(test_nan)) > 0:
Xtestg = Xtestg[~test_nan, :]
# preproc
Xtestg = fs.transform(Xtestg)
Xtestg = scaler.transform(Xtestg)
# predict
if compute_predict:
predictg[split, ~test_nan, d, d_tg, fold] = clf.predict(Xtestg)
if compute_probas:
probasg[split, ~test_nan, d, d_tg, :, fold] = clf.predict_proba(Xtestg)
if np.sum(test_nan) > 0:
probasg[split, test_nan, d, d_tg, :, fold] = np.nan
#-- reorder
if compute_predict:
predict[split, :, :, :] = predict[split, new_order, :, :]
if compute_probas:
probas[split, :, :, :, :] = probas[split, new_order, :, :, :]
all_folds[split, :, :] = all_folds[split, :, new_order].T
###############################################################################
print("EXPORT DATA")
mat['predict'] = predict
mat['predictg'] = predictg
mat['probas'] = probas
mat['probasg'] = probasg
mat['coef'] = coef
mat['all_folds'] = all_folds
mat['y_all'] = y_all
mat['y'] = y
mat['yg'] = yg
mat['filenameX'] = filenameX
mat['filenamey'] = filenamey
output = path + nameX + '_' + namey + "_results.mat"
print(output)
sio.savemat(output, mat)
|
[
"sklearn.cross_validation.KFold",
"scipy.io.savemat",
"scipy.io.loadmat",
"sys.stdout.write",
"numpy.reshape",
"scipy.stats.scoreatpercentile",
"numpy.empty",
"sklearn.cross_validation.LeaveOneOut",
"sklearn.feature_selection.SelectPercentile",
"sys.stdout.flush",
"numpy.ones",
"sklearn.preprocessing.Scaler",
"numpy.nansum",
"sklearn.svm.SVC",
"numpy.copy",
"numpy.unique",
"numpy.sum",
"sklearn.cross_validation.StratifiedKFold",
"numpy.zeros",
"numpy.random.shuffle"
] |
[((1364, 1386), 'scipy.io.loadmat', 'sio.loadmat', (['filenameX'], {}), '(filenameX)\n', (1375, 1386), True, 'import scipy.io as sio\n'), ((1456, 1478), 'scipy.io.loadmat', 'sio.loadmat', (['filenamey'], {}), '(filenamey)\n', (1467, 1478), True, 'import scipy.io as sio\n'), ((1645, 1680), 'numpy.reshape', 'np.reshape', (['n_splits', 'n_splits.size'], {}), '(n_splits, n_splits.size)\n', (1655, 1680), True, 'import numpy as np\n'), ((1731, 1764), 'numpy.reshape', 'np.reshape', (['n_folds', 'n_folds.size'], {}), '(n_folds, n_folds.size)\n', (1741, 1764), True, 'import numpy as np\n'), ((1820, 1849), 'numpy.reshape', 'np.reshape', (['svm_C', 'svm_C.size'], {}), '(svm_C, svm_C.size)\n', (1830, 1849), True, 'import numpy as np\n'), ((1936, 1983), 'numpy.reshape', 'np.reshape', (['compute_probas', 'compute_probas.size'], {}), '(compute_probas, compute_probas.size)\n', (1946, 1983), True, 'import numpy as np\n'), ((2073, 2122), 'numpy.reshape', 'np.reshape', (['compute_predict', 'compute_predict.size'], {}), '(compute_predict, compute_predict.size)\n', (2083, 2122), True, 'import numpy as np\n'), ((2168, 2195), 'numpy.reshape', 'np.reshape', (['fs_n', 'fs_n.size'], {}), '(fs_n, fs_n.size)\n', (2178, 2195), True, 'import numpy as np\n'), ((2438, 2467), 'numpy.reshape', 'np.reshape', (['y_all', 'y_all.size'], {}), '(y_all, y_all.size)\n', (2448, 2467), True, 'import numpy as np\n'), ((2562, 2593), 'numpy.reshape', 'np.reshape', (['y2_all', 'y2_all.size'], {}), '(y2_all, y2_all.size)\n', (2572, 2593), True, 'import numpy as np\n'), ((3131, 3150), 'numpy.ones', 'np.ones', (['y.shape[0]'], {}), '(y.shape[0])\n', (3138, 3150), True, 'import numpy as np\n'), ((3161, 3174), 'numpy.unique', 'np.unique', (['y2'], {}), '(y2)\n', (3170, 3174), True, 'import numpy as np\n'), ((3716, 3767), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""', 'probability': '(True)', 'C': 'svm_C'}), "(kernel='linear', probability=True, C=svm_C)\n", (3723, 3767), False, 'from sklearn import svm\n'), ((3793, 3801), 'sklearn.preprocessing.Scaler', 'Scaler', ([], {}), '()\n', (3799, 3801), False, 'from sklearn.preprocessing import Scaler\n'), ((3830, 3874), 'sklearn.feature_selection.SelectPercentile', 'SelectPercentile', (['f_classif'], {'percentile': 'fs_n'}), '(f_classif, percentile=fs_n)\n', (3846, 3874), False, 'from sklearn.feature_selection import SelectPercentile, f_classif\n'), ((4538, 4548), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (4545, 4548), True, 'import numpy as np\n'), ((4559, 4570), 'numpy.copy', 'np.copy', (['Xm'], {}), '(Xm)\n', (4566, 4570), True, 'import numpy as np\n'), ((4581, 4603), 'numpy.copy', 'np.copy', (['sample_weight'], {}), '(sample_weight)\n', (4588, 4603), True, 'import numpy as np\n'), ((9427, 9451), 'scipy.io.savemat', 'sio.savemat', (['output', 'mat'], {}), '(output, mat)\n', (9438, 9451), True, 'import scipy.io as sio\n'), ((2256, 2283), 'numpy.reshape', 'np.reshape', (['dims', 'dims.size'], {}), '(dims, dims.size)\n', (2266, 2283), True, 'import numpy as np\n'), ((3450, 3479), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (['y'], {'k': 'n_folds'}), '(y, k=n_folds)\n', (3465, 3479), False, 'from sklearn.cross_validation import StratifiedKFold, LeaveOneOut, KFold\n'), ((4378, 4464), 'numpy.empty', 'np.empty', (['[n_splits, n_folds, n_dims, n_classes * (n_classes - 1) / 2, n_features]'], {}), '([n_splits, n_folds, n_dims, n_classes * (n_classes - 1) / 2,\n n_features])\n', (4386, 4464), True, 'import numpy as np\n'), ((4478, 4518), 'numpy.zeros', 'np.zeros', 
(['[n_splits, n_folds, n_samples]'], {}), '([n_splits, n_folds, n_samples])\n', (4486, 4518), True, 'import numpy as np\n'), ((3068, 3080), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3077, 3080), True, 'import numpy as np\n'), ((3253, 3277), 'numpy.sum', 'np.sum', (['(y2 == classes[c])'], {}), '(y2 == classes[c])\n', (3259, 3277), True, 'import numpy as np\n'), ((3517, 3547), 'sklearn.cross_validation.KFold', 'KFold', ([], {'n': 'y.shape[0]', 'k': 'n_folds'}), '(n=y.shape[0], k=n_folds)\n', (3522, 3547), False, 'from sklearn.cross_validation import StratifiedKFold, LeaveOneOut, KFold\n'), ((3938, 3988), 'numpy.zeros', 'np.zeros', (['[n_splits, n_samples, n_dims, n_dims_tg]'], {}), '([n_splits, n_samples, n_dims, n_dims_tg])\n', (3946, 3988), True, 'import numpy as np\n'), ((4014, 4076), 'numpy.zeros', 'np.zeros', (['[n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_folds]'], {}), '([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_folds])\n', (4022, 4076), True, 'import numpy as np\n'), ((4161, 4222), 'numpy.zeros', 'np.zeros', (['[n_splits, n_samples, n_dims, n_dims_tg, n_classes]'], {}), '([n_splits, n_samples, n_dims, n_dims_tg, n_classes])\n', (4169, 4222), True, 'import numpy as np\n'), ((4247, 4320), 'numpy.zeros', 'np.zeros', (['[n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_classes, n_folds]'], {}), '([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_classes, n_folds])\n', (4255, 4320), True, 'import numpy as np\n'), ((4929, 4957), 'numpy.random.shuffle', 'np.random.shuffle', (['new_order'], {}), '(new_order)\n', (4946, 4957), True, 'import numpy as np\n'), ((4986, 4996), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (4993, 4996), True, 'import numpy as np\n'), ((5032, 5043), 'numpy.copy', 'np.copy', (['Xm'], {}), '(Xm)\n', (5039, 5043), True, 'import numpy as np\n'), ((5073, 5095), 'numpy.copy', 'np.copy', (['sample_weight'], {}), '(sample_weight)\n', (5080, 5095), True, 'import numpy as np\n'), ((5109, 5143), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', (['y_shfl'], {'k': 'n_folds'}), '(y_shfl, k=n_folds)\n', (5124, 5143), False, 'from sklearn.cross_validation import StratifiedKFold, LeaveOneOut, KFold\n'), ((3616, 3641), 'sklearn.cross_validation.LeaveOneOut', 'LeaveOneOut', ([], {'n': 'y.shape[0]'}), '(n=y.shape[0])\n', (3627, 3641), False, 'from sklearn.cross_validation import StratifiedKFold, LeaveOneOut, KFold\n'), ((6434, 6485), 'scipy.stats.scoreatpercentile', 'stats.scoreatpercentile', (['fs.pvalues_', 'fs.percentile'], {}), '(fs.pvalues_, fs.percentile)\n', (6457, 6485), False, 'from scipy import stats\n'), ((6798, 6819), 'sys.stdout.write', 'sys.stdout.write', (['"""*"""'], {}), "('*')\n", (6814, 6819), True, 'import sys as sys\n'), ((6840, 6858), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6856, 6858), True, 'import sys as sys\n'), ((5569, 5594), 'numpy.nansum', 'np.nansum', (['Xtrain'], {'axis': '(1)'}), '(Xtrain, axis=1)\n', (5578, 5594), True, 'import numpy as np\n'), ((5639, 5664), 'numpy.nansum', 'np.nansum', (['Xtrain'], {'axis': '(1)'}), '(Xtrain, axis=1)\n', (5648, 5664), True, 'import numpy as np\n'), ((5751, 5768), 'numpy.unique', 'np.unique', (['ytrain'], {}), '(ytrain)\n', (5760, 5768), True, 'import numpy as np\n'), ((7031, 7055), 'numpy.nansum', 'np.nansum', (['Xtest'], {'axis': '(1)'}), '(Xtest, axis=1)\n', (7040, 7055), True, 'import numpy as np\n'), ((7998, 8023), 'numpy.nansum', 'np.nansum', (['Xtestg'], {'axis': '(1)'}), '(Xtestg, axis=1)\n', (8007, 8023), True, 'import numpy as np\n'), ((5705, 5730), 'numpy.nansum', 
'np.nansum', (['Xtrain'], {'axis': '(1)'}), '(Xtrain, axis=1)\n', (5714, 5730), True, 'import numpy as np\n'), ((7306, 7322), 'numpy.sum', 'np.sum', (['test_nan'], {}), '(test_nan)\n', (7312, 7322), True, 'import numpy as np\n'), ((8067, 8083), 'numpy.sum', 'np.sum', (['test_nan'], {}), '(test_nan)\n', (8073, 8083), True, 'import numpy as np\n'), ((7635, 7651), 'numpy.sum', 'np.sum', (['test_nan'], {}), '(test_nan)\n', (7641, 7651), True, 'import numpy as np\n'), ((8634, 8650), 'numpy.sum', 'np.sum', (['test_nan'], {}), '(test_nan)\n', (8640, 8650), True, 'import numpy as np\n')]
|
"""
Helmholtz coils
===============
A script that computes the magnetic field generated by a pair of Helmholtz
coils.
"""
import numpy as np
from scipy import special, linalg
##############################################################################
# Function to calculate the field of a loop
def base_vectors(n):
""" Returns 3 orthognal base vectors, the first one colinear to n.
"""
# normalize n
n = n / np.sqrt(np.square(n).sum(axis=-1))
# choose two vectors perpendicular to n
    # choice is arbitrary since the coil is symmetric about n
if abs(n[0]) == 1 :
l = np.r_[n[2], 0, -n[0]]
else:
l = np.r_[0, n[2], -n[1]]
l = l / np.sqrt(np.square(l).sum(axis=-1))
m = np.cross(n, l)
return n, l, m
def B_field(r, n, r0, R):
"""
returns the magnetic field from an arbitrary current loop calculated from
eqns (1) and (2) in Phys Rev A Vol. 35, N 4, pp. 1535-1546; 1987.
Parameters
----------
n is normal vector to the plane of the loop at the center, current
is oriented by the right-hand-rule.
r is a position vector where the Bfield is evaluated:
        [x1 y1 z1 ; x2 y2 z2 ; ... ]
r is in units of d
r0 is the location of the center of the loop in units of d: [x y z]
R is the radius of the loop
Returns
-------
B is a vector for the B field at point r in inverse units of
(mu I) / (2 pi d)
for I in amps and d in meters and mu = 4 pi * 10^-7 we get Tesla
"""
### Translate the coordinates in the coil's frame
n, l, m = base_vectors(n)
# transformation matrix coil frame to lab frame
trans = np.vstack((l, m, n))
# transformation matrix to lab frame to coil frame
inv_trans = linalg.inv(trans)
r = r - r0 #point location from center of coil
r = np.dot(r, inv_trans) #transform vector to coil frame
#### calculate field
# express the coordinates in polar form
x = r[:, 0]
y = r[:, 1]
z = r[:, 2]
rho = np.sqrt(x**2 + y**2)
theta = np.arctan(x / y)
# NaNs are generated where y is zero.
theta[y == 0] = np.pi / 2
E = special.ellipe((4 * R * rho)/( (R + rho)**2 + z**2))
K = special.ellipk((4 * R * rho)/( (R + rho)**2 + z**2))
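    # E and K are the complete elliptic integrals of the second and first kind
    # that appear in eqns (1) and (2) of the reference cited in the docstring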
dist = ((R - rho)**2 + z**2)
Bz = 1 / np.sqrt((R + rho)**2 + z**2) * (
K
+ E * (R**2 - rho**2 - z**2) / dist
)
Brho = z / (rho*np.sqrt((R + rho)**2 + z**2)) * (
-K
+ E * (R**2 + rho**2 + z**2)/ dist
)
    # On the axis of the coil we get a division by zero here. This returns a
# NaN, where the field is actually zero :
Brho[dist == 0] = 0
Brho[rho == 0] = 0
Bz[dist == 0] = 0
B = np.c_[np.cos(theta)*Brho, np.sin(theta)*Brho, Bz ]
# Rotate the field back in the lab's frame
B = np.dot(B, trans)
return B
##############################################################################
# The grid of points on which we want to evaluate the field
X, Y, Z = np.mgrid[-0.15:0.15:31j, -0.15:0.15:31j, -0.15:0.15:31j]
# Avoid rounding issues :
f = 1e4 # this gives the precision we are interested in:
X = np.round(X * f) / f
Y = np.round(Y * f) / f
Z = np.round(Z * f) / f
# The (x, y, z) position vector
r = np.c_[np.ravel(X), np.ravel(Y), np.ravel(Z)]
##############################################################################
# The coil positions
# The center of the coil
r0 = np.r_[0, 0, 0.1]
# The normal to the coils
n = np.r_[0, 0, 1]
# The radius
R = 0.1
# Add the mirror image of this coil relative to the xy plane :
r0 = np.vstack((r0, -r0 ))
R = np.r_[R, R]
n = np.vstack((n, n)) # Helmholtz-like configuration
##############################################################################
# Calculate field
# First initialize a container matrix for the field vector :
B = np.zeros_like(r)
# Then loop through the different coils and sum the fields :
for this_n, this_r0, this_R in zip(n, r0, R):
this_n = np.array(this_n)
this_r0 = np.array(this_r0)
this_R = np.array(this_R)
B += B_field(r, this_n, this_r0, this_R)
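# After the loop, B holds the summed field of both coils at every grid point, in
# the (mu I) / (2 pi d) units described in B_field's docstring; as an illustrative
# note (not part of the original script), its magnitude could be obtained with
# np.sqrt(np.sum(B ** 2, axis=-1)).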
|
[
"numpy.sqrt",
"numpy.cross",
"scipy.special.ellipe",
"numpy.sin",
"numpy.zeros_like",
"numpy.square",
"scipy.special.ellipk",
"numpy.array",
"numpy.dot",
"numpy.vstack",
"numpy.cos",
"numpy.ravel",
"scipy.linalg.inv",
"numpy.round",
"numpy.arctan"
] |
[((3661, 3681), 'numpy.vstack', 'np.vstack', (['(r0, -r0)'], {}), '((r0, -r0))\n', (3670, 3681), True, 'import numpy as np\n'), ((3703, 3720), 'numpy.vstack', 'np.vstack', (['(n, n)'], {}), '((n, n))\n', (3712, 3720), True, 'import numpy as np\n'), ((3918, 3934), 'numpy.zeros_like', 'np.zeros_like', (['r'], {}), '(r)\n', (3931, 3934), True, 'import numpy as np\n'), ((727, 741), 'numpy.cross', 'np.cross', (['n', 'l'], {}), '(n, l)\n', (735, 741), True, 'import numpy as np\n'), ((1683, 1703), 'numpy.vstack', 'np.vstack', (['(l, m, n)'], {}), '((l, m, n))\n', (1692, 1703), True, 'import numpy as np\n'), ((1775, 1792), 'scipy.linalg.inv', 'linalg.inv', (['trans'], {}), '(trans)\n', (1785, 1792), False, 'from scipy import special, linalg\n'), ((1855, 1875), 'numpy.dot', 'np.dot', (['r', 'inv_trans'], {}), '(r, inv_trans)\n', (1861, 1875), True, 'import numpy as np\n'), ((2042, 2066), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (2049, 2066), True, 'import numpy as np\n'), ((2075, 2091), 'numpy.arctan', 'np.arctan', (['(x / y)'], {}), '(x / y)\n', (2084, 2091), True, 'import numpy as np\n'), ((2173, 2228), 'scipy.special.ellipe', 'special.ellipe', (['(4 * R * rho / ((R + rho) ** 2 + z ** 2))'], {}), '(4 * R * rho / ((R + rho) ** 2 + z ** 2))\n', (2187, 2228), False, 'from scipy import special, linalg\n'), ((2234, 2289), 'scipy.special.ellipk', 'special.ellipk', (['(4 * R * rho / ((R + rho) ** 2 + z ** 2))'], {}), '(4 * R * rho / ((R + rho) ** 2 + z ** 2))\n', (2248, 2289), False, 'from scipy import special, linalg\n'), ((2894, 2910), 'numpy.dot', 'np.dot', (['B', 'trans'], {}), '(B, trans)\n', (2900, 2910), True, 'import numpy as np\n'), ((3220, 3235), 'numpy.round', 'np.round', (['(X * f)'], {}), '(X * f)\n', (3228, 3235), True, 'import numpy as np\n'), ((3244, 3259), 'numpy.round', 'np.round', (['(Y * f)'], {}), '(Y * f)\n', (3252, 3259), True, 'import numpy as np\n'), ((3268, 3283), 'numpy.round', 'np.round', (['(Z * f)'], {}), '(Z * f)\n', (3276, 3283), True, 'import numpy as np\n'), ((4056, 4072), 'numpy.array', 'np.array', (['this_n'], {}), '(this_n)\n', (4064, 4072), True, 'import numpy as np\n'), ((4087, 4104), 'numpy.array', 'np.array', (['this_r0'], {}), '(this_r0)\n', (4095, 4104), True, 'import numpy as np\n'), ((4119, 4135), 'numpy.array', 'np.array', (['this_R'], {}), '(this_R)\n', (4127, 4135), True, 'import numpy as np\n'), ((3331, 3342), 'numpy.ravel', 'np.ravel', (['X'], {}), '(X)\n', (3339, 3342), True, 'import numpy as np\n'), ((3344, 3355), 'numpy.ravel', 'np.ravel', (['Y'], {}), '(Y)\n', (3352, 3355), True, 'import numpy as np\n'), ((3357, 3368), 'numpy.ravel', 'np.ravel', (['Z'], {}), '(Z)\n', (3365, 3368), True, 'import numpy as np\n'), ((2333, 2365), 'numpy.sqrt', 'np.sqrt', (['((R + rho) ** 2 + z ** 2)'], {}), '((R + rho) ** 2 + z ** 2)\n', (2340, 2365), True, 'import numpy as np\n'), ((2470, 2502), 'numpy.sqrt', 'np.sqrt', (['((R + rho) ** 2 + z ** 2)'], {}), '((R + rho) ** 2 + z ** 2)\n', (2477, 2502), True, 'import numpy as np\n'), ((2793, 2806), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2799, 2806), True, 'import numpy as np\n'), ((2813, 2826), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2819, 2826), True, 'import numpy as np\n'), ((436, 448), 'numpy.square', 'np.square', (['n'], {}), '(n)\n', (445, 448), True, 'import numpy as np\n'), ((692, 704), 'numpy.square', 'np.square', (['l'], {}), '(l)\n', (701, 704), True, 'import numpy as np\n')]
|
import itertools
import numpy as np
import torch
from ..math import complex_mult, conj_complex_mult
def get_interpob(model):
"""Retrieves the interpolation dictionary from model.
Different nufft objects use different interpolation objects. This function
only extracts the minimum amount necessary for sparse matrix precomputation.
Args:
model (KbNufft-type object): A KbNufft object with attributes for
forming a KbNufft interpolation dictionary.
Returns:
interpob (dictionary): A dictionary with interpolation parameters.
"""
interpob = dict()
interpob['table'] = []
for i in range(len(model.table)):
interpob['table'].append(getattr(model, 'table_tensor_' + str(i)))
interpob['grid_size'] = model.grid_size_tensor
interpob['numpoints'] = model.numpoints_tensor
interpob['table_oversamp'] = model.table_oversamp_tensor
return interpob
def compute_forw_mat(dims, table, numpoints, Jlist, L, tm):
"""Compute a forward Kaiser-Bessel interpolation sparse matrix.
Args:
dims (tensor): A list of sizes of each dimension.
table (tensor): A list of interpolation tables.
numpoints (tensor): A list of numbers of nearest neighbors for each dimension.
Jlist (tensor): A list of nearest neighbor configurations.
L (tensor): A list of table sizes for each dimension.
tm (tensor): An array of normalized frequency locations.
Returns:
(coef_mat_real, coef_mat_imag) (tuple): A 2-length tuple with a sparse
interpolation matrix in each element. The first matrix has the real
coefficients; the second has the imaginary.
"""
dtype = table[0].dtype
device = table[0].device
int_type = torch.long
M = tm.shape[1]
ndims = tm.shape[0]
nJ = Jlist.shape[1]
# center of tables
centers = torch.floor(numpoints * L / 2).to(dtype=int_type)
# offset from k-space to first coef loc
kofflist = 1 + torch.floor(tm - numpoints.unsqueeze(1) / 2.0)
# do a bit of type management - ints for faster index comps
curgridind = torch.zeros(tm.shape, dtype=dtype, device=device)
curdistind = torch.zeros(tm.shape, dtype=int_type, device=device)
arr_ind = torch.zeros((M,), dtype=int_type, device=device)
coef = torch.ones((2, M), dtype=dtype, device=device)
dims = dims.to(dtype=int_type)
kofflist = kofflist.to(dtype=int_type)
Jlist = Jlist.to(dtype=int_type)
coef_mat_real = torch.sparse.FloatTensor(
tm.shape[-1], torch.prod(dims)).to(dtype=dtype, device=device)
coef_mat_imag = torch.sparse.FloatTensor(
tm.shape[-1], torch.prod(dims)).to(dtype=dtype, device=device)
# loop over offsets and take advantage of broadcasting
for Jind in range(nJ):
curgridind = (kofflist + Jlist[:, Jind].unsqueeze(1)).to(dtype)
curdistind = torch.round(
(tm - curgridind) * L.unsqueeze(1)).to(dtype=int_type)
curgridind = curgridind.to(int_type)
arr_ind = torch.zeros((M,), dtype=int_type, device=device)
coef = torch.stack((
torch.ones(M, dtype=dtype, device=device),
torch.zeros(M, dtype=dtype, device=device)
))
for d in range(ndims): # spatial dimension
coef = complex_mult(
coef,
table[d][:, curdistind[d, :] + centers[d]],
dim=0
)
arr_ind = arr_ind + torch.remainder(curgridind[d, :], dims[d]).view(-1) * \
torch.prod(dims[d + 1:])
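            # arr_ind accumulates the row-major flattened grid index over dimensions,
            # wrapping out-of-range neighbor locations back onto the grid via remainder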
sparse_coords = torch.stack(
(
torch.arange(
arr_ind.shape[0],
dtype=arr_ind.dtype,
device=arr_ind.device
),
arr_ind
)
)
coef_mat_real = coef_mat_real + torch.sparse.FloatTensor(
sparse_coords,
coef[0],
torch.Size((arr_ind.shape[0], torch.prod(dims)))
)
coef_mat_imag = coef_mat_imag + torch.sparse.FloatTensor(
sparse_coords,
coef[1],
torch.Size((arr_ind.shape[0], torch.prod(dims)))
)
return coef_mat_real, coef_mat_imag
def precomp_sparse_mats(om, model):
"""Precompute sparse interpolation matrices.
Args:
om (tensor): The k-space trajectory in radians/voxel.
model (KbNufft-type object): A KbNufft type object with attributes for
creating a KbNufft interpolation object.
Returns:
(coef_real_mats, coef_imag_mats) (tuple): A 2-length tuple with lists of sparse
interpolation matrices in each element. The first matrix has the real
coefficient matrices; the second has the imaginary.
"""
interpob = get_interpob(model)
dtype = interpob['table'][0].dtype
device = interpob['table'][0].device
# extract interpolation params and match device and dtype to input
table = interpob['table']
grid_size = interpob['grid_size']
numpoints = interpob['numpoints']
table_oversamp = interpob['table_oversamp']
ndims = om.shape[1]
M = om.shape[2]
# convert to normalized freq locs
tm = torch.zeros(size=om.shape, dtype=dtype, device=device)
Jgen = []
for i in range(ndims):
gam = (2 * np.pi / grid_size[i])
tm[:, i, :] = om[:, i, :] / gam
Jgen.append(range(np.array(numpoints[i].cpu(), dtype=np.int)))
# build an iterator for going over all J values
Jgen = list(itertools.product(*Jgen))
coef_real_mats = []
coef_imag_mats = []
for norm_traj in tm:
coef_mat_real, coef_mat_imag = compute_forw_mat(
grid_size.to(dtype=dtype, device=device),
table,
numpoints,
torch.tensor(
np.transpose(np.array(Jgen)),
dtype=dtype,
device=device
),
table_oversamp,
norm_traj
)
coef_real_mats.append(coef_mat_real)
coef_imag_mats.append(coef_mat_imag)
return coef_real_mats, coef_imag_mats
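# Illustrative usage sketch (assumption, not from the original module): given a
# k-space trajectory om of shape (batch, ndims, klength) and a KbNufft-type object,
#   real_mats, imag_mats = precomp_sparse_mats(om, nufft_ob)
# returns one sparse real/imaginary interpolation matrix per trajectory in the batch.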
|
[
"torch.floor",
"itertools.product",
"torch.prod",
"numpy.array",
"torch.remainder",
"torch.zeros",
"torch.arange",
"torch.ones"
] |
[((2134, 2183), 'torch.zeros', 'torch.zeros', (['tm.shape'], {'dtype': 'dtype', 'device': 'device'}), '(tm.shape, dtype=dtype, device=device)\n', (2145, 2183), False, 'import torch\n'), ((2201, 2253), 'torch.zeros', 'torch.zeros', (['tm.shape'], {'dtype': 'int_type', 'device': 'device'}), '(tm.shape, dtype=int_type, device=device)\n', (2212, 2253), False, 'import torch\n'), ((2268, 2316), 'torch.zeros', 'torch.zeros', (['(M,)'], {'dtype': 'int_type', 'device': 'device'}), '((M,), dtype=int_type, device=device)\n', (2279, 2316), False, 'import torch\n'), ((2328, 2374), 'torch.ones', 'torch.ones', (['(2, M)'], {'dtype': 'dtype', 'device': 'device'}), '((2, M), dtype=dtype, device=device)\n', (2338, 2374), False, 'import torch\n'), ((5244, 5298), 'torch.zeros', 'torch.zeros', ([], {'size': 'om.shape', 'dtype': 'dtype', 'device': 'device'}), '(size=om.shape, dtype=dtype, device=device)\n', (5255, 5298), False, 'import torch\n'), ((3049, 3097), 'torch.zeros', 'torch.zeros', (['(M,)'], {'dtype': 'int_type', 'device': 'device'}), '((M,), dtype=int_type, device=device)\n', (3060, 3097), False, 'import torch\n'), ((5561, 5585), 'itertools.product', 'itertools.product', (['*Jgen'], {}), '(*Jgen)\n', (5578, 5585), False, 'import itertools\n'), ((1892, 1922), 'torch.floor', 'torch.floor', (['(numpoints * L / 2)'], {}), '(numpoints * L / 2)\n', (1903, 1922), False, 'import torch\n'), ((2559, 2575), 'torch.prod', 'torch.prod', (['dims'], {}), '(dims)\n', (2569, 2575), False, 'import torch\n'), ((2676, 2692), 'torch.prod', 'torch.prod', (['dims'], {}), '(dims)\n', (2686, 2692), False, 'import torch\n'), ((3139, 3180), 'torch.ones', 'torch.ones', (['M'], {'dtype': 'dtype', 'device': 'device'}), '(M, dtype=dtype, device=device)\n', (3149, 3180), False, 'import torch\n'), ((3194, 3236), 'torch.zeros', 'torch.zeros', (['M'], {'dtype': 'dtype', 'device': 'device'}), '(M, dtype=dtype, device=device)\n', (3205, 3236), False, 'import torch\n'), ((3649, 3723), 'torch.arange', 'torch.arange', (['arr_ind.shape[0]'], {'dtype': 'arr_ind.dtype', 'device': 'arr_ind.device'}), '(arr_ind.shape[0], dtype=arr_ind.dtype, device=arr_ind.device)\n', (3661, 3723), False, 'import torch\n'), ((3556, 3580), 'torch.prod', 'torch.prod', (['dims[d + 1:]'], {}), '(dims[d + 1:])\n', (3566, 3580), False, 'import torch\n'), ((5869, 5883), 'numpy.array', 'np.array', (['Jgen'], {}), '(Jgen)\n', (5877, 5883), True, 'import numpy as np\n'), ((4007, 4023), 'torch.prod', 'torch.prod', (['dims'], {}), '(dims)\n', (4017, 4023), False, 'import torch\n'), ((4192, 4208), 'torch.prod', 'torch.prod', (['dims'], {}), '(dims)\n', (4202, 4208), False, 'import torch\n'), ((3484, 3526), 'torch.remainder', 'torch.remainder', (['curgridind[d, :]', 'dims[d]'], {}), '(curgridind[d, :], dims[d])\n', (3499, 3526), False, 'import torch\n')]
|
import torch
from torch import optim
from torchvision import datasets, transforms
from vision import LeNet, CNN, weights_init
from PIL import Image
from utils import label_to_onehot, cross_entropy_for_onehot
import torch.nn.functional as F
import numpy as np
from skimage.metrics import structural_similarity as ssim
import matplotlib.pyplot as plt
import sys
tomer_path = r"C:\Users\tomer\Documents\Final_project_git\federated_learning_uveqfed_dlg\Federated-Learning-Natalie"
elad_path = r"/Users/elad.sofer/src/Engineering Project/federated_learning_uveqfed_dlg/Federated-Learning-Natalie"
sys.path.append(elad_path)
sys.path.append(tomer_path)
from federated_utils import PQclass
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = "cpu"
# if torch.cuda.is_available():
# device = "cuda"
def add_uveqFed(original_dy_dx, epsilon, bit_rate, args):
noised_dy_dx = []
args.epsilon = epsilon
args.R = bit_rate
noiser = PQclass(args)
for g in original_dy_dx:
if args.attack=='JOPEQ':
output, dither = noiser(g)
noised_dy_dx.append(output - dither)
# output = noiser.apply_quantization(g)
# noised_dy_dx.append(output)
elif args.attack=="quantization":
# quantization only
output = noiser.apply_quantization(g)
noised_dy_dx.append(output)
else: # ppn only
output = noiser.apply_privacy_noise(g)
noised_dy_dx.append(output)
return noised_dy_dx
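# add_uveqFed perturbs each gradient tensor with a PQclass noiser: for 'JOPEQ' the
# gradient is passed through the noiser and the returned dither is subtracted, for
# 'quantization' only quantization is applied, otherwise only privacy noise is added.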
def mse(imageA, imageB):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
class dlg_cls():
def __init__(self,model=None, train_loader=None, test_loader=None, args=None, noise_func = lambda x, y, z, l: x):
self.dst = getattr(datasets, args.dataset)("~/.torch", download=True)
self.tp = transforms.ToTensor()
self.tt = transforms.ToPILImage()
self.args = args
self.model = model
self.train_loader = train_loader
self.test_loader = test_loader
self.noise_func = noise_func
def __call__(self, img_index, seed=1234,learning_epoches=0,read_grads= -1, epsilon=0, bit_rate=1,num_of_iterations=200):
self.load_image(img_index)
self.config_model(None,seed)
self.train_model(learning_epoches)
if (read_grads == -1):
self.compute_gradients()
else:
self.load_model_and_gradients(read_grads)
self.apply_noise(epsilon,bit_rate)
return self.dlg(num_of_iterations=num_of_iterations)
def load_image(self, img_index):
self.img_index = img_index
self.gt_data = self.tp(self.dst[img_index][0]).to(device)
if len(self.args.image) > 1:
self.gt_data = Image.open(self.args.image)
self.gt_data = self.tp(self.gt_data).to(device)
self.gt_data = self.gt_data.view(1, *self.gt_data.size())
self.gt_label = torch.Tensor([self.dst[img_index][1]]).long().to(device)
self.gt_label = self.gt_label.view(1, )
self.gt_onehot_label = label_to_onehot(self.gt_label)
return self.dst[self.img_index][0]
def config_model(self,model=None,seed=1234):
if model == None:
self.model = LeNet().to(device)
else:
self.model = model
torch.manual_seed(seed)
self.model.apply(weights_init)
self.model.to(device)
self.criterion = cross_entropy_for_onehot
self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
def train_model(self,learning_epoches=0):
if (learning_epoches > 0):
self.model.train_nn(
train_loader=self.train_loader,
optimizer=self.optimizer,
criterion=self.criterion,
epoch_num=learning_epoches,
test_loader=self.test_loader)
return self.model.test_nn(self.test_loader, self.criterion)
def compute_gradients(self):
self.pred = self.model(self.gt_data)
y = self.criterion(self.pred, self.gt_onehot_label)
self.dy_dx = torch.autograd.grad(y, self.model.parameters())
self.original_dy_dx = self.dy_dx
return self.dy_dx
def load_model_and_gradients(self,read_grads):
grad_checkpoint_address = "./fed-ler_checkpoints/grad/checkpoint{0}_{1}.pk".format(model_number, read_grads)
global_checkpoint_address = "./fed-ler_checkpoints/global/checkpoint{0}_{1}.pk".format(model_number, read_grads)
fed_ler_grad_state_dict = torch.load(grad_checkpoint_address)
global_model = torch.load(global_checkpoint_address)
self.model = global_model
# luckily the state dict is saved in exactly the same order as the gradients are so we can easily transfer them
self.dy_dx = tuple([fed_ler_grad_state_dict[key] for key in fed_ler_grad_state_dict.keys()])
return self.dy_dx
def apply_noise(self, epsilon, bit_rate, noise_func = None, args = None):
if noise_func != None:
self.noise_func = noise_func
if args != None:
self.args = args
if (epsilon > 0 or self.args.attack=="quantization"):
self.original_dy_dx = self.noise_func(list((_.detach().clone() for _ in self.dy_dx)), epsilon, bit_rate, self.args)
else:
self.original_dy_dx = self.dy_dx
def dlg(self,num_of_iterations = 200):
# generate dummy data and label
dummy_data = torch.randn(self.gt_data.size()).to(device).requires_grad_(True)
dummy_label = torch.randn(self.gt_onehot_label.size()).to(device).requires_grad_(True)
# plt.figure()
# plt.imshow(tt(dummy_data[0].cpu()))
optimizer = torch.optim.LBFGS([dummy_data, dummy_label])
# history = []
current_loss = torch.Tensor([1])
iters = 0
MSE=0
SSIM=0
# while (iters < num_of_iterations):
while (current_loss.item() > 0.00001 and iters < num_of_iterations):
def closure():
optimizer.zero_grad()
dummy_pred = self.model(dummy_data)
dummy_onehot_label = F.softmax(dummy_label, dim=-1)
dummy_loss = self.criterion(dummy_pred, dummy_onehot_label)
dummy_dy_dx = torch.autograd.grad(dummy_loss, self.model.parameters(), create_graph=True)
grad_diff = 0
for gx, gy in zip(dummy_dy_dx, self.original_dy_dx):
grad_diff += ((gx - gy) ** 2).sum()
grad_diff.backward()
return grad_diff
optimizer.step(closure)
if iters % 10 == 0:
current_loss = closure()
reconstructedIm = np.asarray(self.tt(dummy_data[0].cpu()))
RecImShape = reconstructedIm.shape
groundTruthIm = np.asarray(self.dst[self.img_index][0]).reshape((RecImShape[0], RecImShape[1], RecImShape[2]))
MSE = mse(reconstructedIm,groundTruthIm)
SSIM = ssim(reconstructedIm,groundTruthIm,channel_axis=2, multichannel=True)
print(iters, "%.4f" % current_loss.item()," MSE {0:.4f}, SSIM {1:.4f}".format(MSE,SSIM))
# history.append(self.tt(dummy_data[0].cpu()))
iters = iters + 1
self.final_image = self.tt(dummy_data[0].cpu())
return current_loss.item(), MSE, SSIM
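# Typical use of dlg_cls (illustrative sketch, not part of the original script):
#   attacker = dlg_cls(model, train_loader, test_loader, args)
#   loss, mse_val, ssim_val = attacker(img_index)
# __call__ loads the image, configures/trains the model, computes or loads the
# gradients, applies the noise and finally runs the DLG reconstruction in dlg().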
def run_dlg(img_index, model=None, train_loader=None, test_loader=None, noise_func = lambda x, y, z: x, learning_epoches = 0, epsilon=0.1, bit_rate=1,read_grads=-1,model_number=0):
gt_data = tp(dst[img_index][0]).to(device)
if len(args.image) > 1:
gt_data = Image.open(args.image)
gt_data = tp(gt_data).to(device)
gt_data = gt_data.view(1, *gt_data.size())
gt_label = torch.Tensor([dst[img_index][1]]).long().to(device)
gt_label = gt_label.view(1, )
gt_onehot_label = label_to_onehot(gt_label)
#################### Model Configuration ####################
model = LeNet().to(device)
torch.manual_seed(1234)
model.apply(weights_init)
criterion = cross_entropy_for_onehot
optimizer = optim.Adam(model.parameters(), lr=0.001)
if (read_grads == -1):# run the original images
#################### Train & Test ####################
if (learning_epoches >0):
model.train_nn(train_loader=train_loader, optimizer=optimizer, criterion=criterion, epoch_num=learning_epoches,test_loader=test_loader)
model.test_nn(test_loader,criterion)
######################################################
# compute original gradient
pred = model(gt_data)
y = criterion(pred, gt_onehot_label)
dy_dx = torch.autograd.grad(y, model.parameters())
else: # get the images from the fed-learn
grad_checkpoint_address = "./fed-ler_checkpoints/grad/checkpoint{0}_{1}.pk".format(model_number,read_grads)
global_checkpoint_address = "./fed-ler_checkpoints/global/checkpoint{0}_{1}.pk".format(model_number,read_grads)
fed_ler_grad_state_dict = torch.load(grad_checkpoint_address)
global_model = torch.load(global_checkpoint_address)
model =global_model
# luckily the state dict is saved in exactly the same order as the gradients are so we can easily transfer them
dy_dx = tuple([fed_ler_grad_state_dict[key] for key in fed_ler_grad_state_dict.keys()])
#################### adding noise ####################
if (epsilon > 0):
original_dy_dx = noise_func(list((_.detach().clone() for _ in dy_dx)), epsilon, bit_rate)
else:
original_dy_dx = dy_dx
#### adding noise!! ####
#original_dy_dx = [w_layer + torch.normal(mean = 0, std= 0.01,size = w_layer.shape) for w_layer in original_dy_dx]
#original_dy_dx = [w_layer+np.random.laplace(0,epsilon,w_layer.shape) for w_layer in original_dy_dx]
######################################################
# generate dummy data and label
dummy_data = torch.randn(gt_data.size()).to(device).requires_grad_(True)
dummy_label = torch.randn(gt_onehot_label.size()).to(device).requires_grad_(True)
plt.figure()
plt.imshow(tt(dummy_data[0].cpu()))
# plt.imshow(tt(dummy_data[0].cpu()))
optimizer = torch.optim.LBFGS([dummy_data, dummy_label])
history = []
current_loss = torch.Tensor([1])
iters = 0
#for iters in range(num_of_iterations):
# while (iters < num_of_iterations):
while (current_loss.item()>0.00001 and iters < num_of_iterations):
def closure():
optimizer.zero_grad()
dummy_pred = model(dummy_data)
dummy_onehot_label = F.softmax(dummy_label, dim=-1)
dummy_loss = criterion(dummy_pred, dummy_onehot_label)
dummy_dy_dx = torch.autograd.grad(dummy_loss, model.parameters(), create_graph=True)
grad_diff = 0
for gx, gy in zip(dummy_dy_dx, original_dy_dx):
grad_diff += ((gx - gy) ** 2).sum()
grad_diff.backward()
return grad_diff
optimizer.step(closure)
if iters % 10 == 0:
current_loss = closure()
print(iters, "%.4f" % current_loss.item())
history.append(tt(dummy_data[0].cpu()))
iters = iters + 1
# plt.figure()
# plt.subplot(1, 2, 1)
# plt.imshow(tt(dummy_data[0].cpu()))
# plt.axis('off')
#
# plt.subplot(1, 2, 2)
# plt.imshow(dst[img_index][0])
# plt.axis('off')
# plt.figure(figsize=(12, 8))
# for i in range(round(iters / 10)):
# plt.subplot(int(np.ceil(iters / 100)), 10, i + 1)
# plt.imshow(history[i])
# plt.title("iter=%d" % (i * 10))
# plt.axis('off')
return current_loss.item()
# l = []
# for i in range(10):
# l.append(test_image(img_index,learning_iterations=500+50*i))
# print(l)
#plt.hist([7 if (x>5) else x for x in l])
# plt.plot(l)
|
[
"torch.manual_seed",
"PIL.Image.open",
"utils.label_to_onehot",
"torchvision.transforms.ToPILImage",
"skimage.metrics.structural_similarity",
"vision.LeNet",
"torch.load",
"torch.Tensor",
"numpy.asarray",
"matplotlib.pyplot.figure",
"torch.cuda.is_available",
"federated_utils.PQclass",
"torch.optim.LBFGS",
"torchvision.transforms.ToTensor",
"sys.path.append",
"torch.nn.functional.softmax"
] |
[((593, 619), 'sys.path.append', 'sys.path.append', (['elad_path'], {}), '(elad_path)\n', (608, 619), False, 'import sys\n'), ((620, 647), 'sys.path.append', 'sys.path.append', (['tomer_path'], {}), '(tomer_path)\n', (635, 647), False, 'import sys\n'), ((970, 983), 'federated_utils.PQclass', 'PQclass', (['args'], {}), '(args)\n', (977, 983), False, 'from federated_utils import PQclass\n'), ((8284, 8309), 'utils.label_to_onehot', 'label_to_onehot', (['gt_label'], {}), '(gt_label)\n', (8299, 8309), False, 'from utils import label_to_onehot, cross_entropy_for_onehot\n'), ((8414, 8437), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (8431, 8437), False, 'import torch\n'), ((10542, 10554), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10552, 10554), True, 'import matplotlib.pyplot as plt\n'), ((10654, 10698), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['[dummy_data, dummy_label]'], {}), '([dummy_data, dummy_label])\n', (10671, 10698), False, 'import torch\n'), ((10737, 10754), 'torch.Tensor', 'torch.Tensor', (['[1]'], {}), '([1])\n', (10749, 10754), False, 'import torch\n'), ((719, 744), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (742, 744), False, 'import torch\n'), ((2196, 2217), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2215, 2217), False, 'from torchvision import datasets, transforms\n'), ((2236, 2259), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (2257, 2259), False, 'from torchvision import datasets, transforms\n'), ((3428, 3458), 'utils.label_to_onehot', 'label_to_onehot', (['self.gt_label'], {}), '(self.gt_label)\n', (3443, 3458), False, 'from utils import label_to_onehot, cross_entropy_for_onehot\n'), ((3675, 3698), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3692, 3698), False, 'import torch\n'), ((4897, 4932), 'torch.load', 'torch.load', (['grad_checkpoint_address'], {}), '(grad_checkpoint_address)\n', (4907, 4932), False, 'import torch\n'), ((4957, 4994), 'torch.load', 'torch.load', (['global_checkpoint_address'], {}), '(global_checkpoint_address)\n', (4967, 4994), False, 'import torch\n'), ((6083, 6127), 'torch.optim.LBFGS', 'torch.optim.LBFGS', (['[dummy_data, dummy_label]'], {}), '([dummy_data, dummy_label])\n', (6100, 6127), False, 'import torch\n'), ((6175, 6192), 'torch.Tensor', 'torch.Tensor', (['[1]'], {}), '([1])\n', (6187, 6192), False, 'import torch\n'), ((8049, 8071), 'PIL.Image.open', 'Image.open', (['args.image'], {}), '(args.image)\n', (8059, 8071), False, 'from PIL import Image\n'), ((9462, 9497), 'torch.load', 'torch.load', (['grad_checkpoint_address'], {}), '(grad_checkpoint_address)\n', (9472, 9497), False, 'import torch\n'), ((9523, 9560), 'torch.load', 'torch.load', (['global_checkpoint_address'], {}), '(global_checkpoint_address)\n', (9533, 9560), False, 'import torch\n'), ((3114, 3141), 'PIL.Image.open', 'Image.open', (['self.args.image'], {}), '(self.args.image)\n', (3124, 3141), False, 'from PIL import Image\n'), ((8390, 8397), 'vision.LeNet', 'LeNet', ([], {}), '()\n', (8395, 8397), False, 'from vision import LeNet, CNN, weights_init\n'), ((11060, 11090), 'torch.nn.functional.softmax', 'F.softmax', (['dummy_label'], {'dim': '(-1)'}), '(dummy_label, dim=-1)\n', (11069, 11090), True, 'import torch.nn.functional as F\n'), ((6518, 6548), 'torch.nn.functional.softmax', 'F.softmax', (['dummy_label'], {'dim': '(-1)'}), '(dummy_label, dim=-1)\n', (6527, 6548), True, 'import torch.nn.functional as 
F\n'), ((7401, 7472), 'skimage.metrics.structural_similarity', 'ssim', (['reconstructedIm', 'groundTruthIm'], {'channel_axis': '(2)', 'multichannel': '(True)'}), '(reconstructedIm, groundTruthIm, channel_axis=2, multichannel=True)\n', (7405, 7472), True, 'from skimage.metrics import structural_similarity as ssim\n'), ((3603, 3610), 'vision.LeNet', 'LeNet', ([], {}), '()\n', (3608, 3610), False, 'from vision import LeNet, CNN, weights_init\n'), ((8176, 8209), 'torch.Tensor', 'torch.Tensor', (['[dst[img_index][1]]'], {}), '([dst[img_index][1]])\n', (8188, 8209), False, 'import torch\n'), ((3292, 3330), 'torch.Tensor', 'torch.Tensor', (['[self.dst[img_index][1]]'], {}), '([self.dst[img_index][1]])\n', (3304, 3330), False, 'import torch\n'), ((7226, 7265), 'numpy.asarray', 'np.asarray', (['self.dst[self.img_index][0]'], {}), '(self.dst[self.img_index][0])\n', (7236, 7265), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
from model_fn.model_fn_nlp.util_nlp.attention import MultiHeadAttention
def get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model))
return pos * angle_rates
def positional_encoding(position, d_model):
angle_rads = get_angles(np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
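# Together, get_angles and positional_encoding implement the standard sinusoidal
# position encoding: PE(pos, 2i) = sin(pos / 10000^(2i/d_model)) and
# PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model)).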
class EncoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn1 = tf.keras.layers.Dense(dff, activation='relu') # (batch_size, seq_len, dff)
self.ffn2 = tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def call(self, inputs, training):
x = inputs['x']
mask = inputs['mask']
attn_output, _ = self.mha({'q': x, 'k': x, 'v': x, 'mask': mask}) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
ffn_output_pre = self.ffn1(out1) # (batch_size, input_seq_len, dff)
ffn_output = self.ffn2(ffn_output_pre) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
return out2
class Encoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,
maximum_position_encoding, rate=0.1):
super(Encoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
self.pos_encoding = positional_encoding(maximum_position_encoding,
self.d_model)
self.enc_layers = [EncoderLayer(d_model, num_heads, dff, rate)
for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, inputs, training):
x = inputs['x']
mask = inputs['mask']
seq_len = tf.shape(x)[1]
# adding embedding and position encoding.
x = self.embedding(x) # (batch_size, input_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
# x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i]({'x': x, 'mask': mask}, training)
return x # (batch_size, input_seq_len, d_model)
class AlbertEncoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, emb_dim, num_heads, dff, input_vocab_size,
maximum_position_encoding, rate=0.1):
super(AlbertEncoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(input_vocab_size, emb_dim)
self.pos_encoding = positional_encoding(maximum_position_encoding,
emb_dim)
self.projection_layer = tf.keras.layers.Dense(d_model)
self.shared_enc_layer = EncoderLayer(d_model, num_heads, dff, rate)
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, inputs, training):
x = inputs['x']
mask = inputs['mask']
seq_len = tf.shape(x)[1]
# adding embedding and position encoding.
x = self.embedding(x) # (batch_size, input_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x= self.projection_layer(x)
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.shared_enc_layer({'x': x, 'mask': mask}, training)
return x
class DecoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1):
super(DecoderLayer, self).__init__()
self.mha1 = MultiHeadAttention(d_model, num_heads)
self.mha2 = MultiHeadAttention(d_model, num_heads)
self.ffn1 = tf.keras.layers.Dense(dff, activation='relu') # (batch_size, seq_len, dff)
self.ffn2 = tf.keras.layers.Dense(d_model) # (batch_size, seq_len, d_model)
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
self.dropout3 = tf.keras.layers.Dropout(rate)
def call(self, inputs, training):
x = inputs['x']
enc_output = inputs['enc_output']
look_ahead_mask = inputs['look_ahead_mask']
padding_mask = inputs['padding_mask']
# enc_output.shape == (batch_size, input_seq_len, d_model)
attn1, attn_weights_block1 = self.mha1(
{'q': x, 'k': x, 'v': x, 'mask': look_ahead_mask}) # (batch_size, target_seq_len, d_model)
attn1 = self.dropout1(attn1, training=training)
out1 = self.layernorm1(attn1 + x)
attn2, attn_weights_block2 = self.mha2({'q': out1, 'k': enc_output, 'v': enc_output,
'mask': padding_mask}) # (batch_size, target_seq_len, d_model)
attn2 = self.dropout2(attn2, training=training)
out2 = self.layernorm2(attn2 + out1) # (batch_size, target_seq_len, d_model)
ffn_output_pre = self.ffn1(out2) # (batch_size, input_seq_len, dff)
ffn_output = self.ffn2(ffn_output_pre) # (batch_size, input_seq_len, d_model)
ffn_output = self.dropout3(ffn_output, training=training)
out3 = self.layernorm3(ffn_output + out2) # (batch_size, target_seq_len, d_model)
return out3, attn_weights_block1, attn_weights_block2
class Decoder(tf.keras.layers.Layer):
def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,
maximum_position_encoding, rate=0.1):
super(Decoder, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
self.pos_encoding = positional_encoding(maximum_position_encoding, d_model)
self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
for _ in range(num_layers)]
self.dropout = tf.keras.layers.Dropout(rate)
def call(self, inputs, training):
x = inputs['tar']
enc_output = inputs['enc_output']
look_ahead_mask = inputs['look_ahead_mask']
padding_mask = inputs['padding_mask']
seq_len = tf.shape(x)[1]
attention_weights = {}
x = self.embedding(x) # (batch_size, target_seq_len, d_model)
x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x, block1, block2 = self.dec_layers[i](
{'x': x, 'enc_output': enc_output, 'look_ahead_mask': look_ahead_mask, 'padding_mask': padding_mask},
training)
attention_weights['decoder_layer{}_block1'.format(i + 1)] = block1
attention_weights['decoder_layer{}_block2'.format(i + 1)] = block2
# x.shape == (batch_size, target_seq_len, d_model)
return x, attention_weights
|
[
"tensorflow.shape",
"numpy.arange",
"model_fn.model_fn_nlp.util_nlp.attention.MultiHeadAttention",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.Dense",
"numpy.cos",
"numpy.sin",
"tensorflow.cast",
"numpy.float32",
"tensorflow.keras.layers.LayerNormalization"
] |
[((541, 568), 'numpy.sin', 'np.sin', (['angle_rads[:, 0::2]'], {}), '(angle_rads[:, 0::2])\n', (547, 568), True, 'import numpy as np\n'), ((646, 673), 'numpy.cos', 'np.cos', (['angle_rads[:, 1::2]'], {}), '(angle_rads[:, 1::2])\n', (652, 673), True, 'import numpy as np\n'), ((734, 773), 'tensorflow.cast', 'tf.cast', (['pos_encoding'], {'dtype': 'tf.float32'}), '(pos_encoding, dtype=tf.float32)\n', (741, 773), True, 'import tensorflow as tf\n'), ((943, 981), 'model_fn.model_fn_nlp.util_nlp.attention.MultiHeadAttention', 'MultiHeadAttention', (['d_model', 'num_heads'], {}), '(d_model, num_heads)\n', (961, 981), False, 'from model_fn.model_fn_nlp.util_nlp.attention import MultiHeadAttention\n'), ((1002, 1047), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['dff'], {'activation': '"""relu"""'}), "(dff, activation='relu')\n", (1023, 1047), True, 'import tensorflow as tf\n'), ((1098, 1128), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (1119, 1128), True, 'import tensorflow as tf\n'), ((1190, 1239), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (1224, 1239), True, 'import tensorflow as tf\n'), ((1265, 1314), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (1299, 1314), True, 'import tensorflow as tf\n'), ((1339, 1368), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (1362, 1368), True, 'import tensorflow as tf\n'), ((1393, 1422), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (1416, 1422), True, 'import tensorflow as tf\n'), ((2437, 2489), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['input_vocab_size', 'd_model'], {}), '(input_vocab_size, d_model)\n', (2462, 2489), True, 'import tensorflow as tf\n'), ((2778, 2807), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (2801, 2807), True, 'import tensorflow as tf\n'), ((3710, 3762), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['input_vocab_size', 'emb_dim'], {}), '(input_vocab_size, emb_dim)\n', (3735, 3762), True, 'import tensorflow as tf\n'), ((3927, 3957), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (3948, 3957), True, 'import tensorflow as tf\n'), ((4059, 4088), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (4082, 4088), True, 'import tensorflow as tf\n'), ((4831, 4869), 'model_fn.model_fn_nlp.util_nlp.attention.MultiHeadAttention', 'MultiHeadAttention', (['d_model', 'num_heads'], {}), '(d_model, num_heads)\n', (4849, 4869), False, 'from model_fn.model_fn_nlp.util_nlp.attention import MultiHeadAttention\n'), ((4890, 4928), 'model_fn.model_fn_nlp.util_nlp.attention.MultiHeadAttention', 'MultiHeadAttention', (['d_model', 'num_heads'], {}), '(d_model, num_heads)\n', (4908, 4928), False, 'from model_fn.model_fn_nlp.util_nlp.attention import MultiHeadAttention\n'), ((4950, 4995), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['dff'], {'activation': '"""relu"""'}), "(dff, activation='relu')\n", (4971, 4995), True, 'import tensorflow as tf\n'), ((5046, 5076), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d_model'], {}), '(d_model)\n', (5067, 5076), True, 'import tensorflow as tf\n'), ((5138, 5187), 
'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (5172, 5187), True, 'import tensorflow as tf\n'), ((5213, 5262), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (5247, 5262), True, 'import tensorflow as tf\n'), ((5288, 5337), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': '(1e-06)'}), '(epsilon=1e-06)\n', (5322, 5337), True, 'import tensorflow as tf\n'), ((5362, 5391), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (5385, 5391), True, 'import tensorflow as tf\n'), ((5416, 5445), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (5439, 5445), True, 'import tensorflow as tf\n'), ((5470, 5499), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (5493, 5499), True, 'import tensorflow as tf\n'), ((7063, 7116), 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['target_vocab_size', 'd_model'], {}), '(target_vocab_size, d_model)\n', (7088, 7116), True, 'import tensorflow as tf\n'), ((7351, 7380), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['rate'], {}), '(rate)\n', (7374, 7380), True, 'import tensorflow as tf\n'), ((329, 348), 'numpy.arange', 'np.arange', (['position'], {}), '(position)\n', (338, 348), True, 'import numpy as np\n'), ((393, 411), 'numpy.arange', 'np.arange', (['d_model'], {}), '(d_model)\n', (402, 411), True, 'import numpy as np\n'), ((2919, 2930), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (2927, 2930), True, 'import tensorflow as tf\n'), ((3081, 3114), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (3088, 3114), True, 'import tensorflow as tf\n'), ((4200, 4211), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (4208, 4211), True, 'import tensorflow as tf\n'), ((4362, 4395), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (4369, 4395), True, 'import tensorflow as tf\n'), ((7605, 7616), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (7613, 7616), True, 'import tensorflow as tf\n'), ((7749, 7782), 'tensorflow.cast', 'tf.cast', (['self.d_model', 'tf.float32'], {}), '(self.d_model, tf.float32)\n', (7756, 7782), True, 'import tensorflow as tf\n'), ((205, 224), 'numpy.float32', 'np.float32', (['d_model'], {}), '(d_model)\n', (215, 224), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
Example of training DCGAN on MNIST using PBT with Tune's function API.
"""
import ray
from ray import tune
from ray.tune.schedulers import PopulationBasedTraining
import argparse
import os
from filelock import FileLock
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import numpy as np
from common import beta1, MODEL_PATH
from common import demo_gan, get_data_loader, plot_images, train, weights_init
from common import Discriminator, Generator, Net
# __Train_begin__
def dcgan_train(config, checkpoint_dir=None):
step = 0
use_cuda = config.get("use_gpu") and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
netD = Discriminator().to(device)
netD.apply(weights_init)
netG = Generator().to(device)
netG.apply(weights_init)
criterion = nn.BCELoss()
optimizerD = optim.Adam(
netD.parameters(), lr=config.get("lr", 0.01), betas=(beta1, 0.999))
optimizerG = optim.Adam(
netG.parameters(), lr=config.get("lr", 0.01), betas=(beta1, 0.999))
with FileLock(os.path.expanduser("~/.data.lock")):
dataloader = get_data_loader()
if checkpoint_dir is not None:
path = os.path.join(checkpoint_dir, "checkpoint")
checkpoint = torch.load(path)
netD.load_state_dict(checkpoint["netDmodel"])
netG.load_state_dict(checkpoint["netGmodel"])
optimizerD.load_state_dict(checkpoint["optimD"])
optimizerG.load_state_dict(checkpoint["optimG"])
step = checkpoint["step"]
if "netD_lr" in config:
for param_group in optimizerD.param_groups:
param_group["lr"] = config["netD_lr"]
if "netG_lr" in config:
for param_group in optimizerG.param_groups:
param_group["lr"] = config["netG_lr"]
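    # Train indefinitely; Ray Tune ends the trial through the `stop` criteria and PBT
    # perturbs the learning rates between the checkpoints/metrics reported below.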
while True:
lossG, lossD, is_score = train(netD, netG, optimizerG, optimizerD,
criterion, dataloader, step, device,
config["mnist_model_ref"])
step += 1
with tune.checkpoint_dir(step=step) as checkpoint_dir:
path = os.path.join(checkpoint_dir, "checkpoint")
torch.save({
"netDmodel": netD.state_dict(),
"netGmodel": netG.state_dict(),
"optimD": optimizerD.state_dict(),
"optimG": optimizerG.state_dict(),
"step": step,
}, path)
tune.report(lossg=lossG, lossd=lossD, is_score=is_score)
# __Train_end__
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
ray.init()
import urllib.request
# Download a pre-trained MNIST model for inception score calculation.
# This is a tiny model (<100kb).
if not os.path.exists(MODEL_PATH):
print("downloading model")
os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
urllib.request.urlretrieve(
"https://github.com/ray-project/ray/raw/master/python/ray/tune/"
"examples/pbt_dcgan_mnist/mnist_cnn.pt", MODEL_PATH)
dataloader = get_data_loader()
if not args.smoke_test:
plot_images(dataloader)
# __tune_begin__
# load the pretrained mnist classification model for inception_score
mnist_cnn = Net()
mnist_cnn.load_state_dict(torch.load(MODEL_PATH))
mnist_cnn.eval()
# Put the model in Ray object store.
mnist_model_ref = ray.put(mnist_cnn)
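    # Population Based Training: every 5 training iterations, poorly performing trials clone a
    # better trial's checkpoint and mutate the two learning rates using the distributions below.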
scheduler = PopulationBasedTraining(
perturbation_interval=5,
hyperparam_mutations={
# distribution for resampling
"netG_lr": lambda: np.random.uniform(1e-2, 1e-5),
"netD_lr": lambda: np.random.uniform(1e-2, 1e-5),
})
tune_iter = 5 if args.smoke_test else 300
analysis = tune.run(
dcgan_train,
name="pbt_dcgan_mnist",
scheduler=scheduler,
verbose=1,
stop={
"training_iteration": tune_iter,
},
metric="is_score",
mode="max",
num_samples=8,
config={
"netG_lr": tune.choice([0.0001, 0.0002, 0.0005]),
"netD_lr": tune.choice([0.0001, 0.0002, 0.0005]),
"mnist_model_ref": mnist_model_ref
})
# __tune_end__
# demo of the trained Generators
if not args.smoke_test:
all_trials = analysis.trials
checkpoint_paths = [
os.path.join(analysis.get_best_checkpoint(t), "checkpoint")
for t in all_trials
]
demo_gan(analysis, checkpoint_paths)
|
[
"ray.tune.report",
"torch.cuda.is_available",
"common.train",
"common.Discriminator",
"ray.init",
"os.path.exists",
"argparse.ArgumentParser",
"ray.tune.checkpoint_dir",
"os.path.expanduser",
"ray.tune.choice",
"os.path.dirname",
"common.demo_gan",
"common.get_data_loader",
"torch.device",
"common.Generator",
"torch.load",
"os.path.join",
"torch.nn.BCELoss",
"common.Net",
"numpy.random.uniform",
"ray.put",
"common.plot_images"
] |
[((702, 745), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (714, 745), False, 'import torch\n'), ((892, 904), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (902, 904), True, 'import torch.nn as nn\n'), ((2657, 2682), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2680, 2682), False, 'import argparse\n'), ((2832, 2842), 'ray.init', 'ray.init', ([], {}), '()\n', (2840, 2842), False, 'import ray\n'), ((3315, 3332), 'common.get_data_loader', 'get_data_loader', ([], {}), '()\n', (3330, 3332), False, 'from common import demo_gan, get_data_loader, plot_images, train, weights_init\n'), ((3505, 3510), 'common.Net', 'Net', ([], {}), '()\n', (3508, 3510), False, 'from common import Discriminator, Generator, Net\n'), ((3649, 3667), 'ray.put', 'ray.put', (['mnist_cnn'], {}), '(mnist_cnn)\n', (3656, 3667), False, 'import ray\n'), ((663, 688), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (686, 688), False, 'import torch\n'), ((1191, 1208), 'common.get_data_loader', 'get_data_loader', ([], {}), '()\n', (1206, 1208), False, 'from common import demo_gan, get_data_loader, plot_images, train, weights_init\n'), ((1260, 1302), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""checkpoint"""'], {}), "(checkpoint_dir, 'checkpoint')\n", (1272, 1302), False, 'import os\n'), ((1324, 1340), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (1334, 1340), False, 'import torch\n'), ((1932, 2041), 'common.train', 'train', (['netD', 'netG', 'optimizerG', 'optimizerD', 'criterion', 'dataloader', 'step', 'device', "config['mnist_model_ref']"], {}), "(netD, netG, optimizerG, optimizerD, criterion, dataloader, step,\n device, config['mnist_model_ref'])\n", (1937, 2041), False, 'from common import demo_gan, get_data_loader, plot_images, train, weights_init\n'), ((2541, 2597), 'ray.tune.report', 'tune.report', ([], {'lossg': 'lossG', 'lossd': 'lossD', 'is_score': 'is_score'}), '(lossg=lossG, lossd=lossD, is_score=is_score)\n', (2552, 2597), False, 'from ray import tune\n'), ((2992, 3018), 'os.path.exists', 'os.path.exists', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (3006, 3018), False, 'import os\n'), ((3369, 3392), 'common.plot_images', 'plot_images', (['dataloader'], {}), '(dataloader)\n', (3380, 3392), False, 'from common import demo_gan, get_data_loader, plot_images, train, weights_init\n'), ((3541, 3563), 'torch.load', 'torch.load', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (3551, 3563), False, 'import torch\n'), ((4737, 4773), 'common.demo_gan', 'demo_gan', (['analysis', 'checkpoint_paths'], {}), '(analysis, checkpoint_paths)\n', (4745, 4773), False, 'from common import demo_gan, get_data_loader, plot_images, train, weights_init\n'), ((757, 772), 'common.Discriminator', 'Discriminator', ([], {}), '()\n', (770, 772), False, 'from common import Discriminator, Generator, Net\n'), ((824, 835), 'common.Generator', 'Generator', ([], {}), '()\n', (833, 835), False, 'from common import Discriminator, Generator, Net\n'), ((1133, 1167), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.data.lock"""'], {}), "('~/.data.lock')\n", (1151, 1167), False, 'import os\n'), ((2147, 2177), 'ray.tune.checkpoint_dir', 'tune.checkpoint_dir', ([], {'step': 'step'}), '(step=step)\n', (2166, 2177), False, 'from ray import tune\n'), ((2216, 2258), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""checkpoint"""'], {}), "(checkpoint_dir, 'checkpoint')\n", (2228, 2258), False, 'import os\n'), ((3075, 3102), 
'os.path.dirname', 'os.path.dirname', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (3090, 3102), False, 'import os\n'), ((4305, 4342), 'ray.tune.choice', 'tune.choice', (['[0.0001, 0.0002, 0.0005]'], {}), '([0.0001, 0.0002, 0.0005])\n', (4316, 4342), False, 'from ray import tune\n'), ((4367, 4404), 'ray.tune.choice', 'tune.choice', (['[0.0001, 0.0002, 0.0005]'], {}), '([0.0001, 0.0002, 0.0005])\n', (4378, 4404), False, 'from ray import tune\n'), ((3847, 3877), 'numpy.random.uniform', 'np.random.uniform', (['(0.01)', '(1e-05)'], {}), '(0.01, 1e-05)\n', (3864, 3877), True, 'import numpy as np\n'), ((3909, 3939), 'numpy.random.uniform', 'np.random.uniform', (['(0.01)', '(1e-05)'], {}), '(0.01, 1e-05)\n', (3926, 3939), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import copy
import numpy as np
import torch
from lottery.branch import base
import models.registry
from pruning.mask import Mask
from pruning.pruned_model import PrunedModel
from training import train
from lottery.branch.morphism import change_depth
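# Interpolate the weights of two pruned models elementwise (alpha * a + (1 - alpha) * b)
# while keeping the pruning masks, which are asserted to be identical, untouched.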
def linear_interpolate(pruned_model_a, pruned_model_b, alpha):
model_c = copy.deepcopy(pruned_model_a)
sd_c = model_c.state_dict()
for (ka, va), (kb, vb) in zip(pruned_model_a.state_dict().items(), pruned_model_b.state_dict().items()):
assert ka == kb
if 'mask' in ka:
assert torch.all(va == vb)
sd_c[ka].data = va.detach().clone()
else:
sd_c[ka].data = (va * alpha + vb * (1 - alpha)).detach().clone()
model_c.load_state_dict(sd_c)
return model_c
def parse_block_mapping_for_stage(string):
mapping = dict()
mapping_strs = string.split(';')
try:
for s in mapping_strs:
src_id_str, tgt_ids_str = s.split(':')
src_id = int(src_id_str)
tgt_ids = [int(t) for t in tgt_ids_str.split(',')]
mapping[src_id] = tgt_ids
return mapping
    except Exception:
raise RuntimeError('Invalid block mapping string.')
class Branch(base.Branch):
def branch_function(
self,
target_model_name: str = None,
block_mapping: str = None,
start_at_step_zero: bool = False,
data_seed: int = 118
):
# Process the mapping
# A valid string format of a mapping is like:
# `0:0;1:1,2;2:3,4;3:5,6;4:7,8`
if 'cifar' in target_model_name and 'resnet' in target_model_name:
mappings = parse_block_mapping_for_stage(block_mapping)
elif 'imagenet' in target_model_name and 'resnet' in target_model_name:
mappings = list(map(parse_block_mapping_for_stage, block_mapping.split('|')))
elif 'cifar' in target_model_name and 'vggnfc' in target_model_name:
mappings = parse_block_mapping_for_stage(block_mapping)
elif 'cifar' in target_model_name and 'vgg' in target_model_name:
mappings = list(map(parse_block_mapping_for_stage, block_mapping.split('|')))
elif 'cifar' in target_model_name and 'mobilenetv1' in target_model_name:
mappings = parse_block_mapping_for_stage(block_mapping)
elif 'mnist' in target_model_name and 'lenet' in target_model_name:
mappings = parse_block_mapping_for_stage(block_mapping)
else:
raise NotImplementedError('Other mapping cases not implemented yet')
# Load source model at `train_start_step`
src_mask = Mask.load(self.level_root)
start_step = self.lottery_desc.str_to_step('0it') if start_at_step_zero else self.lottery_desc.train_start_step
# model = PrunedModel(models.registry.get(self.lottery_desc.model_hparams), src_mask)
src_model = models.registry.load(self.level_root, start_step, self.lottery_desc.model_hparams)
# Create target model
target_model_hparams = copy.deepcopy(self.lottery_desc.model_hparams)
target_model_hparams.model_name = target_model_name
target_model = models.registry.get(target_model_hparams)
target_ones_mask = Mask.ones_like(target_model)
# Do the morphism
target_sd = change_depth(target_model_name, src_model.state_dict(), target_model.state_dict(), mappings)
target_model.load_state_dict(target_sd)
target_mask = change_depth(target_model_name, src_mask, target_ones_mask, mappings)
target_model_a = PrunedModel(target_model, target_mask)
target_model_b = copy.deepcopy(target_model_a)
# Save and run a standard train on model a
seed_a = data_seed + 9999
training_hparams_a = copy.deepcopy(self.lottery_desc.training_hparams)
training_hparams_a.data_order_seed = seed_a
output_dir_a = os.path.join(self.branch_root, f'seed_{seed_a}')
target_mask.save(output_dir_a)
train.standard_train(target_model_a, output_dir_a, self.lottery_desc.dataset_hparams,
training_hparams_a, start_step=start_step, verbose=self.verbose)
# Save and run a standard train on model b
seed_b = data_seed + 10001
training_hparams_b = copy.deepcopy(self.lottery_desc.training_hparams)
training_hparams_b.data_order_seed = seed_b
output_dir_b = os.path.join(self.branch_root, f'seed_{seed_b}')
target_mask.save(output_dir_b)
train.standard_train(target_model_b, output_dir_b, self.lottery_desc.dataset_hparams,
training_hparams_b, start_step=start_step, verbose=self.verbose)
# Linear connectivity between model_a and model_b
training_hparams_c = copy.deepcopy(self.lottery_desc.training_hparams)
training_hparams_c.training_steps = '1ep'
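        # Sweep 21 evenly spaced interpolation coefficients between the two trained models.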
for alpha in np.linspace(0, 1.0, 21):
model_c = linear_interpolate(target_model_a, target_model_b, alpha)
output_dir_c = os.path.join(self.branch_root, f'alpha_{alpha}')
# Measure acc of model_c
train.standard_train(model_c, output_dir_c, self.lottery_desc.dataset_hparams,
training_hparams_c, start_step=None, verbose=self.verbose)
@staticmethod
def description():
return "Change the depth of the source network and do linear connectivity exp."
@staticmethod
def name():
return 'change_depth_linear_connect'
|
[
"lottery.branch.morphism.change_depth",
"pruning.mask.Mask.ones_like",
"pruning.mask.Mask.load",
"os.path.join",
"numpy.linspace",
"pruning.pruned_model.PrunedModel",
"copy.deepcopy",
"training.train.standard_train",
"torch.all"
] |
[((516, 545), 'copy.deepcopy', 'copy.deepcopy', (['pruned_model_a'], {}), '(pruned_model_a)\n', (529, 545), False, 'import copy\n'), ((2821, 2847), 'pruning.mask.Mask.load', 'Mask.load', (['self.level_root'], {}), '(self.level_root)\n', (2830, 2847), False, 'from pruning.mask import Mask\n'), ((3227, 3273), 'copy.deepcopy', 'copy.deepcopy', (['self.lottery_desc.model_hparams'], {}), '(self.lottery_desc.model_hparams)\n', (3240, 3273), False, 'import copy\n'), ((3426, 3454), 'pruning.mask.Mask.ones_like', 'Mask.ones_like', (['target_model'], {}), '(target_model)\n', (3440, 3454), False, 'from pruning.mask import Mask\n'), ((3665, 3734), 'lottery.branch.morphism.change_depth', 'change_depth', (['target_model_name', 'src_mask', 'target_ones_mask', 'mappings'], {}), '(target_model_name, src_mask, target_ones_mask, mappings)\n', (3677, 3734), False, 'from lottery.branch.morphism import change_depth\n'), ((3760, 3798), 'pruning.pruned_model.PrunedModel', 'PrunedModel', (['target_model', 'target_mask'], {}), '(target_model, target_mask)\n', (3771, 3798), False, 'from pruning.pruned_model import PrunedModel\n'), ((3824, 3853), 'copy.deepcopy', 'copy.deepcopy', (['target_model_a'], {}), '(target_model_a)\n', (3837, 3853), False, 'import copy\n'), ((3969, 4018), 'copy.deepcopy', 'copy.deepcopy', (['self.lottery_desc.training_hparams'], {}), '(self.lottery_desc.training_hparams)\n', (3982, 4018), False, 'import copy\n'), ((4094, 4142), 'os.path.join', 'os.path.join', (['self.branch_root', 'f"""seed_{seed_a}"""'], {}), "(self.branch_root, f'seed_{seed_a}')\n", (4106, 4142), False, 'import os\n'), ((4190, 4350), 'training.train.standard_train', 'train.standard_train', (['target_model_a', 'output_dir_a', 'self.lottery_desc.dataset_hparams', 'training_hparams_a'], {'start_step': 'start_step', 'verbose': 'self.verbose'}), '(target_model_a, output_dir_a, self.lottery_desc.\n dataset_hparams, training_hparams_a, start_step=start_step, verbose=\n self.verbose)\n', (4210, 4350), False, 'from training import train\n'), ((4486, 4535), 'copy.deepcopy', 'copy.deepcopy', (['self.lottery_desc.training_hparams'], {}), '(self.lottery_desc.training_hparams)\n', (4499, 4535), False, 'import copy\n'), ((4611, 4659), 'os.path.join', 'os.path.join', (['self.branch_root', 'f"""seed_{seed_b}"""'], {}), "(self.branch_root, f'seed_{seed_b}')\n", (4623, 4659), False, 'import os\n'), ((4707, 4867), 'training.train.standard_train', 'train.standard_train', (['target_model_b', 'output_dir_b', 'self.lottery_desc.dataset_hparams', 'training_hparams_b'], {'start_step': 'start_step', 'verbose': 'self.verbose'}), '(target_model_b, output_dir_b, self.lottery_desc.\n dataset_hparams, training_hparams_b, start_step=start_step, verbose=\n self.verbose)\n', (4727, 4867), False, 'from training import train\n'), ((4975, 5024), 'copy.deepcopy', 'copy.deepcopy', (['self.lottery_desc.training_hparams'], {}), '(self.lottery_desc.training_hparams)\n', (4988, 5024), False, 'import copy\n'), ((5096, 5119), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(21)'], {}), '(0, 1.0, 21)\n', (5107, 5119), True, 'import numpy as np\n'), ((755, 774), 'torch.all', 'torch.all', (['(va == vb)'], {}), '(va == vb)\n', (764, 774), False, 'import torch\n'), ((5228, 5276), 'os.path.join', 'os.path.join', (['self.branch_root', 'f"""alpha_{alpha}"""'], {}), "(self.branch_root, f'alpha_{alpha}')\n", (5240, 5276), False, 'import os\n'), ((5326, 5468), 'training.train.standard_train', 'train.standard_train', (['model_c', 'output_dir_c', 
'self.lottery_desc.dataset_hparams', 'training_hparams_c'], {'start_step': 'None', 'verbose': 'self.verbose'}), '(model_c, output_dir_c, self.lottery_desc.\n dataset_hparams, training_hparams_c, start_step=None, verbose=self.verbose)\n', (5346, 5468), False, 'from training import train\n')]
|
# ===============================================================================
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
from __future__ import absolute_import
from __future__ import print_function
from numpy import ones, vstack, zeros, hstack
from numpy.random import random
# ============= local library imports ==========================
from pychron.classifier.base_classifier import BaseClassifier
def make_sample(iso):
# print 'make sample {} {} {}'.format(iso.mass, iso.n, iso.intercept_percent_error)
return (
iso.mass,
iso.n,
iso.value,
iso.intercept_percent_error,
iso.get_slope(),
iso.standard_fit_error(),
iso.noutliers(),
)
class IsotopeClassifier(BaseClassifier):
"""
klasses:
0= Bad
1= Good
"""
_clf = None
_persistence_name = "clf.isotope.p"
def classifier_factory(self, klass=None, *args, **kw):
kw["n_neighbors"] = 3
return super(IsotopeClassifier, self).classifier_factory(
klass=klass, *args, **kw
)
def predict_isotope(self, iso):
return self.predict(make_sample(iso))
def add_isotopes(self, isos, klasses):
samples = [make_sample(iso) for iso in isos]
self.add_training_data(samples, klasses)
def fit(self, x, y):
"""
x: 2d array, [n_samples, n_features]
y: 1d array, [n_samples]. class for each sample
:param x:
:param y:
:return:
"""
if not self._clf:
self._clf = self.classifier_factory()
self._clf.fit(x, y)
def predict(self, x):
if self._clf is None:
self.load()
klass = None
prob = 0
print(x)
if self._clf:
klass = int(self._clf.predict(x)[0])
prob = self._clf.predict_proba(x)[0][klass]
return klass, prob
if __name__ == "__main__":
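    # Toy demo: "good" samples have ~100 points and small errors, "bad" ones have large errors
    # or ~10 points; fit the classifier (n_neighbors=3) and predict a few hand-picked points.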
ic = IsotopeClassifier()
nsamples = 20
err = random(size=nsamples)
npts = ones(nsamples) * 100 + random(size=nsamples) * 10
xgood = vstack((npts, err)).T
ygood = ones(nsamples)
err = 1 + random(size=nsamples) * 10
npts = ones(nsamples) * 100 + random(size=nsamples)
xbad = vstack((npts, err)).T
ybad = zeros(nsamples)
npts = ones(nsamples) * 10 + random(size=nsamples)
err = random(size=nsamples) * 10
xbad2 = vstack((npts, err)).T
xx = vstack((xgood, xbad, xbad2))
yy = hstack((ygood, ybad, ybad))
ic.fit(xx, yy)
for pt in ([100, 0.11], [100, 11], [10, 1], [75, 0.5]):
k = ic.predict(pt)
print(pt, k)
# print pt, ic.classify(pt)
# ax = plt.subplot(1, 1, 1)
# ax.scatter(x[:, 0], x[:, 1], c=y)
# plt.show()
# ============= EOF =============================================
|
[
"numpy.ones",
"numpy.hstack",
"numpy.random.random",
"numpy.zeros",
"numpy.vstack"
] |
[((2740, 2761), 'numpy.random.random', 'random', ([], {'size': 'nsamples'}), '(size=nsamples)\n', (2746, 2761), False, 'from numpy.random import random\n'), ((2869, 2883), 'numpy.ones', 'ones', (['nsamples'], {}), '(nsamples)\n', (2873, 2883), False, 'from numpy import ones, vstack, zeros, hstack\n'), ((3026, 3041), 'numpy.zeros', 'zeros', (['nsamples'], {}), '(nsamples)\n', (3031, 3041), False, 'from numpy import ones, vstack, zeros, hstack\n'), ((3179, 3207), 'numpy.vstack', 'vstack', (['(xgood, xbad, xbad2)'], {}), '((xgood, xbad, xbad2))\n', (3185, 3207), False, 'from numpy import ones, vstack, zeros, hstack\n'), ((3217, 3244), 'numpy.hstack', 'hstack', (['(ygood, ybad, ybad)'], {}), '((ygood, ybad, ybad))\n', (3223, 3244), False, 'from numpy import ones, vstack, zeros, hstack\n'), ((2835, 2854), 'numpy.vstack', 'vstack', (['(npts, err)'], {}), '((npts, err))\n', (2841, 2854), False, 'from numpy import ones, vstack, zeros, hstack\n'), ((2960, 2981), 'numpy.random.random', 'random', ([], {'size': 'nsamples'}), '(size=nsamples)\n', (2966, 2981), False, 'from numpy.random import random\n'), ((2993, 3012), 'numpy.vstack', 'vstack', (['(npts, err)'], {}), '((npts, err))\n', (2999, 3012), False, 'from numpy import ones, vstack, zeros, hstack\n'), ((3076, 3097), 'numpy.random.random', 'random', ([], {'size': 'nsamples'}), '(size=nsamples)\n', (3082, 3097), False, 'from numpy.random import random\n'), ((3108, 3129), 'numpy.random.random', 'random', ([], {'size': 'nsamples'}), '(size=nsamples)\n', (3114, 3129), False, 'from numpy.random import random\n'), ((3147, 3166), 'numpy.vstack', 'vstack', (['(npts, err)'], {}), '((npts, err))\n', (3153, 3166), False, 'from numpy import ones, vstack, zeros, hstack\n'), ((2773, 2787), 'numpy.ones', 'ones', (['nsamples'], {}), '(nsamples)\n', (2777, 2787), False, 'from numpy import ones, vstack, zeros, hstack\n'), ((2796, 2817), 'numpy.random.random', 'random', ([], {'size': 'nsamples'}), '(size=nsamples)\n', (2802, 2817), False, 'from numpy.random import random\n'), ((2899, 2920), 'numpy.random.random', 'random', ([], {'size': 'nsamples'}), '(size=nsamples)\n', (2905, 2920), False, 'from numpy.random import random\n'), ((2937, 2951), 'numpy.ones', 'ones', (['nsamples'], {}), '(nsamples)\n', (2941, 2951), False, 'from numpy import ones, vstack, zeros, hstack\n'), ((3054, 3068), 'numpy.ones', 'ones', (['nsamples'], {}), '(nsamples)\n', (3058, 3068), False, 'from numpy import ones, vstack, zeros, hstack\n')]
|
import numpy as np
import zmq
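# Receives numpy arrays over a ZeroMQ SUB socket: each message is a JSON metadata frame
# ('msg', 'dtype', 'shape') followed by the raw array bytes, which recv() reshapes
# according to the metadata.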
class Subscriber:
def __init__(self, address='tcp://127.0.0.1', port=9999):
self.context = zmq.Context()
self.socket = self.context.socket(zmq.SUB)
self.socket.setsockopt(zmq.SUBSCRIBE, b'')
self.socket.connect(f'{address}:{port}')
def recv(self):
metadata, message = self.socket.recv_json(), self.socket.recv(copy=False)
a = np.frombuffer(message, dtype=metadata['dtype'])
return metadata['msg'], a.reshape(metadata['shape'])
def close(self):
self.socket.close()
self.context.term()
|
[
"numpy.frombuffer",
"zmq.Context"
] |
[((135, 148), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (146, 148), False, 'import zmq\n'), ((415, 462), 'numpy.frombuffer', 'np.frombuffer', (['message'], {'dtype': "metadata['dtype']"}), "(message, dtype=metadata['dtype'])\n", (428, 462), True, 'import numpy as np\n')]
|
import os, sys
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import tensorflow as tf
import cv2
import numpy as np
sys.path.insert(1, os.path.join(sys.path[0], '/mywork/tensorflow-tuts/sd19reader'))
from batches2patches_tensorflow import GetFuncToPatches, GetFuncOverlapAdd
from myutils import describe
from vizutils import bw_grid_vis, color_grid_vis
from mypca import my_PCA_scikitlike as PCA
# load image
path2file = os.path.dirname(os.path.realpath(__file__))
inimg = os.path.join(path2file,'Lenna_noise1.png')
testim = cv2.imread(inimg).astype(np.float32) / 255.0
# will use "valid" conv, so pad 1 wide for 3x3 patches
padtest = np.pad(testim, [(1,1), (1,1), (0,0)], 'edge')
# get patching function for local windows
imshape = [int(ii) for ii in padtest.shape]
batchsize = 1
batchimshapefull = [batchsize,]+imshape
patchsize = 3
bordermode = 'valid'
pimshape = (imshape[0]-patchsize+1,imshape[1]-patchsize+1)
reconstrmode = 'full'
N_PCA_COMPS = 6
batchunpadtest = np.expand_dims(testim, 0)
batchtestims = padtest.reshape(batchimshapefull) # only one in batch, so resize the one
featswrtshape = [int(ii) for ii in batchunpadtest.shape]
featswrtshape[-1] = N_PCA_COMPS
patchtheanofunc = GetFuncToPatches(batchimshapefull, patchsize, border_mode=bordermode, filter_flip=False)
overlapaddfunc = GetFuncOverlapAdd(batchimshapefull, patchsize, pimshape, border_mode=reconstrmode, filter_flip=False)
#########################################
# bilateral filter
#tf_stdv_space = tf.get_variable('tf_stdv_space', initializer=tf.constant(1.0))
#tf_stdv_bilat = tf.get_variable('tf_stdv_bilat', initializer=tf.constant(1.0))
tf_placehold_img = tf.placeholder(tf.float32, batchunpadtest.shape, name="tf_placehold_img")
tf_placehold_wrt = tf.placeholder(tf.float32, featswrtshape, name="tf_placehold_wrt")
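# The PCA-projected 3x3 patch features (N_PCA_COMPS values per pixel) are used as the
# 'wrt' (guide) input of the bilateral filter below.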
from test_utils import *
bilateral_filters = load_func_from_lib()
#########################################
# tensorflow sess init
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
outfold = 'test_patch_pca'
#########################################
# compute patch PCA
patches = patchtheanofunc(batchtestims)
print(" ")
describe("patches",patches)
flatpatches = patches.reshape((patches.shape[0]*patches.shape[1]*patches.shape[2], np.prod(patches.shape[3:])))
describe("flatpatches",flatpatches)
pca = PCA(n_components=N_PCA_COMPS, doplot=False).fit(flatpatches)
transfpatches = pca.transform(flatpatches)
reshtransfpatch = transfpatches.reshape((patches.shape[0], patches.shape[1], patches.shape[2], N_PCA_COMPS))
print(" ")
describe("transfpatches", transfpatches)
describe("reshtransfpatch", reshtransfpatch)
print(" ")
procpatches = pca.inverse_transform(transfpatches).reshape(patches.shape)
tehpidx = -1
for tehpatchs in [patches, procpatches]:
tehpidx += 1
FLPTCHS = tehpatchs.reshape((tehpatchs.shape[0], tehpatchs.shape[1]*tehpatchs.shape[2], np.prod(tehpatchs.shape[3:])))
#describe("FLPTCHS", FLPTCHS)
for jj in range(batchsize):
#describe("FLPTCHS[jj,...]", FLPTCHS[jj,...])
color_grid_vis(FLPTCHS[jj,...], savename=os.path.join(outfold,'pcacnn_FLPTCHS_'+str(tehpidx)+'_'+str(jj)+'.png'), flipbgr=True)
#quit()
#########################################
#define the function that's called every time one of the trackbars is moved
def updateWindow(xxx):
stdspace = float(cv2.getTrackbarPos('std_space*10','ImageWindow')) / 10.
stdcolor = float(cv2.getTrackbarPos('std_color*50','ImageWindow')) / 50.
stdspace = max(1e-3, stdspace)
stdcolor = max(1e-3, stdcolor)
#tf_stdv_space = tf.get_variable('tf_stdv_space', initializer=tf.constant(1.0))
#tf_stdv_bilat = tf.get_variable('tf_stdv_bilat', initializer=tf.constant(1.0))
#tf_placehold_img = tf.placeholder(tf.float32, batchimshapefull, name="tf_placehold_img")
#tf_placehold_wrt = tf.placeholder(tf.float32, featswrtshape, name="tf_placehold_wrt")
ret = bilateral_filters(NHWC_to_NCHW(tf_placehold_img),
NHWC_to_NCHW(tf_placehold_wrt),
stdspace, stdcolor)
outbilNCHW = ret
outbilat = NCHW_to_NHWC(outbilNCHW)
tfret = outbilat.eval({tf_placehold_img: batchunpadtest, tf_placehold_wrt: reshtransfpatch})
describe("tfret00", tfret)
tfret[tfret<0.0] = 0.0
tfret[tfret>1.0] = 1.0
describe("tfret11", tfret)
cv2.imshow("ImageWindow", tfret[0,...])
cv2.namedWindow('ImageWindow')
cv2.createTrackbar('std_space*10','ImageWindow',1,200,updateWindow)
cv2.createTrackbar('std_color*50','ImageWindow',1,200,updateWindow)
updateWindow(0) #Creates the window for the first time
cv2.waitKey(0)
|
[
"numpy.prod",
"tensorflow.InteractiveSession",
"tensorflow.initialize_all_variables",
"cv2.imread",
"tensorflow.placeholder",
"batches2patches_tensorflow.GetFuncOverlapAdd",
"os.path.join",
"cv2.imshow",
"os.path.realpath",
"cv2.waitKey",
"cv2.getTrackbarPos",
"numpy.expand_dims",
"batches2patches_tensorflow.GetFuncToPatches",
"mypca.my_PCA_scikitlike",
"numpy.pad",
"cv2.createTrackbar",
"cv2.namedWindow",
"myutils.describe"
] |
[((469, 512), 'os.path.join', 'os.path.join', (['path2file', '"""Lenna_noise1.png"""'], {}), "(path2file, 'Lenna_noise1.png')\n", (481, 512), False, 'import os, sys\n'), ((631, 679), 'numpy.pad', 'np.pad', (['testim', '[(1, 1), (1, 1), (0, 0)]', '"""edge"""'], {}), "(testim, [(1, 1), (1, 1), (0, 0)], 'edge')\n", (637, 679), True, 'import numpy as np\n'), ((968, 993), 'numpy.expand_dims', 'np.expand_dims', (['testim', '(0)'], {}), '(testim, 0)\n', (982, 993), True, 'import numpy as np\n'), ((1190, 1282), 'batches2patches_tensorflow.GetFuncToPatches', 'GetFuncToPatches', (['batchimshapefull', 'patchsize'], {'border_mode': 'bordermode', 'filter_flip': '(False)'}), '(batchimshapefull, patchsize, border_mode=bordermode,\n filter_flip=False)\n', (1206, 1282), False, 'from batches2patches_tensorflow import GetFuncToPatches, GetFuncOverlapAdd\n'), ((1296, 1402), 'batches2patches_tensorflow.GetFuncOverlapAdd', 'GetFuncOverlapAdd', (['batchimshapefull', 'patchsize', 'pimshape'], {'border_mode': 'reconstrmode', 'filter_flip': '(False)'}), '(batchimshapefull, patchsize, pimshape, border_mode=\n reconstrmode, filter_flip=False)\n', (1313, 1402), False, 'from batches2patches_tensorflow import GetFuncToPatches, GetFuncOverlapAdd\n'), ((1639, 1712), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'batchunpadtest.shape'], {'name': '"""tf_placehold_img"""'}), "(tf.float32, batchunpadtest.shape, name='tf_placehold_img')\n", (1653, 1712), True, 'import tensorflow as tf\n'), ((1732, 1798), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', 'featswrtshape'], {'name': '"""tf_placehold_wrt"""'}), "(tf.float32, featswrtshape, name='tf_placehold_wrt')\n", (1746, 1798), True, 'import tensorflow as tf\n'), ((1947, 1970), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1968, 1970), True, 'import tensorflow as tf\n'), ((2153, 2181), 'myutils.describe', 'describe', (['"""patches"""', 'patches'], {}), "('patches', patches)\n", (2161, 2181), False, 'from myutils import describe\n'), ((2294, 2330), 'myutils.describe', 'describe', (['"""flatpatches"""', 'flatpatches'], {}), "('flatpatches', flatpatches)\n", (2302, 2330), False, 'from myutils import describe\n'), ((2562, 2602), 'myutils.describe', 'describe', (['"""transfpatches"""', 'transfpatches'], {}), "('transfpatches', transfpatches)\n", (2570, 2602), False, 'from myutils import describe\n'), ((2603, 2647), 'myutils.describe', 'describe', (['"""reshtransfpatch"""', 'reshtransfpatch'], {}), "('reshtransfpatch', reshtransfpatch)\n", (2611, 2647), False, 'from myutils import describe\n'), ((4408, 4438), 'cv2.namedWindow', 'cv2.namedWindow', (['"""ImageWindow"""'], {}), "('ImageWindow')\n", (4423, 4438), False, 'import cv2\n'), ((4439, 4510), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""std_space*10"""', '"""ImageWindow"""', '(1)', '(200)', 'updateWindow'], {}), "('std_space*10', 'ImageWindow', 1, 200, updateWindow)\n", (4457, 4510), False, 'import cv2\n'), ((4507, 4578), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""std_color*50"""', '"""ImageWindow"""', '(1)', '(200)', 'updateWindow'], {}), "('std_color*50', 'ImageWindow', 1, 200, updateWindow)\n", (4525, 4578), False, 'import cv2\n'), ((4630, 4644), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4641, 4644), False, 'import cv2\n'), ((130, 193), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""/mywork/tensorflow-tuts/sd19reader"""'], {}), "(sys.path[0], '/mywork/tensorflow-tuts/sd19reader')\n", (142, 193), False, 'import os, sys\n'), ((433, 459), 
'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (449, 459), False, 'import os, sys\n'), ((1980, 2009), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (2007, 2009), True, 'import tensorflow as tf\n'), ((4251, 4277), 'myutils.describe', 'describe', (['"""tfret00"""', 'tfret'], {}), "('tfret00', tfret)\n", (4259, 4277), False, 'from myutils import describe\n'), ((4336, 4362), 'myutils.describe', 'describe', (['"""tfret11"""', 'tfret'], {}), "('tfret11', tfret)\n", (4344, 4362), False, 'from myutils import describe\n'), ((4367, 4407), 'cv2.imshow', 'cv2.imshow', (['"""ImageWindow"""', 'tfret[0, ...]'], {}), "('ImageWindow', tfret[0, ...])\n", (4377, 4407), False, 'import cv2\n'), ((2265, 2291), 'numpy.prod', 'np.prod', (['patches.shape[3:]'], {}), '(patches.shape[3:])\n', (2272, 2291), True, 'import numpy as np\n'), ((2336, 2379), 'mypca.my_PCA_scikitlike', 'PCA', ([], {'n_components': 'N_PCA_COMPS', 'doplot': '(False)'}), '(n_components=N_PCA_COMPS, doplot=False)\n', (2339, 2379), True, 'from mypca import my_PCA_scikitlike as PCA\n'), ((521, 538), 'cv2.imread', 'cv2.imread', (['inimg'], {}), '(inimg)\n', (531, 538), False, 'import cv2\n'), ((2897, 2925), 'numpy.prod', 'np.prod', (['tehpatchs.shape[3:]'], {}), '(tehpatchs.shape[3:])\n', (2904, 2925), True, 'import numpy as np\n'), ((3357, 3406), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""std_space*10"""', '"""ImageWindow"""'], {}), "('std_space*10', 'ImageWindow')\n", (3375, 3406), False, 'import cv2\n'), ((3434, 3483), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""std_color*50"""', '"""ImageWindow"""'], {}), "('std_color*50', 'ImageWindow')\n", (3452, 3483), False, 'import cv2\n')]
|
# Copyright 2017 The TensorFlow Authors modified by <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from kerod.utils import ops
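# indices_to_dense_vector scatters `indices_value` (default 1) at the given indices of a
# length-`size` vector filled with `default_value` (default 0).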
def test_indices_to_dense_vector():
size = 10000
num_indices = np.random.randint(size)
rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
expected_output = np.zeros(size, dtype=np.float32)
expected_output[rand_indices] = 1.
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(tf_rand_indices, size)
np.testing.assert_array_equal(indicator, expected_output)
assert indicator.dtype == expected_output.dtype
def test_indices_to_dense_vector_size_at_inference():
size = 5000
num_indices = 250
all_indices = np.arange(size)
rand_indices = np.random.permutation(all_indices)[0:num_indices]
expected_output = np.zeros(size, dtype=np.float32)
expected_output[rand_indices] = 1.
indicator = ops.indices_to_dense_vector(rand_indices, tf.shape(all_indices)[0])
np.testing.assert_array_equal(indicator, expected_output)
assert indicator.dtype == expected_output.dtype
def test_indices_to_dense_vector_int():
size = 500
num_indices = 25
rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
expected_output = np.zeros(size, dtype=np.int64)
expected_output[rand_indices] = 1
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(tf_rand_indices, size, 1, dtype=tf.int64)
np.testing.assert_array_equal(indicator, expected_output)
assert indicator.dtype == expected_output.dtype
def test_indices_to_dense_vector_custom_values():
size = 100
num_indices = 10
rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
indices_value = np.random.rand(1)
default_value = np.random.rand(1)
expected_output = np.float32(np.ones(size) * default_value)
expected_output[rand_indices] = indices_value
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(
tf_rand_indices, size, indices_value=indices_value, default_value=default_value)
np.testing.assert_allclose(indicator, expected_output)
assert indicator.dtype == expected_output.dtype
def test_indices_to_dense_vector_all_indices_as_input():
size = 500
num_indices = 500
rand_indices = np.random.permutation(np.arange(size))[0:num_indices]
expected_output = np.ones(size, dtype=np.float32)
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(tf_rand_indices, size)
np.testing.assert_array_equal(indicator, expected_output)
assert indicator.dtype == expected_output.dtype
def test_indices_to_dense_vector_empty_indices_as_input():
size = 500
rand_indices = []
expected_output = np.zeros(size, dtype=np.float32)
tf_rand_indices = tf.constant(rand_indices)
indicator = ops.indices_to_dense_vector(tf_rand_indices, size)
np.testing.assert_array_equal(indicator, expected_output)
assert indicator.dtype == expected_output.dtype
def test_item_assignment():
tensor = tf.constant([1, 2, 3, 4])
out = ops.item_assignment(tensor, tensor >= 2, 6)
np.testing.assert_array_equal(out, tf.constant([1, 6, 6, 6]))
|
[
"tensorflow.shape",
"numpy.random.rand",
"numpy.ones",
"numpy.arange",
"kerod.utils.ops.item_assignment",
"numpy.testing.assert_allclose",
"kerod.utils.ops.indices_to_dense_vector",
"numpy.random.randint",
"tensorflow.constant",
"numpy.zeros",
"numpy.testing.assert_array_equal",
"numpy.random.permutation"
] |
[((854, 877), 'numpy.random.randint', 'np.random.randint', (['size'], {}), '(size)\n', (871, 877), True, 'import numpy as np\n'), ((974, 1006), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (982, 1006), True, 'import numpy as np\n'), ((1069, 1094), 'tensorflow.constant', 'tf.constant', (['rand_indices'], {}), '(rand_indices)\n', (1080, 1094), True, 'import tensorflow as tf\n'), ((1111, 1161), 'kerod.utils.ops.indices_to_dense_vector', 'ops.indices_to_dense_vector', (['tf_rand_indices', 'size'], {}), '(tf_rand_indices, size)\n', (1138, 1161), False, 'from kerod.utils import ops\n'), ((1167, 1224), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['indicator', 'expected_output'], {}), '(indicator, expected_output)\n', (1196, 1224), True, 'import numpy as np\n'), ((1389, 1404), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (1398, 1404), True, 'import numpy as np\n'), ((1497, 1529), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (1505, 1529), True, 'import numpy as np\n'), ((1659, 1716), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['indicator', 'expected_output'], {}), '(indicator, expected_output)\n', (1688, 1716), True, 'import numpy as np\n'), ((1943, 1973), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.int64'}), '(size, dtype=np.int64)\n', (1951, 1973), True, 'import numpy as np\n'), ((2035, 2060), 'tensorflow.constant', 'tf.constant', (['rand_indices'], {}), '(rand_indices)\n', (2046, 2060), True, 'import tensorflow as tf\n'), ((2077, 2146), 'kerod.utils.ops.indices_to_dense_vector', 'ops.indices_to_dense_vector', (['tf_rand_indices', 'size', '(1)'], {'dtype': 'tf.int64'}), '(tf_rand_indices, size, 1, dtype=tf.int64)\n', (2104, 2146), False, 'from kerod.utils import ops\n'), ((2152, 2209), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['indicator', 'expected_output'], {}), '(indicator, expected_output)\n', (2181, 2209), True, 'import numpy as np\n'), ((2443, 2460), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2457, 2460), True, 'import numpy as np\n'), ((2481, 2498), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2495, 2498), True, 'import numpy as np\n'), ((2637, 2662), 'tensorflow.constant', 'tf.constant', (['rand_indices'], {}), '(rand_indices)\n', (2648, 2662), True, 'import tensorflow as tf\n'), ((2679, 2792), 'kerod.utils.ops.indices_to_dense_vector', 'ops.indices_to_dense_vector', (['tf_rand_indices', 'size'], {'indices_value': 'indices_value', 'default_value': 'default_value'}), '(tf_rand_indices, size, indices_value=\n indices_value, default_value=default_value)\n', (2706, 2792), False, 'from kerod.utils import ops\n'), ((2802, 2856), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['indicator', 'expected_output'], {}), '(indicator, expected_output)\n', (2828, 2856), True, 'import numpy as np\n'), ((3101, 3132), 'numpy.ones', 'np.ones', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (3108, 3132), True, 'import numpy as np\n'), ((3156, 3181), 'tensorflow.constant', 'tf.constant', (['rand_indices'], {}), '(rand_indices)\n', (3167, 3181), True, 'import tensorflow as tf\n'), ((3198, 3248), 'kerod.utils.ops.indices_to_dense_vector', 'ops.indices_to_dense_vector', (['tf_rand_indices', 'size'], {}), '(tf_rand_indices, size)\n', (3225, 3248), False, 'from kerod.utils import ops\n'), ((3254, 3311), 'numpy.testing.assert_array_equal', 
'np.testing.assert_array_equal', (['indicator', 'expected_output'], {}), '(indicator, expected_output)\n', (3283, 3311), True, 'import numpy as np\n'), ((3485, 3517), 'numpy.zeros', 'np.zeros', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (3493, 3517), True, 'import numpy as np\n'), ((3541, 3566), 'tensorflow.constant', 'tf.constant', (['rand_indices'], {}), '(rand_indices)\n', (3552, 3566), True, 'import tensorflow as tf\n'), ((3583, 3633), 'kerod.utils.ops.indices_to_dense_vector', 'ops.indices_to_dense_vector', (['tf_rand_indices', 'size'], {}), '(tf_rand_indices, size)\n', (3610, 3633), False, 'from kerod.utils import ops\n'), ((3639, 3696), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['indicator', 'expected_output'], {}), '(indicator, expected_output)\n', (3668, 3696), True, 'import numpy as np\n'), ((3792, 3817), 'tensorflow.constant', 'tf.constant', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (3803, 3817), True, 'import tensorflow as tf\n'), ((3828, 3871), 'kerod.utils.ops.item_assignment', 'ops.item_assignment', (['tensor', '(tensor >= 2)', '(6)'], {}), '(tensor, tensor >= 2, 6)\n', (3847, 3871), False, 'from kerod.utils import ops\n'), ((1424, 1458), 'numpy.random.permutation', 'np.random.permutation', (['all_indices'], {}), '(all_indices)\n', (1445, 1458), True, 'import numpy as np\n'), ((3911, 3936), 'tensorflow.constant', 'tf.constant', (['[1, 6, 6, 6]'], {}), '([1, 6, 6, 6])\n', (3922, 3936), True, 'import tensorflow as tf\n'), ((919, 934), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (928, 934), True, 'import numpy as np\n'), ((1628, 1649), 'tensorflow.shape', 'tf.shape', (['all_indices'], {}), '(all_indices)\n', (1636, 1649), True, 'import tensorflow as tf\n'), ((1888, 1903), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (1897, 1903), True, 'import numpy as np\n'), ((2391, 2406), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (2400, 2406), True, 'import numpy as np\n'), ((2533, 2546), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (2540, 2546), True, 'import numpy as np\n'), ((3046, 3061), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (3055, 3061), True, 'import numpy as np\n')]
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
convert image npz to LMDB
"""
import argparse
import glob
import io
import json
import multiprocessing as mp
import os
from os.path import basename, exists
from cytoolz import curry
import numpy as np
from tqdm import tqdm
import lmdb
import msgpack
import msgpack_numpy
msgpack_numpy.patch()
def _compute_nbb(img_dump, conf_th, max_bb, min_bb, num_bb):
num_bb = max(min_bb, (img_dump['conf'] > conf_th).sum())
num_bb = min(max_bb, num_bb)
return int(num_bb)
@curry
def load_npz(conf_th, max_bb, min_bb, num_bb, fname, keep_all=False):
try:
img_dump = np.load(fname, allow_pickle=True)
if keep_all:
nbb = None
else:
nbb = _compute_nbb(img_dump, conf_th, max_bb, min_bb, num_bb)
dump = {}
for key, arr in img_dump.items():
if arr.dtype == np.float32:
arr = arr.astype(np.float16)
if arr.ndim == 2:
dump[key] = arr[:nbb, :]
elif arr.ndim == 1:
dump[key] = arr[:nbb]
else:
raise ValueError('wrong ndim')
except Exception as e:
# corrupted file
print(f'corrupted file {fname}', e)
dump = {}
nbb = 0
name = basename(fname)
return name, dump, nbb
def dumps_npz(dump, compress=False):
with io.BytesIO() as writer:
if compress:
np.savez_compressed(writer, **dump, allow_pickle=True)
else:
np.savez(writer, **dump, allow_pickle=True)
return writer.getvalue()
def dumps_msgpack(dump):
return msgpack.dumps(dump, use_bin_type=True)
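# main: load every *.npz feature file in img_dir, truncate each dump to its per-image number
# of boxes (unless --keep_all), and store it in an LMDB keyed by file name, plus a '__keys__'
# index entry; per-image box counts are written to an nbb_*.json when dynamic boxes are used.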
def main(opts):
if opts.img_dir[-1] == '/':
opts.img_dir = opts.img_dir[:-1]
split = basename(opts.img_dir)
if opts.keep_all:
db_name = 'all'
else:
if opts.conf_th == -1:
db_name = f'feat_numbb{opts.num_bb}'
else:
db_name = (f'feat_th{opts.conf_th}_max{opts.max_bb}'
f'_min{opts.min_bb}')
if opts.compress:
db_name += '_compressed'
if not exists(f'{opts.output}/{split}'):
os.makedirs(f'{opts.output}/{split}')
env = lmdb.open(f'{opts.output}/{split}/{db_name}', map_size=1024**4)
txn = env.begin(write=True)
files = glob.glob(f'{opts.img_dir}/*.npz')
load = load_npz(opts.conf_th, opts.max_bb, opts.min_bb, opts.num_bb,
keep_all=opts.keep_all)
name2nbb = {} # number of bboxes
with mp.Pool(opts.nproc) as pool, tqdm(total=len(files)) as pbar:
for i, (fname, features, nbb) in enumerate(
pool.imap_unordered(load, files, chunksize=128)):
if not features:
continue # corrupted feature
if opts.compress:
dump = dumps_npz(features, compress=True)
else:
dump = dumps_msgpack(features)
txn.put(key=fname.encode('utf-8'), value=dump)
if i % 1000 == 0:
txn.commit()
txn = env.begin(write=True)
name2nbb[fname] = nbb
pbar.update(1)
txn.put(key=b'__keys__',
value=json.dumps(list(name2nbb.keys())).encode('utf-8'))
txn.commit()
env.close()
if opts.conf_th != -1 and not opts.keep_all:
with open(f'{opts.output}/{split}/'
f'nbb_th{opts.conf_th}_'
f'max{opts.max_bb}_min{opts.min_bb}.json', 'w') as f:
json.dump(name2nbb, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--img_dir", default=None, type=str,
help="The input images.")
parser.add_argument("--output", default=None, type=str,
help="output lmdb")
parser.add_argument('--nproc', type=int, default=8,
help='number of cores used')
parser.add_argument('--compress', action='store_true',
help='compress the tensors')
parser.add_argument('--keep_all', action='store_true',
help='keep all features, overrides all following args')
parser.add_argument('--conf_th', type=float, default=0.2,
help='threshold for dynamic bounding boxes '
'(-1 for fixed)')
parser.add_argument('--max_bb', type=int, default=100,
help='max number of bounding boxes')
parser.add_argument('--min_bb', type=int, default=10,
help='min number of bounding boxes')
parser.add_argument('--num_bb', type=int, default=100,
help='number of bounding boxes (fixed)')
args = parser.parse_args()
main(args)
|
[
"os.path.exists",
"numpy.savez",
"msgpack_numpy.patch",
"argparse.ArgumentParser",
"os.makedirs",
"json.dump",
"io.BytesIO",
"glob.glob",
"lmdb.open",
"os.path.basename",
"multiprocessing.Pool",
"numpy.savez_compressed",
"numpy.load",
"msgpack.dumps"
] |
[((347, 368), 'msgpack_numpy.patch', 'msgpack_numpy.patch', ([], {}), '()\n', (366, 368), False, 'import msgpack_numpy\n'), ((1315, 1330), 'os.path.basename', 'basename', (['fname'], {}), '(fname)\n', (1323, 1330), False, 'from os.path import basename, exists\n'), ((1659, 1697), 'msgpack.dumps', 'msgpack.dumps', (['dump'], {'use_bin_type': '(True)'}), '(dump, use_bin_type=True)\n', (1672, 1697), False, 'import msgpack\n'), ((1801, 1823), 'os.path.basename', 'basename', (['opts.img_dir'], {}), '(opts.img_dir)\n', (1809, 1823), False, 'from os.path import basename, exists\n'), ((2240, 2305), 'lmdb.open', 'lmdb.open', (['f"""{opts.output}/{split}/{db_name}"""'], {'map_size': '(1024 ** 4)'}), "(f'{opts.output}/{split}/{db_name}', map_size=1024 ** 4)\n", (2249, 2305), False, 'import lmdb\n'), ((2348, 2382), 'glob.glob', 'glob.glob', (['f"""{opts.img_dir}/*.npz"""'], {}), "(f'{opts.img_dir}/*.npz')\n", (2357, 2382), False, 'import glob\n'), ((3609, 3634), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3632, 3634), False, 'import argparse\n'), ((656, 689), 'numpy.load', 'np.load', (['fname'], {'allow_pickle': '(True)'}), '(fname, allow_pickle=True)\n', (663, 689), True, 'import numpy as np\n'), ((1406, 1418), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1416, 1418), False, 'import io\n'), ((2150, 2182), 'os.path.exists', 'exists', (['f"""{opts.output}/{split}"""'], {}), "(f'{opts.output}/{split}')\n", (2156, 2182), False, 'from os.path import basename, exists\n'), ((2192, 2229), 'os.makedirs', 'os.makedirs', (['f"""{opts.output}/{split}"""'], {}), "(f'{opts.output}/{split}')\n", (2203, 2229), False, 'import os\n'), ((2547, 2566), 'multiprocessing.Pool', 'mp.Pool', (['opts.nproc'], {}), '(opts.nproc)\n', (2554, 2566), True, 'import multiprocessing as mp\n'), ((1463, 1517), 'numpy.savez_compressed', 'np.savez_compressed', (['writer'], {'allow_pickle': '(True)'}), '(writer, **dump, allow_pickle=True)\n', (1482, 1517), True, 'import numpy as np\n'), ((1544, 1587), 'numpy.savez', 'np.savez', (['writer'], {'allow_pickle': '(True)'}), '(writer, **dump, allow_pickle=True)\n', (1552, 1587), True, 'import numpy as np\n'), ((3544, 3566), 'json.dump', 'json.dump', (['name2nbb', 'f'], {}), '(name2nbb, f)\n', (3553, 3566), False, 'import json\n')]
|
import numpy as np
import theano.tensor as tt
from . import Hypers, ones
from ...libs.tensors import tt_to_num
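# Distance metrics used by kernels: __call__ compares two inputs elementwise, and
# gram() broadcasts that comparison (via dimshuffle) over the selected dims to build the pairwise matrix.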
class Metric(Hypers):
def __call__(self, x1, x2):
return tt.abs_(x1 - x2)
def gram(self, x1, x2):
#try:
return (self(x1[:, self.dims].dimshuffle([0, 'x', 1]), x2[:, self.dims].dimshuffle(['x', 0, 1])))
#except ValueError:
# return tt_to_num(self(x1[:, self.dims].dimshuffle([0, 'x']), x2[:, self.dims].dimshuffle(['x', 0])))
def input_sensitivity(self):
return np.ones(self.shape)
def __str__(self):
return str(self.__class__.__name__) + '[h=' + str(self.hypers) + ']'
__repr__ = __str__
class One(Metric):
def __call__(self, x1, x2):
return 1
class Delta(Metric):
def __call__(self, x1, x2):
return tt.eq((x1 - x2), np.float32(0)).sum(axis=2)
def gram(self, x1, x2):
return tt_to_num(self(x1[:, self.dims].dimshuffle([0, 'x', 1]), x2[:, self.dims].dimshuffle(['x', 0, 1])))
class DeltaEq(Metric):
def __call__(self, x1, x2, eq=0):
return (tt.eq(x1, eq)*tt.eq(x2, eq)).sum(axis=2)
def gram(self, x1, x2, eq=0):
return tt_to_num(self(x1[:, self.dims].dimshuffle([0, 'x', 1]), x2[:, self.dims].dimshuffle(['x', 0, 1]), eq))
class DeltaEq2(Metric):
def __call__(self, x1, x2, eq1=0, eq2=0):
return (tt.eq(x1, eq1)*tt.eq(x2, eq2) + tt.eq(x1, eq2)*tt.eq(x2, eq1)).sum(axis=2)
def gram(self, x1, x2, eq1=0, eq2=0):
return tt_to_num(self(x1[:, self.dims].dimshuffle([0, 'x', 1]), x2[:, self.dims].dimshuffle(['x', 0, 1]), eq1, eq2))
class Minimum(Metric):
def __call__(self, x1, x2):
return tt.prod(tt.minimum(x1-x2*0, x2-x1*0), axis=2)
class Difference(Metric):
def __call__(self, x1, x2):
return x1 - x2
class L1(Metric):
def __call__(self, x1, x2):
return tt.sum(tt.abs_(x1 - x2))
class L2(Metric):
def __call__(self, x1, x2):
return tt.sum(0.5*(x1 - x2)**2)
class ARD(Metric):
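    # Automatic Relevance Determination: one positive "rate" (inverse length-scale) hyperparameter per input dimension.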
def __init__(self, x, name=None, rate=None):
super().__init__(x, name)
self.rate = rate
def check_hypers(self, parent=''):
super().check_hypers(parent=parent)
if self.rate is None:
self.rate = Hypers.FlatExp(parent + 'rate', shape=self.shape) #FlatPos
self.hypers += [self.rate]
def input_sensitivity(self):
return ones(self.shape) * self.rate ** 2
class ARD_L1(ARD):
def __call__(self, x1, x2):
return tt.dot(tt.abs_(x1 - x2), self.rate)
def default_hypers(self, x=None, y=None):
return {self.rate: 1 / np.abs(x[1:] - x[:-1]).mean(axis=0)}
def input_sensitivity(self):
return ones(self.shape) * self.rate
class ARD_L2(ARD):
def __call__(self, x1, x2):
return tt.dot((x1 - x2) ** 2, (0.5 * self.rate ** 2))
def default_hypers(self, x=None, y=None):
try:
return {self.rate: 0.5 / np.abs(x[1:] - x[:-1]).mean(axis=0)}
except:
return {}
class ARD_Dot(ARD):
def __call__(self, x1, x2):
return tt.dot(x1 * x2, self.rate ** 2)
def default_hypers(self, x=None, y=None):
return {self.rate: 1 / ((np.sqrt(np.abs(x)).mean(axis=0)) / np.abs(y).mean(axis=0))}
class ARD_DotBias(ARD):
def __init__(self, x, name=None, rate=None, bias=None):
super().__init__(x, name, rate)
self.bias = bias
def check_hypers(self, parent=''):
super().check_hypers(parent=parent)
if self.bias is None:
self.bias = Hypers.FlatExp(parent + 'bias')
self.hypers += [self.bias]
def __call__(self, x1, x2):
return self.bias + tt.dot(x1 * x2, self.rate ** 2)
#return self.bias + tt.dot(tt.dot(x1, self.rate), tt.dot(x2, self.rate))
def default_hypers(self, x=None, y=None):
return {self.bias: np.abs(y).mean()/np.abs(x).mean(),
self.rate: np.sqrt(np.abs(y)).mean(axis=0) / np.abs(x).mean(axis=0)}
class PSD(Metric):
def __init__(self, x, p=1, name=None, rate=None, directions=None):
super().__init__(x, name)
self.rate = rate
self.directions = directions
self.p = p
def check_hypers(self, parent=''):
super().check_hypers(parent=parent)
if self.rate is None:
self.rate = Hypers.FlatExp(parent + 'rate', shape=self.shape)
if self.directions is None:
self.directions = Hypers.FlatExp(parent + 'directions', shape=(self.p, self.shape))
self.hypers += [self.rate, self.directions]
class PSD_Dot(PSD):
def __call__(self, x1, x2):
return tt.dot(tt.dot(x1.T, tt.dot(self.directions.T, self.directions) + tt.diag(self.rate**2)), x2)
def default_hypers(self, x=None, y=None):
return {self.rate: 1 / ((np.sqrt(np.abs(x)).mean(axis=0)) / np.abs(y).mean(axis=0)),
self.directions: np.zeros(self.directions.shape)}
class PSD_L2(PSD):
def __call__(self, x1, x2):
d = (x1 - x2)
M = tt.dot(self.directions.T, self.directions) + tt.diag(self.rate**2)
d * M
return tt.dot(M, d)
def default_hypers(self, x=None, y=None):
return {self.rate: 1 / ((np.sqrt(np.abs(x)).mean(axis=0)) / np.abs(y).mean(axis=0)),
self.directions: np.zeros(self.directions.shape)}
|
[
"numpy.abs",
"theano.tensor.diag",
"numpy.ones",
"theano.tensor.sum",
"theano.tensor.minimum",
"theano.tensor.abs_",
"numpy.zeros",
"theano.tensor.eq",
"numpy.float32",
"theano.tensor.dot"
] |
[((182, 198), 'theano.tensor.abs_', 'tt.abs_', (['(x1 - x2)'], {}), '(x1 - x2)\n', (189, 198), True, 'import theano.tensor as tt\n'), ((539, 558), 'numpy.ones', 'np.ones', (['self.shape'], {}), '(self.shape)\n', (546, 558), True, 'import numpy as np\n'), ((1976, 2004), 'theano.tensor.sum', 'tt.sum', (['(0.5 * (x1 - x2) ** 2)'], {}), '(0.5 * (x1 - x2) ** 2)\n', (1982, 2004), True, 'import theano.tensor as tt\n'), ((2810, 2854), 'theano.tensor.dot', 'tt.dot', (['((x1 - x2) ** 2)', '(0.5 * self.rate ** 2)'], {}), '((x1 - x2) ** 2, 0.5 * self.rate ** 2)\n', (2816, 2854), True, 'import theano.tensor as tt\n'), ((3098, 3129), 'theano.tensor.dot', 'tt.dot', (['(x1 * x2)', '(self.rate ** 2)'], {}), '(x1 * x2, self.rate ** 2)\n', (3104, 3129), True, 'import theano.tensor as tt\n'), ((5122, 5134), 'theano.tensor.dot', 'tt.dot', (['M', 'd'], {}), '(M, d)\n', (5128, 5134), True, 'import theano.tensor as tt\n'), ((1696, 1732), 'theano.tensor.minimum', 'tt.minimum', (['(x1 - x2 * 0)', '(x2 - x1 * 0)'], {}), '(x1 - x2 * 0, x2 - x1 * 0)\n', (1706, 1732), True, 'import theano.tensor as tt\n'), ((1891, 1907), 'theano.tensor.abs_', 'tt.abs_', (['(x1 - x2)'], {}), '(x1 - x2)\n', (1898, 1907), True, 'import theano.tensor as tt\n'), ((2520, 2536), 'theano.tensor.abs_', 'tt.abs_', (['(x1 - x2)'], {}), '(x1 - x2)\n', (2527, 2536), True, 'import theano.tensor as tt\n'), ((3686, 3717), 'theano.tensor.dot', 'tt.dot', (['(x1 * x2)', '(self.rate ** 2)'], {}), '(x1 * x2, self.rate ** 2)\n', (3692, 3717), True, 'import theano.tensor as tt\n'), ((4907, 4938), 'numpy.zeros', 'np.zeros', (['self.directions.shape'], {}), '(self.directions.shape)\n', (4915, 4938), True, 'import numpy as np\n'), ((5026, 5068), 'theano.tensor.dot', 'tt.dot', (['self.directions.T', 'self.directions'], {}), '(self.directions.T, self.directions)\n', (5032, 5068), True, 'import theano.tensor as tt\n'), ((5071, 5094), 'theano.tensor.diag', 'tt.diag', (['(self.rate ** 2)'], {}), '(self.rate ** 2)\n', (5078, 5094), True, 'import theano.tensor as tt\n'), ((5308, 5339), 'numpy.zeros', 'np.zeros', (['self.directions.shape'], {}), '(self.directions.shape)\n', (5316, 5339), True, 'import numpy as np\n'), ((840, 853), 'numpy.float32', 'np.float32', (['(0)'], {}), '(0)\n', (850, 853), True, 'import numpy as np\n'), ((1090, 1103), 'theano.tensor.eq', 'tt.eq', (['x1', 'eq'], {}), '(x1, eq)\n', (1095, 1103), True, 'import theano.tensor as tt\n'), ((1104, 1117), 'theano.tensor.eq', 'tt.eq', (['x2', 'eq'], {}), '(x2, eq)\n', (1109, 1117), True, 'import theano.tensor as tt\n'), ((4661, 4703), 'theano.tensor.dot', 'tt.dot', (['self.directions.T', 'self.directions'], {}), '(self.directions.T, self.directions)\n', (4667, 4703), True, 'import theano.tensor as tt\n'), ((4706, 4729), 'theano.tensor.diag', 'tt.diag', (['(self.rate ** 2)'], {}), '(self.rate ** 2)\n', (4713, 4729), True, 'import theano.tensor as tt\n'), ((1373, 1387), 'theano.tensor.eq', 'tt.eq', (['x1', 'eq1'], {}), '(x1, eq1)\n', (1378, 1387), True, 'import theano.tensor as tt\n'), ((1388, 1402), 'theano.tensor.eq', 'tt.eq', (['x2', 'eq2'], {}), '(x2, eq2)\n', (1393, 1402), True, 'import theano.tensor as tt\n'), ((1405, 1419), 'theano.tensor.eq', 'tt.eq', (['x1', 'eq2'], {}), '(x1, eq2)\n', (1410, 1419), True, 'import theano.tensor as tt\n'), ((1420, 1434), 'theano.tensor.eq', 'tt.eq', (['x2', 'eq1'], {}), '(x2, eq1)\n', (1425, 1434), True, 'import theano.tensor as tt\n'), ((2627, 2649), 'numpy.abs', 'np.abs', (['(x[1:] - x[:-1])'], {}), '(x[1:] - x[:-1])\n', (2633, 2649), True, 'import numpy as np\n'), 
((3873, 3882), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (3879, 3882), True, 'import numpy as np\n'), ((3890, 3899), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (3896, 3899), True, 'import numpy as np\n'), ((3969, 3978), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (3975, 3978), True, 'import numpy as np\n'), ((2954, 2976), 'numpy.abs', 'np.abs', (['(x[1:] - x[:-1])'], {}), '(x[1:] - x[:-1])\n', (2960, 2976), True, 'import numpy as np\n'), ((3245, 3254), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (3251, 3254), True, 'import numpy as np\n'), ((3943, 3952), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (3949, 3952), True, 'import numpy as np\n'), ((4849, 4858), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (4855, 4858), True, 'import numpy as np\n'), ((5250, 5259), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (5256, 5259), True, 'import numpy as np\n'), ((3218, 3227), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (3224, 3227), True, 'import numpy as np\n'), ((4822, 4831), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (4828, 4831), True, 'import numpy as np\n'), ((5223, 5232), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (5229, 5232), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 10:12:34 2018
@author: kite
"""
"""
Run the strategy backtest, plot the return curve against the CSI 300 (HS300) benchmark,
and compute the annualized return, maximum drawdown and Sharpe ratio.
Main methods:
ma10_factor:
    is_k_up_break_ma10: whether the daily K-line crosses above the 10-day moving average
    is_k_down_break_ma10: whether the daily K-line crosses below the 10-day moving average
    compare_close_2_ma_10: helper relating a day's close to that day's 10-day moving average
backtest: main backtest routine; after fetching stocks from the stock pool, it steps through the trading days one by one
"""
import pickle
from pymongo import DESCENDING, ASCENDING
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from stock_pool_strategy import stock_pool, find_out_stocks
from database import DB_CONN
from factor.ma10_factor import is_k_up_break_ma10, is_k_down_break_ma10
from stock_util import get_trading_dates, compute_drawdown, dynamic_max_drawdown, compute_sharpe_ratio, compute_ir
plt.rcParams['figure.figsize'] = [14, 8]
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
plt.style.use('ggplot')
SINGLE_DAY_MAX_DROP_RATE = 0.03
MAX_DROP_RATE = 0.1
ATR_WIN = 14
ATR_RATIO = 2
RISK_RATIO = 0.01
def backtest(begin_date, end_date, stop_method=None, pos_method='equal'):
"""
Arguments:
        begin_date: backtest start date
        end_date: backtest end date
        stop_method : stop-loss method
            None : no stop loss
            fixed : fixed-percentage stop loss
            float : trailing (floating) stop loss
            ATR_float_dynamic : dynamic ATR trailing stop loss
            ATR_float_static : static ATR trailing stop loss
        pos_method : position-sizing method
            equal : equal-weight positions
            atr : size positions by ATR
    Returns:
        Account: dict with the fields
            init_assets : initial assets, default 1E7
            history_table : trade records (fills)
            net_value : daily net value
            final_net_value : net value on the final day
            profit : cumulative return
            day_profit : daily return
            positions : daily position ratio
            stop_loss : stop-loss method and its parameters
            position_manage : position-sizing method and related parameters
"""
    # record stop-loss time points
    # stop_lose_position_date_current = []
    # stop_lose_position_date = []
    # record backtest account information
    Account = {}
    # position-related initialization
position_manage = {}
if pos_method == 'equal':
single_position = 2E5
position_manage['头寸分配方式'] = '均仓'
Account['position_manage'] = position_manage
elif pos_method == 'atr':
position_manage['头寸分配方式'] = 'ATR分配头寸'
position_manage['ATR_WIN'] = ATR_WIN
position_manage['RISK_RATIO'] = RISK_RATIO
Account['position_manage'] = position_manage
    positions = pd.Series()  # record the daily position ratio
stop_loss = {}
cash = 1E7
init_assets = cash
Account['init_assets'] = init_assets
Account['start'] = begin_date
Account['end'] = end_date
if stop_method is None:
Account['stop_loss'] = '无止损'
elif stop_method == 'fixed':
stop_loss['单日跌幅比例'] = SINGLE_DAY_MAX_DROP_RATE
stop_loss['累计跌幅比例'] = MAX_DROP_RATE
stop_loss['止损方式'] = '固定比例止损'
Account['stop_loss'] = stop_loss
elif stop_method == 'float':
stop_loss['跌幅比例'] = MAX_DROP_RATE
stop_loss['止损方式'] = '浮动止损'
Account['stop_loss'] = stop_loss
elif (stop_method == 'ATR_float_dynamic') or (stop_method == 'ATR_float_static'):
stop_loss['ATR_WIN'] = ATR_WIN
stop_loss['ATR_RATIO'] = ATR_RATIO
stop_loss['止损方式'] = '动态ATR浮动止损'
Account['stop_loss'] = stop_loss
    # net value, cumulative return and the HS300 benchmark, keyed by date
    df_profit = pd.DataFrame(columns=['net_value', 'profit', 'hs300'])
    # single-day return and the HS300 benchmark, keyed by date
df_day_profit = pd.DataFrame(columns=['profit', 'hs300'])
all_dates = get_trading_dates(begin_date, end_date)
hs300_begin_value = DB_CONN['daily'].find_one(
{'code': '000300', 'date': all_dates[0], 'index': True},
projection={'close': True})['close']
adjust_dates, date_codes_dict = stock_pool(begin_date, end_date)
last_phase_codes = None
this_phase_codes = None
to_be_sold_codes = set()
to_be_bought_codes = set()
holding_code_dict = dict()
last_date = None
    last_entry_dates = {}  # records each holding's entry date
    history_table = pd.DataFrame()  # trade records (fills)
    last_total_capital = 1e7  # previous day's total assets, initialised to the starting capital
    last_hs300_close = hs300_begin_value  # previous day's HS300 close, initialised to the first day's value
    net_value = 1  # net value
    count = 0
    # step through the backtest day by day
for _date in all_dates:
print('Backtest at %s.' % _date)
        # list of currently held stock codes
        before_sell_holding_codes = list(holding_code_dict.keys())
        # handle adjustment factors (splits/dividends)
if last_date is not None and len(before_sell_holding_codes) > 0:
last_daily_cursor = DB_CONN['daily'].find(
{'code': {'$in': before_sell_holding_codes}, 'date': last_date, 'index': False},
projection={'code': True, 'au_factor': True, '_id':False})
code_last_aufactor_dict = dict()
for last_daily in last_daily_cursor:
code_last_aufactor_dict[last_daily['code']] = last_daily['au_factor']
current_daily_cursor = DB_CONN['daily'].find(
{'code': {'$in': before_sell_holding_codes}, 'date': _date, 'index': False},
projection={'code': True, 'au_factor': True, '_id':False})
for current_daily in current_daily_cursor:
print(current_daily['code'], _date)
current_aufactor = current_daily['au_factor']
code = current_daily['code']
before_volume = holding_code_dict[code]['volume']
if code in code_last_aufactor_dict:
last_aufactor = code_last_aufactor_dict[code]
after_volume = int(before_volume * (current_aufactor / last_aufactor))
holding_code_dict[code]['volume'] = after_volume
print('持仓量调整:%s, %6d, %10.6f, %6d, %10.6f' %
(code, before_volume, last_aufactor, after_volume, current_aufactor))
        # sell
print('待卖股票池:', to_be_sold_codes, flush=True)
if len(to_be_sold_codes) > 0:
sell_daily_cursor = DB_CONN['daily'].find(
{'code': {'$in': list(to_be_sold_codes)}, 'date': _date, 'index': False, 'is_trading': True},
projection={'open': True, 'code': True, 'low_limit':True}
)
for sell_daily in sell_daily_cursor:
code = sell_daily['code']
                # do not sell if the open price is at the limit-down price
open_price = sell_daily['open']
low_limit = sell_daily['low_limit']
if (code in before_sell_holding_codes) & (open_price > low_limit):
holding_stock = holding_code_dict[code]
holding_volume = holding_stock['volume']
sell_price = sell_daily['open']
sell_amount = holding_volume * sell_price
cash += sell_amount
cost = holding_stock['cost']
single_profit = (sell_amount - cost) * 100 / cost
last_entry_dates[code] = None
print('卖出 %s, %6d, %6.2f, %8.2f, %4.2f' %
(code, holding_volume, sell_price, sell_amount, single_profit))
                    # record the trade
count += 1
_order = {'datetime':_date, 'code':code, 'price':sell_price,
'amount':-1 * holding_volume, 'cash':cash}
temp = pd.DataFrame(data=_order, index=[count])
history_table = pd.concat([history_table, temp])
del holding_code_dict[code]
to_be_sold_codes.remove(code)
print('卖出后,现金: %10.2f' % cash)
        # buy
print('待买股票池:', to_be_bought_codes, flush=True)
if len(to_be_bought_codes) > 0:
buy_daily_cursor = DB_CONN['daily'].find(
{'code': {'$in': list(to_be_bought_codes)}, 'date': _date, 'index': False, 'is_trading': True},
projection={'code': True, 'open': True, 'high_limit':True}
)
for buy_daily in buy_daily_cursor:
                # do not buy if the open price is at the limit-up price
open_price = buy_daily['open']
high_limit = buy_daily['high_limit']
code = buy_daily['code']
                # =========================== ATR position sizing code start =========================
if pos_method == 'atr':
ATR = calc_ATR(code, _date)
single_position = init_assets * RISK_RATIO / (ATR_RATIO * ATR) // 100 * 100
if (cash > single_position) & (open_price < high_limit):
buy_price = buy_daily['open']
volume = int(int(single_position / buy_price) / 100) * 100
buy_amount = buy_price * volume
cash -= buy_amount
holding_code_dict[code] = {
'volume': volume,
'cost': buy_amount,
'last_value': buy_amount}
last_entry_dates[code] = _date
print('买入 %s, %6d, %6.2f, %8.2f' % (code, volume, buy_price, buy_amount))
                    # record the trade
count += 1
_order = {'datetime':_date, 'code':code, 'price':buy_price,
'amount': volume, 'cash':cash}
temp = pd.DataFrame(data=_order, index=[count])
history_table = pd.concat([history_table, temp])
print('买入后,现金: %10.2f' % cash)
        # list of held stock codes
        holding_codes = list(holding_code_dict.keys())
        # on an adjustment day, fetch the new period's stock list
if _date in adjust_dates:
print('股票池调整日:%s,备选股票列表:' % _date, flush=True)
            # stash the current list as the previous period's codes
if this_phase_codes is not None:
last_phase_codes = this_phase_codes
this_phase_codes = date_codes_dict[_date]
print(this_phase_codes, flush=True)
            # find all codes dropped from the pool and sell them at the next day's open
if last_phase_codes is not None:
out_codes = find_out_stocks(last_phase_codes, this_phase_codes)
for out_code in out_codes:
if out_code in holding_code_dict:
to_be_sold_codes.add(out_code)
        # check for stocks that need to be sold the next day
for holding_code in holding_codes:
if is_k_down_break_ma10(holding_code, _date):
to_be_sold_codes.add(holding_code)
if stop_method is not None:
stop_loss_positions(holding_code, _date, last_entry_dates,
to_be_sold_codes, stop_method)
        # check for stocks that need to be bought the next day
to_be_bought_codes.clear()
if this_phase_codes is not None:
for _code in this_phase_codes:
if _code not in holding_codes and is_k_up_break_ma10(_code, _date):
to_be_bought_codes.add(_code)
        # compute total assets
total_value = 0
holding_daily_cursor = DB_CONN['daily'].find(
{'code': {'$in': holding_codes}, 'date': _date},
projection={'close': True, 'code': True}
)
for holding_daily in holding_daily_cursor:
code = holding_daily['code']
holding_stock = holding_code_dict[code]
value = holding_daily['close'] * holding_stock['volume']
total_value += value
profit = (value - holding_stock['cost']) * 100 / holding_stock['cost']
one_day_profit = (value - holding_stock['last_value']) * 100 / holding_stock['last_value']
holding_stock['last_value'] = value
print('持仓: %s, %10.2f, %4.2f, %4.2f' %
(code, value, profit, one_day_profit))
total_capital = total_value + cash
positions.loc[_date] = total_value / total_capital
hs300_current_value = DB_CONN['daily'].find_one(
{'code': '000300', 'date': _date, 'index': True},
projection={'close': True})['close']
print('收盘后,现金: %10.2f, 总资产: %10.2f' % (cash, total_capital))
last_date = _date
net_value = np.round(total_capital / 1e7, 4)
df_profit.loc[_date] = {
'net_value': np.round(total_capital / 1e7, 4),
'profit': np.round(100 * (total_capital - 1e7) / 1e7, 4),
'hs300': np.round(100 * (hs300_current_value - hs300_begin_value) / hs300_begin_value, 4)
}
        # compute the single-day return
df_day_profit.loc[_date] = {
'profit': np.round(100 * (total_capital - last_total_capital) / last_total_capital, 4),
'hs300': np.round(100 * (hs300_current_value - last_hs300_close) / last_hs300_close, 4)
}
        # stash today's total assets and HS300 close as the basis for the next trading day's single-day return
last_total_capital = total_capital
last_hs300_close = hs300_current_value
Account['history_table'] = history_table
Account['net_value'] = df_profit['net_value']
Account['final_net_value'] = net_value
Account['profit'] = df_profit
Account['day_profit'] = df_day_profit
Account['positions'] = positions
return Account
def stop_loss_positions(holding_code, _date, last_entry_dates, to_be_sold_codes, method):
"""
    Note: the stop-loss logic in this backtest must be treated as after-close processing,
    because the closing price cannot be known intraday!
    1. Fixed-percentage stop loss
        liquidate the whole position if either condition holds:
            1. the single-day loss exceeds 3%;
            2. the cumulative loss exceeds 10%
    2. Fixed-percentage trailing stop:
        look-back window -- from the entry date to the current backtest date
        condition -- stop out when the drop from the window high exceeds a given percentage;
    3. Dynamic volatility (ATR) trailing stop:
        look-back window -- from the entry date to the current backtest date
        condition -- stop out when the drop from the window high exceeds a multiple of the backtest day's ATR;
"""
    # current close, using backward-adjusted (hfq) prices
    current_cursor = DB_CONN['daily_hfq'].find_one(
        {'code':holding_code, 'date':_date,'index':False})
    # price and date at entry
entry_date = last_entry_dates[holding_code]
current_close = current_cursor['close']
interval_cursor = DB_CONN['daily_hfq'].find(
{'code':holding_code, 'date':{'$gte': entry_date, '$lte': _date}, 'index':False},
projection={'high':True, '_id':False}
)
high = max([x['high'] for x in interval_cursor])
    # =========================== fixed-percentage stop loss code start =========================
if method == 'fixed':
current_open = current_cursor['open']
entry_daily_cursor = DB_CONN['daily_hfq'].find_one(
{'code':holding_code, 'date':entry_date,'index':False}
)
entry_price = entry_daily_cursor['open']
if ((current_open - current_close) / current_open) > SINGLE_DAY_MAX_DROP_RATE:
to_be_sold_codes.add(holding_code)
elif ((entry_price - current_close) / entry_price) > MAX_DROP_RATE:
to_be_sold_codes.add(holding_code)
    # =========================== fixed-percentage trailing stop code start ===================
elif method == 'float':
        if (high - current_close) / high > MAX_DROP_RATE:  # proportional drop from the window high
to_be_sold_codes.add(holding_code)
    # =========================== volatility (ATR) trailing stop code start =========================
    # use a real-time (dynamic) volatility trailing stop
elif method == 'ATR_float_dynamic':
ATR = calc_ATR(holding_code, _date)
if ATR is not None:
if (high - current_close) > ATR * ATR_RATIO:
to_be_sold_codes.add(holding_code)
elif method == 'ATR_float_static':
ATR = calc_ATR(holding_code, entry_date)
if ATR is not None:
if (high - current_close) > ATR * ATR_RATIO:
to_be_sold_codes.add(holding_code)
def calc_ATR(code, date):
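    """Average True Range over the last ATR_WIN daily bars up to the given date; returns None if there is not enough data."""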
ATR_cursor = DB_CONN['daily'].find(
{'code':code, 'date':{'$lte': date}, 'index':False},
projection={'open':True, 'high':True, 'low':True, 'close':True, '_id':False},
limit = ATR_WIN+1)
if ATR_cursor is None:
return None
df = pd.DataFrame([r for r in ATR_cursor])
if len(df) != ATR_WIN+1:
return None
df = df.assign(pdc = df['close'].shift(1))
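    # true range = max(high - low, |high - prev_close|, |low - prev_close|); ATR is its mean over the window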
tr = df.apply(lambda x : max( x['high'] - x['low'], abs(x["high"] - x["pdc"]),
abs(x['low'] - x['pdc'])), axis=1)
ATR = tr[- ATR_WIN :].mean()
return ATR
def account_analysis(Account, start, end):
'''
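    Summarize a backtest Account: print cumulative and daily returns, compute the dynamic
    maximum drawdown, annualized return, Sharpe ratio and information ratio, and plot the
    return curve vs. HS300, the daily position ratio and the dynamic drawdown.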
'''
net_value = Account['net_value']
final_net_value = Account['final_net_value']
profit = Account['profit']
day_profit = Account['day_profit']
positions = Account['positions']
print('累积收益', flush=True)
print(profit, flush=True)
print('单日收益', flush=True)
print(day_profit, flush=True)
    # compute maximum drawdown
    # drawdown = compute_drawdown(net_value)
    drawdown = dynamic_max_drawdown(net_value)
    # compute annualized return and Sharpe ratio
    annual_profit, sharpe_ratio = compute_sharpe_ratio(final_net_value, day_profit)
    # compute the information ratio
ir = compute_ir(day_profit)
print('回测结果 %s - %s,年化收益: %7.3f,最大回撤:%7.3f,夏普比率:%4.2f,信息率:%4.2f' %
(start, end, annual_profit, drawdown.max(), sharpe_ratio, ir))
# print(np.sort(list(set(stop_lose_position_date))))
# print(np.sort(list(set(stop_lose_position_date_current))))
profit.index = pd.DatetimeIndex(profit.index, name = 'date')
positions.index = pd.DatetimeIndex(positions.index, name = 'date')
drawdown.index = pd.DatetimeIndex(positions.index, name = 'date')
fig, axes = plt.subplots(3, 1, figsize=(16,20))
axes[0] = plt.subplot2grid((5,3), (0,0), colspan=3, rowspan=3)
axes[0].plot(profit.loc[:,['profit', 'hs300']])
plt.setp(axes[0].get_xticklabels(), visible=False)
axes[0].set(title='Backtest Result')
axes[0].legend(['profit', 'hs300'], loc='best')
axes[1] = plt.subplot2grid((5,3), (3,0), colspan=3, sharex=axes[0])
axes[1].plot(positions)
plt.setp(axes[1].get_xticklabels(), visible=False)
axes[1].set_title('Daily Positions')
axes[1].legend(['Positions'], loc='best')
axes[2] = plt.subplot2grid((5,3), (4,0), colspan=3, sharex=axes[0])
axes[2].plot(drawdown)
axes[2].set_title('Dynamic Max Draw Down')
axes[2].legend(['MaxDrawdown'], loc='best')
plt.show()
def save_file(Account):
with open('backtest--001.file', 'wb') as f:
pickle.dump(Account, f)
if __name__ == "__main__":
start = '2015-01-01'
end = '2015-12-31'
daily_hfq_col = DB_CONN['daily_hfq']
if 'code_1_date_1_index_1_is_trading_1' not in daily_hfq_col.index_information().keys():
daily_hfq_col.create_index(
[('code', ASCENDING), ('date', ASCENDING),
('index', ASCENDING), ('is_trading', ASCENDING)]
)
daily_col = DB_CONN['daily']
if 'code_1_date_1_index_1_is_trading_1' not in daily_col.index_information().keys():
daily_col.create_index(
[('code', ASCENDING), ('date', ASCENDING),
('index', ASCENDING), ('is_trading', ASCENDING)]
)
Account = backtest(start, end, 'fixed')
account_analysis(Account, start, end)
|
[
"pandas.Series",
"pickle.dump",
"matplotlib.pyplot.show",
"stock_pool_strategy.find_out_stocks",
"pandas.DatetimeIndex",
"matplotlib.pyplot.style.use",
"stock_util.get_trading_dates",
"factor.ma10_factor.is_k_down_break_ma10",
"stock_util.dynamic_max_drawdown",
"factor.ma10_factor.is_k_up_break_ma10",
"stock_util.compute_ir",
"stock_pool_strategy.stock_pool",
"pandas.DataFrame",
"matplotlib.pyplot.subplot2grid",
"pandas.concat",
"matplotlib.pyplot.subplots",
"numpy.round",
"stock_util.compute_sharpe_ratio"
] |
[((886, 909), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (899, 909), True, 'import matplotlib.pyplot as plt\n'), ((2365, 2376), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (2374, 2376), True, 'import pandas as pd\n'), ((3283, 3337), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['net_value', 'profit', 'hs300']"}), "(columns=['net_value', 'profit', 'hs300'])\n", (3295, 3337), True, 'import pandas as pd\n'), ((3380, 3421), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['profit', 'hs300']"}), "(columns=['profit', 'hs300'])\n", (3392, 3421), True, 'import pandas as pd\n'), ((3439, 3478), 'stock_util.get_trading_dates', 'get_trading_dates', (['begin_date', 'end_date'], {}), '(begin_date, end_date)\n', (3456, 3478), False, 'from stock_util import get_trading_dates, compute_drawdown, dynamic_max_drawdown, compute_sharpe_ratio, compute_ir\n'), ((3678, 3710), 'stock_pool_strategy.stock_pool', 'stock_pool', (['begin_date', 'end_date'], {}), '(begin_date, end_date)\n', (3688, 3710), False, 'from stock_pool_strategy import stock_pool, find_out_stocks\n'), ((3942, 3956), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3954, 3956), True, 'import pandas as pd\n'), ((15893, 15930), 'pandas.DataFrame', 'pd.DataFrame', (['[r for r in ATR_cursor]'], {}), '([r for r in ATR_cursor])\n', (15905, 15930), True, 'import pandas as pd\n'), ((16697, 16728), 'stock_util.dynamic_max_drawdown', 'dynamic_max_drawdown', (['net_value'], {}), '(net_value)\n', (16717, 16728), False, 'from stock_util import get_trading_dates, compute_drawdown, dynamic_max_drawdown, compute_sharpe_ratio, compute_ir\n'), ((16781, 16830), 'stock_util.compute_sharpe_ratio', 'compute_sharpe_ratio', (['final_net_value', 'day_profit'], {}), '(final_net_value, day_profit)\n', (16801, 16830), False, 'from stock_util import get_trading_dates, compute_drawdown, dynamic_max_drawdown, compute_sharpe_ratio, compute_ir\n'), ((16852, 16874), 'stock_util.compute_ir', 'compute_ir', (['day_profit'], {}), '(day_profit)\n', (16862, 16874), False, 'from stock_util import get_trading_dates, compute_drawdown, dynamic_max_drawdown, compute_sharpe_ratio, compute_ir\n'), ((17159, 17202), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['profit.index'], {'name': '"""date"""'}), "(profit.index, name='date')\n", (17175, 17202), True, 'import pandas as pd\n'), ((17227, 17273), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['positions.index'], {'name': '"""date"""'}), "(positions.index, name='date')\n", (17243, 17273), True, 'import pandas as pd\n'), ((17297, 17343), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['positions.index'], {'name': '"""date"""'}), "(positions.index, name='date')\n", (17313, 17343), True, 'import pandas as pd\n'), ((17367, 17403), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(16, 20)'}), '(3, 1, figsize=(16, 20))\n', (17379, 17403), True, 'import matplotlib.pyplot as plt\n'), ((17422, 17476), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(5, 3)', '(0, 0)'], {'colspan': '(3)', 'rowspan': '(3)'}), '((5, 3), (0, 0), colspan=3, rowspan=3)\n', (17438, 17476), True, 'import matplotlib.pyplot as plt\n'), ((17694, 17753), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(5, 3)', '(3, 0)'], {'colspan': '(3)', 'sharex': 'axes[0]'}), '((5, 3), (3, 0), colspan=3, sharex=axes[0])\n', (17710, 17753), True, 'import matplotlib.pyplot as plt\n'), ((17941, 18000), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(5, 3)', '(4, 0)'], {'colspan': 
'(3)', 'sharex': 'axes[0]'}), '((5, 3), (4, 0), colspan=3, sharex=axes[0])\n', (17957, 18000), True, 'import matplotlib.pyplot as plt\n'), ((18130, 18140), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18138, 18140), True, 'import matplotlib.pyplot as plt\n'), ((12221, 12260), 'numpy.round', 'np.round', (['(total_capital / 10000000.0)', '(4)'], {}), '(total_capital / 10000000.0, 4)\n', (12229, 12260), True, 'import numpy as np\n'), ((18231, 18254), 'pickle.dump', 'pickle.dump', (['Account', 'f'], {}), '(Account, f)\n', (18242, 18254), False, 'import pickle\n'), ((10439, 10480), 'factor.ma10_factor.is_k_down_break_ma10', 'is_k_down_break_ma10', (['holding_code', '_date'], {}), '(holding_code, _date)\n', (10459, 10480), False, 'from factor.ma10_factor import is_k_up_break_ma10, is_k_down_break_ma10\n'), ((12312, 12351), 'numpy.round', 'np.round', (['(total_capital / 10000000.0)', '(4)'], {}), '(total_capital / 10000000.0, 4)\n', (12320, 12351), True, 'import numpy as np\n'), ((12368, 12428), 'numpy.round', 'np.round', (['(100 * (total_capital - 10000000.0) / 10000000.0)', '(4)'], {}), '(100 * (total_capital - 10000000.0) / 10000000.0, 4)\n', (12376, 12428), True, 'import numpy as np\n'), ((12437, 12522), 'numpy.round', 'np.round', (['(100 * (hs300_current_value - hs300_begin_value) / hs300_begin_value)', '(4)'], {}), '(100 * (hs300_current_value - hs300_begin_value) / hs300_begin_value, 4\n )\n', (12445, 12522), True, 'import numpy as np\n'), ((12604, 12680), 'numpy.round', 'np.round', (['(100 * (total_capital - last_total_capital) / last_total_capital)', '(4)'], {}), '(100 * (total_capital - last_total_capital) / last_total_capital, 4)\n', (12612, 12680), True, 'import numpy as np\n'), ((12703, 12781), 'numpy.round', 'np.round', (['(100 * (hs300_current_value - last_hs300_close) / last_hs300_close)', '(4)'], {}), '(100 * (hs300_current_value - last_hs300_close) / last_hs300_close, 4)\n', (12711, 12781), True, 'import numpy as np\n'), ((10150, 10201), 'stock_pool_strategy.find_out_stocks', 'find_out_stocks', (['last_phase_codes', 'this_phase_codes'], {}), '(last_phase_codes, this_phase_codes)\n', (10165, 10201), False, 'from stock_pool_strategy import stock_pool, find_out_stocks\n'), ((7350, 7390), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '_order', 'index': '[count]'}), '(data=_order, index=[count])\n', (7362, 7390), True, 'import pandas as pd\n'), ((7427, 7459), 'pandas.concat', 'pd.concat', (['[history_table, temp]'], {}), '([history_table, temp])\n', (7436, 7459), True, 'import pandas as pd\n'), ((9453, 9493), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '_order', 'index': '[count]'}), '(data=_order, index=[count])\n', (9465, 9493), True, 'import pandas as pd\n'), ((9530, 9562), 'pandas.concat', 'pd.concat', (['[history_table, temp]'], {}), '([history_table, temp])\n', (9539, 9562), True, 'import pandas as pd\n'), ((10925, 10957), 'factor.ma10_factor.is_k_up_break_ma10', 'is_k_up_break_ma10', (['_code', '_date'], {}), '(_code, _date)\n', (10943, 10957), False, 'from factor.ma10_factor import is_k_up_break_ma10, is_k_down_break_ma10\n')]
|
import torch
import torch.nn as nn
import numpy as np
import random
from torch import optim
from rl_network import *
class Agent:
"""
    Learning agent.
    Receives a network model and performs training.
"""
def __init__(self, num_states, num_actions, network_type, learning_rate, use_rnn=False,
gamma=0.99, capacity=10000, batch_size=16):
self.gamma = gamma
self.network_type = network_type
self.use_rnn = use_rnn
        # create the neural network
try:
if network_type == 'policy_gradient': # policy gradient
self.network = PolicyGradient(num_states, num_actions, use_rnn=use_rnn)
elif network_type == 'actor_critic':
self.network = ActorCritic(n_in=num_states, n_out=num_actions, use_rnn=use_rnn)
self.large_i = 1 # for update actor
elif network_type == 'dqn':
self.network = DQN(input_size=num_states, output_size=num_actions, use_rnn=use_rnn)
self.target_network = DQN(input_size=num_states, output_size=num_actions, use_rnn=use_rnn)
self.target_network.load_state_dict(self.network.state_dict()) # target Q makes same weights as Q
self.replay_memory = ReplayMemory(capacity, batch_size)
else:
raise Exception('invalid network type {}'.format(network_type))
except Exception as e:
print(e)
        # create the optimizer
self.optimizer = optim.Adam(self.network.parameters(), lr=learning_rate)
def update(self, history=None):
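        """Update network parameters from an episode history (policy gradient / actor-critic) or from replay memory (DQN)."""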
if self.network_type == 'policy_gradient':
assert history is not None
curr_r = 0
loss = torch.zeros(1, 1)
for i in reversed(range(len(history))):
log_prob, reward, entropy = history[i]
curr_r = self.gamma * curr_r + reward
for lgp, etp in zip(log_prob, entropy):
loss -= torch.sum(((curr_r * lgp) + (1e-3 * etp)))
loss /= len(history)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
elif self.network_type == 'actor_critic':
assert history is not None
loss = torch.zeros(1, 1)
log_prob, reward, entropy, value, next_state, done = history[-1]
with torch.no_grad():
if not done:
next_value, _, _ = self.network(next_state)
else:
next_value = 0
delta = reward + self.gamma * next_value - value
for lpb, etp in zip(log_prob, entropy):
loss -= (delta * value + (self.large_i * lpb) + (1e-3 * etp))
self.large_i *= self.gamma
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
elif self.network_type == 'dqn':
loss = torch.zeros(1, 1)
            # do nothing if fewer transitions are stored than the mini-batch size
if len(self.replay_memory) < self.replay_memory.batch_size:
return
            # build a mini-batch of (state, action, state_next, reward, done)
transitions = np.array(self.replay_memory.sample(), dtype=object)
state_batch = torch.cat(tuple(transitions[:, 0]))
action_batch = torch.tensor(tuple(transitions[:, 1]))
next_states_batch = torch.cat(tuple(transitions[:, 2]))
reward_batch = torch.tensor(transitions[:, 3].astype(np.float))
done_batch = torch.tensor(tuple(transitions[:, 4]))
            # compute the target network outputs used as labels
action_target = []
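            # the target network returns two action-value heads; build a TD target for each head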
with torch.no_grad():
first, second = self.target_network(next_states_batch)
for act_val in [first, second]:
# if episode terminates at step j + 1 then just reward
max_act_val, _ = torch.max(act_val, dim=1)
target = reward_batch + self.gamma * (~done_batch) * max_act_val
action_target.append(target)
first, second = self.network(state_batch)
for idx, (act_val, target) in enumerate(zip([first, second], action_target)):
action_loss = torch.sum((target -
torch.gather(
act_val,
dim=1,
index=action_batch[:, idx].view(-1, 1))) ** 2)
loss += (action_loss / len(transitions))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
else:
raise Exception('Unknown network type update')
def update_network(self, step, verbose=False):
assert self.network_type == 'dqn'
if verbose:
print('update network, global_step {}'.format(step))
self.target_network.load_state_dict(self.network.state_dict())
def get_action(self, state, *args):
outputs = self.network.get_action(state, *args)
return outputs
class ReplayMemory:
"""
    Replay buffer used by DQN.
"""
def __init__(self, capacity, batch_size):
        self.capacity = capacity  # maximum number of stored transitions
        self.memory = []  # list holding the actual transitions
        self.index = 0  # index pointing at the next write position
self.batch_size = batch_size
def memorize(self, state, action, state_next, reward, done):
if len(self.memory) < self.capacity:
self.memory.append([state, action, state_next, reward, done])
else:
self.memory[self.index] = [state, action, state_next, reward, done]
self.index = (self.index + 1) % self.capacity
def sample(self):
"""
        Randomly sample batch_size stored transitions.
"""
return random.sample(self.memory, self.batch_size)
def reset(self):
"""
        Reset the replay buffer.
:return:
"""
self.index = 0
self.memory = []
def __len__(self):
return len(self.memory)
def discount_rewards(rewards, gamma):
"""
take 1D float array of rewards and compute discounted reward
"""
reward_shape = rewards.shape
if len(reward_shape) == 1:
discounted_r = np.zeros(shape=(*reward_shape, 1), dtype=np.float)
else:
discounted_r = np.zeros(shape=reward_shape, dtype=np.float)
running_add = 0
for t in reversed(range(0, rewards.size)):
running_add = running_add * gamma + rewards[t]
discounted_r[t] = running_add
return discounted_r
|
[
"random.sample",
"torch.max",
"numpy.zeros",
"torch.sum",
"torch.no_grad",
"torch.zeros"
] |
[((5858, 5901), 'random.sample', 'random.sample', (['self.memory', 'self.batch_size'], {}), '(self.memory, self.batch_size)\n', (5871, 5901), False, 'import random\n'), ((6310, 6360), 'numpy.zeros', 'np.zeros', ([], {'shape': '(*reward_shape, 1)', 'dtype': 'np.float'}), '(shape=(*reward_shape, 1), dtype=np.float)\n', (6318, 6360), True, 'import numpy as np\n'), ((6394, 6438), 'numpy.zeros', 'np.zeros', ([], {'shape': 'reward_shape', 'dtype': 'np.float'}), '(shape=reward_shape, dtype=np.float)\n', (6402, 6438), True, 'import numpy as np\n'), ((1692, 1709), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (1703, 1709), False, 'import torch\n'), ((2242, 2259), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (2253, 2259), False, 'import torch\n'), ((1955, 1992), 'torch.sum', 'torch.sum', (['(curr_r * lgp + 0.001 * etp)'], {}), '(curr_r * lgp + 0.001 * etp)\n', (1964, 1992), False, 'import torch\n'), ((2354, 2369), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2367, 2369), False, 'import torch\n'), ((2918, 2935), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (2929, 2935), False, 'import torch\n'), ((3664, 3679), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3677, 3679), False, 'import torch\n'), ((3912, 3937), 'torch.max', 'torch.max', (['act_val'], {'dim': '(1)'}), '(act_val, dim=1)\n', (3921, 3937), False, 'import torch\n')]
|
import sys
import argparse
import numpy as np
from dataclasses import dataclass
from mchap.application import baseclass
from mchap.application.baseclass import SampleAssemblyError, SAMPLE_ASSEMBLY_ERROR
from mchap.application.arguments import (
CALL_MCMC_PARSER_ARGUMENTS,
collect_call_mcmc_program_arguments,
)
from mchap.calling.classes import CallingMCMC
from mchap.calling.exact import genotype_likelihoods
from mchap.jitutils import natural_log_to_log10
from mchap.io import qual_of_prob
@dataclass
class program(baseclass.program):
mcmc_chains: int = 1
mcmc_steps: int = 1000
mcmc_burn: int = 500
mcmc_incongruence_threshold: float = 0.60
@classmethod
def cli(cls, command):
"""Program initialization from cli command
e.g. `program.cli(sys.argv)`
"""
parser = argparse.ArgumentParser("MCMC haplotype calling")
for arg in CALL_MCMC_PARSER_ARGUMENTS:
arg.add_to(parser)
if len(command) < 3:
parser.print_help()
sys.exit(1)
args = parser.parse_args(command[2:])
# sort argument details
arguments = collect_call_mcmc_program_arguments(args)
return cls(cli_command=command, **arguments)
def call_sample_genotypes(self, data):
"""De novo haplotype assembly of each sample.
Parameters
----------
data : LocusAssemblyData
With sampledata fields: "read_dists_unique", "read_dist_counts".
Returns
-------
data : LocusAssemblyData
With sampledata fields: "alleles", "haplotypes", "GQ", "GPM", "PHPM", "PHQ", "MCI"
and "GL", "GP" if specified.
"""
for field in [
"alleles",
"haplotypes",
"GQ",
"GPM",
"PHPM",
"PHQ",
"MCI",
"GL",
"GP",
"AFP",
]:
data.sampledata[field] = dict()
haplotypes = data.locus.encode_haplotypes()
for sample in data.samples:
# wrap in try clause to pass sample info back with any exception
try:
reads = data.sampledata["read_dists_unique"][sample]
                read_counts = data.sampledata["read_dist_counts"][sample]
# call haplotypes
trace = (
CallingMCMC(
ploidy=data.sample_ploidy[sample],
haplotypes=haplotypes,
inbreeding=data.sample_inbreeding[sample],
steps=self.mcmc_steps,
chains=self.mcmc_chains,
random_seed=self.random_seed,
)
.fit(
reads=reads,
read_counts=read_counts,
)
.burn(self.mcmc_burn)
)
incongruence = trace.replicate_incongruence(
threshold=self.mcmc_incongruence_threshold
)
posterior = trace.posterior()
alleles, genotype_prob, phenotype_prob = posterior.mode(phenotype=True)
# store variables
data.sampledata["alleles"][sample] = alleles
data.sampledata["haplotypes"][sample] = haplotypes[alleles]
data.sampledata["GQ"][sample] = qual_of_prob(genotype_prob)
data.sampledata["GPM"][sample] = np.round(genotype_prob, self.precision)
data.sampledata["PHPM"][sample] = np.round(
phenotype_prob, self.precision
)
data.sampledata["PHQ"][sample] = qual_of_prob(phenotype_prob)
data.sampledata["MCI"][sample] = incongruence
# posterior allele frequencies if requested
if "AFP" in data.formatfields:
frequencies = np.zeros(len(haplotypes))
alleles, counts = np.unique(trace.genotypes, return_counts=True)
frequencies[alleles] = counts / counts.sum()
data.sampledata["AFP"][sample] = np.round(
frequencies, self.precision
)
# genotype posteriors if requested
if "GP" in data.formatfields:
probabilities = posterior.as_array(len(haplotypes))
data.sampledata["GP"][sample] = np.round(
probabilities, self.precision
)
# genotype likelihoods if requested
if "GL" in data.formatfields:
llks = genotype_likelihoods(
reads=reads,
read_counts=read_counts,
ploidy=data.sample_ploidy[sample],
haplotypes=haplotypes,
)
data.sampledata["GL"][sample] = np.round(
natural_log_to_log10(llks), self.precision
)
# end of try clause for specific sample
except Exception as e:
path = data.sample_bams.get(sample)
message = SAMPLE_ASSEMBLY_ERROR.format(sample=sample, bam=path)
raise SampleAssemblyError(message) from e
return data
|
[
"mchap.io.qual_of_prob",
"mchap.application.baseclass.SAMPLE_ASSEMBLY_ERROR.format",
"numpy.unique",
"argparse.ArgumentParser",
"mchap.jitutils.natural_log_to_log10",
"sys.exit",
"mchap.application.baseclass.SampleAssemblyError",
"mchap.calling.exact.genotype_likelihoods",
"mchap.calling.classes.CallingMCMC",
"mchap.application.arguments.collect_call_mcmc_program_arguments",
"numpy.round"
] |
[((836, 885), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""MCMC haplotype calling"""'], {}), "('MCMC haplotype calling')\n", (859, 885), False, 'import argparse\n'), ((1149, 1190), 'mchap.application.arguments.collect_call_mcmc_program_arguments', 'collect_call_mcmc_program_arguments', (['args'], {}), '(args)\n', (1184, 1190), False, 'from mchap.application.arguments import CALL_MCMC_PARSER_ARGUMENTS, collect_call_mcmc_program_arguments\n'), ((1038, 1049), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1046, 1049), False, 'import sys\n'), ((3449, 3476), 'mchap.io.qual_of_prob', 'qual_of_prob', (['genotype_prob'], {}), '(genotype_prob)\n', (3461, 3476), False, 'from mchap.io import qual_of_prob\n'), ((3526, 3565), 'numpy.round', 'np.round', (['genotype_prob', 'self.precision'], {}), '(genotype_prob, self.precision)\n', (3534, 3565), True, 'import numpy as np\n'), ((3616, 3656), 'numpy.round', 'np.round', (['phenotype_prob', 'self.precision'], {}), '(phenotype_prob, self.precision)\n', (3624, 3656), True, 'import numpy as np\n'), ((3744, 3772), 'mchap.io.qual_of_prob', 'qual_of_prob', (['phenotype_prob'], {}), '(phenotype_prob)\n', (3756, 3772), False, 'from mchap.io import qual_of_prob\n'), ((4041, 4087), 'numpy.unique', 'np.unique', (['trace.genotypes'], {'return_counts': '(True)'}), '(trace.genotypes, return_counts=True)\n', (4050, 4087), True, 'import numpy as np\n'), ((4206, 4243), 'numpy.round', 'np.round', (['frequencies', 'self.precision'], {}), '(frequencies, self.precision)\n', (4214, 4243), True, 'import numpy as np\n'), ((4512, 4551), 'numpy.round', 'np.round', (['probabilities', 'self.precision'], {}), '(probabilities, self.precision)\n', (4520, 4551), True, 'import numpy as np\n'), ((4724, 4845), 'mchap.calling.exact.genotype_likelihoods', 'genotype_likelihoods', ([], {'reads': 'reads', 'read_counts': 'read_counts', 'ploidy': 'data.sample_ploidy[sample]', 'haplotypes': 'haplotypes'}), '(reads=reads, read_counts=read_counts, ploidy=data.\n sample_ploidy[sample], haplotypes=haplotypes)\n', (4744, 4845), False, 'from mchap.calling.exact import genotype_likelihoods\n'), ((5277, 5330), 'mchap.application.baseclass.SAMPLE_ASSEMBLY_ERROR.format', 'SAMPLE_ASSEMBLY_ERROR.format', ([], {'sample': 'sample', 'bam': 'path'}), '(sample=sample, bam=path)\n', (5305, 5330), False, 'from mchap.application.baseclass import SampleAssemblyError, SAMPLE_ASSEMBLY_ERROR\n'), ((5353, 5381), 'mchap.application.baseclass.SampleAssemblyError', 'SampleAssemblyError', (['message'], {}), '(message)\n', (5372, 5381), False, 'from mchap.application.baseclass import SampleAssemblyError, SAMPLE_ASSEMBLY_ERROR\n'), ((5046, 5072), 'mchap.jitutils.natural_log_to_log10', 'natural_log_to_log10', (['llks'], {}), '(llks)\n', (5066, 5072), False, 'from mchap.jitutils import natural_log_to_log10\n'), ((2401, 2599), 'mchap.calling.classes.CallingMCMC', 'CallingMCMC', ([], {'ploidy': 'data.sample_ploidy[sample]', 'haplotypes': 'haplotypes', 'inbreeding': 'data.sample_inbreeding[sample]', 'steps': 'self.mcmc_steps', 'chains': 'self.mcmc_chains', 'random_seed': 'self.random_seed'}), '(ploidy=data.sample_ploidy[sample], haplotypes=haplotypes,\n inbreeding=data.sample_inbreeding[sample], steps=self.mcmc_steps,\n chains=self.mcmc_chains, random_seed=self.random_seed)\n', (2412, 2599), False, 'from mchap.calling.classes import CallingMCMC\n')]
|
import pandas as pd
import os
import click
import numpy as np
opj = os.path.join
@click.command()
@click.option('--datadir', type=str, default='./data/lorenz/bias_experiment')
def main(datadir):
df = pd.read_pickle(opj(datadir, 'results.pkl'))
print(df)
print('\\toprule')
print('$\\sigma_w$ & $\\sigma_y$ & \\multicolumn{1}{c}{$\\sigma$} & \\multicolumn{1}{c}{$\\rho$} & \\multicolumn{1}{c}{$\\beta$} \\\\ \\midrule')
# $1\cdot 10^{-2}$&$1\cdot 10^{-2}$ & 10.017 (0.012) & 28.000 (0.001) & 2.668 (0.001) \\
# $5\cdot 10^{-2}$&$1\cdot 10^{-2}$ & 10.014 (0.018) & 28.002 (0.004) & \outside{2.671 (0.002)} \\
# $1\cdot 10^{-1}$&$1\cdot 10^{-2}$ & 10.051 (0.035) & 27.995 (0.013) & \outside{2.676 (0.003)} \\
# $1\cdot 10^{-2}$&$5\cdot 10^{-2}$ & 10.015 (0.016) & 27.998 (0.002) & 2.667 (0.001) \\
# $1\cdot 10^{-2}$&$1\cdot 10^{-1}$ & 10.011 (0.021) & 27.997 (0.004) & 2.666 (0.001) \\ \bottomrule
# \end{tabular}}
def check(ystd,wstd):
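        # print one LaTeX table row; wrap an estimate in \outside{} when it lies more than 2 standard errors
        # from the true Lorenz parameters (sigma=10, rho=28, beta=8/3)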
df_ = df.loc[(df['ystd']==ystd) & (df['wstd']==wstd)]
means = df_.mean()
stds = df_.std() / np.sqrt(len(df_))
sigmam = means['sigma']
sigmas = stds['sigma']
rhom = means['rho']
rhos = stds['rho']
betam = means['beta']
betas = stds['beta']
if np.abs(sigmam - 10.) / sigmas > 2.0:
sigmastr = '\\outside{%.3f (%.3f)}' % (sigmam, sigmas)
else:
sigmastr = '%.3f (%.3f)' % (sigmam,sigmas)
if np.abs(rhom - 28.) / rhos > 2.0:
rhostr = '\\outside{%.3f (%.3f)}' % (rhom, rhos)
else:
rhostr = '%.3f (%.3f)' % (rhom, rhos)
if np.abs(betam - 8./3.) / betas > 2.0:
betastr = '\\outside{%.3f (%.3f)}' % (betam, betas)
else:
betastr = '%.3f (%.3f)' % (betam, betas)
line = '$%.3f$ & $%.2f$ & %s & %s & %s \\\\' % (wstd, ystd, sigmastr, rhostr, betastr)
print(line)
ystd = 1e-2
for wstd in [1e-3, 1e-2, 1e-1]:
check(ystd, wstd)
wstd = 1e-3
for ystd in [5e-2, 1e-1]:
check(ystd, wstd)
if __name__ == '__main__':
main()
|
[
"click.option",
"numpy.abs",
"click.command"
] |
[((85, 100), 'click.command', 'click.command', ([], {}), '()\n', (98, 100), False, 'import click\n'), ((102, 178), 'click.option', 'click.option', (['"""--datadir"""'], {'type': 'str', 'default': '"""./data/lorenz/bias_experiment"""'}), "('--datadir', type=str, default='./data/lorenz/bias_experiment')\n", (114, 178), False, 'import click\n'), ((1314, 1335), 'numpy.abs', 'np.abs', (['(sigmam - 10.0)'], {}), '(sigmam - 10.0)\n', (1320, 1335), True, 'import numpy as np\n'), ((1499, 1518), 'numpy.abs', 'np.abs', (['(rhom - 28.0)'], {}), '(rhom - 28.0)\n', (1505, 1518), True, 'import numpy as np\n'), ((1669, 1694), 'numpy.abs', 'np.abs', (['(betam - 8.0 / 3.0)'], {}), '(betam - 8.0 / 3.0)\n', (1675, 1694), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# projectS and projectC were written by <NAME>.
import time
start = time.time()
import argparse
import cv2
import os
import dlib
import numpy as np
np.set_printoptions(precision=2)
import openface
from matplotlib import cm
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
def getRep(bgrImg):
start = time.time()
if bgrImg is None:
raise Exception("Unable to load image/frame")
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
if args.verbose:
print(" + Original size: {}".format(rgbImg.shape))
if args.verbose:
print("Loading the image took {} seconds.".format(time.time() - start))
start = time.time()
# Get all bounding boxes
bb = align.getAllFaceBoundingBoxes(rgbImg)
if bb is None:
# raise Exception("Unable to find a face: {}".format(imgPath))
return None
if args.verbose:
print("Face detection took {} seconds.".format(time.time() - start))
start = time.time()
alignedFaces = []
for box in bb:
alignedFaces.append(
align.align(
args.imgDim,
rgbImg,
box,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE))
if alignedFaces is None:
raise Exception("Unable to align the frame")
if args.verbose:
print("Alignment took {} seconds.".format(time.time() - start))
start = time.time()
reps = []
for alignedFace in alignedFaces:
reps.append(net.forward(alignedFace))
if args.verbose:
print("Neural network forward pass took {} seconds.".format(
time.time() - start))
return reps
def projectS(rho, theta, z):
p = np.array([np.sqrt(3.) * rho * (np.cos(theta) + np.sin(theta)) / 2.,
z + 1. + rho * (np.cos(theta) - np.sin(theta)) / 2.])
p += np.array([1.5, 0.5])
p /= 3.
return p
def projectC(x, y, z):
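    # convert Cartesian (x, y, z) to cylindrical (rho, theta, z) and reuse the spherical-grid projection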
rho = np.sqrt(x**2 + y**2)
if x == 0 and y == 0:
theta = 0
elif x >= 0:
theta = np.arcsin(y / rho)
else:
theta = -np.arcsin(y / rho) + np.pi
return projectS(rho, theta, z)
def draw(pts=[], clrs=[], cSz=400):
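    # render a wireframe unit sphere (constant-z and constant-x circles plus the three axes) and
    # plot each embedding point with drop lines to the axes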
def toFrame(x):
return tuple((cSz * x).astype(np.int32))
cFrame = np.full((cSz, cSz, 3), 255, dtype=np.uint8)
for z in np.linspace(-1, 1, 9):
r = np.sqrt(1. - z**2)
last = None
for theta in np.linspace(0, 2 * np.pi, 50):
x = toFrame(projectS(r, theta, z))
if last is not None:
cv2.line(cFrame, x, last, color=(0, 0, 0))
last = x
for x in np.linspace(-1, 1, 9):
last = None
for theta in np.linspace(0, 2 * np.pi, 50):
r = np.sqrt(1. - x**2)
z = r * np.sin(theta)
y = r * np.cos(theta)
# x = toFrame(projectS(r, theta, z))
p = toFrame(projectC(x, y, z))
if last is not None:
cv2.line(cFrame, p, last, color=(0, 0, 0))
last = p
s = 1
x = toFrame(projectC(-s, 0, 0))
y = toFrame(projectC(s, 0, 0))
cv2.line(cFrame, x, y, color=(0, 0, 0), thickness=4)
x = toFrame(projectC(0, -s, 0))
y = toFrame(projectC(0, s, 0))
cv2.line(cFrame, x, y, color=(0, 0, 0), thickness=4)
x = toFrame(projectC(0, 0, -s))
y = toFrame(projectC(0, 0, s))
cv2.line(cFrame, x, y, color=(0, 0, 0), thickness=4)
for pt, c in zip(pts, clrs):
fPt = toFrame(projectC(pt[0], pt[1], pt[2]))
fPt_noz = toFrame(projectC(pt[0], pt[1], 0))
fPt_nozy = toFrame(projectC(pt[0], 0, 0))
fPt_nozx = toFrame(projectC(0, pt[1], 0))
cv2.line(cFrame, fPt, fPt_noz, color=c, thickness=2)
cv2.line(cFrame, fPt_noz, fPt_nozy, color=c, thickness=2)
cv2.line(cFrame, fPt_noz, fPt_nozx, color=c, thickness=2)
cv2.circle(cFrame, fPt, 5, color=c, thickness=-1)
return cFrame
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--dlibFacePredictor',
type=str,
help="Path to dlib's face predictor.",
default=os.path.join(
dlibModelDir,
"shape_predictor_68_face_landmarks.dat"))
parser.add_argument(
'--networkModel',
type=str,
help="Path to Torch network model.",
default='nn4.small2.3d.v1.t7')
# Download the 3D model from:
# https://storage.cmusatyalab.org/openface-models/nn4.small2.3d.v1.t7
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument(
'--captureDevice',
type=int,
default=0,
help='Capture device. 0 for latop webcam and 1 for usb webcam')
# parser.add_argument('--width', type=int, default=640)
# parser.add_argument('--height', type=int, default=480)
parser.add_argument('--width', type=int, default=1280)
parser.add_argument('--height', type=int, default=800)
    parser.add_argument('--scale', type=float, default=0.25)
parser.add_argument('--threshold', type=float, default=0.5)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(
args.networkModel,
imgDim=args.imgDim,
cuda=args.cuda)
# Capture device. Usually 0 will be webcam and 1 will be usb cam.
video_capture = cv2.VideoCapture(args.captureDevice)
video_capture.set(3, args.width)
video_capture.set(4, args.height)
cv2.namedWindow('video', cv2.WINDOW_NORMAL)
class Tracker:
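        """Track one detected face with a dlib correlation tracker and exponentially smooth its embedding."""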
def __init__(self, img, bb, rep):
self.t = dlib.correlation_tracker()
self.t.start_track(img, bb)
self.rep = rep
self.bb = bb
self.pings = 0
def updateRep(self, rep):
self.pings = 0
alpha = 0.9
self.rep = alpha * self.rep + (1. - alpha) * rep
return self.rep
def overlap(self, bb):
p = float(self.bb.intersect(bb).area()) / float(self.bb.area())
return p > 0.3
def ping(self):
self.pings += 1
trackers = []
while True:
ret, frame = video_capture.read()
frame = cv2.flip(frame, 1)
frameSmall = cv2.resize(frame, (int(args.width * args.scale),
int(args.height * args.scale)))
bbs = align.getAllFaceBoundingBoxes(frameSmall)
pts, clrs = [], []
for i, bb in enumerate(bbs):
alignedFace = align.align(96, frameSmall, bb,
landmarkIndices=openface.AlignDlib.INNER_EYES_AND_BOTTOM_LIP)
rep = net.forward(alignedFace)
center = bb.center()
centerI = 0.7 * center.x * center.y / \
(args.scale * args.scale * args.width * args.height)
color_np = cm.Set1(centerI)
color_cv = list(np.multiply(color_np[:3], 255))
bl = (int(bb.left() / args.scale), int(bb.bottom() / args.scale))
tr = (int(bb.right() / args.scale), int(bb.top() / args.scale))
cv2.rectangle(frame, bl, tr, color=color_cv, thickness=3)
tracked = False
for i in xrange(len(trackers) - 1, -1, -1):
t = trackers[i]
t.t.update(frame)
if t.overlap(bb):
rep = t.updateRep(rep)
pts.append(rep)
clrs.append(color_cv)
tracked = True
break
if not tracked:
trackers.append(Tracker(frame, bb, rep))
pts.append(rep)
clrs.append(color_cv)
for i in xrange(len(trackers) - 1, -1, -1):
t = trackers[i]
t.ping()
if t.pings > 10:
del trackers[i]
continue
for j in range(i):
if t.t.get_position().intersect(trackers[j].t.get_position()).area() / \
t.t.get_position().area() > 0.4:
del trackers[i]
continue
cSz = 450
sphere = np.copy(frame)
sphere[0:cSz, 0:cSz, :] = draw(pts, clrs, cSz)
alpha = 0.25
beta = 1. - alpha
cv2.putText(sphere, "CMU OpenFace", (50, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 2.,
(0, 0, 0), 1, cv2.cv.CV_AA)
cv2.addWeighted(frame, alpha, sphere, beta, 0.0, frame)
cv2.imshow('video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
|
[
"openface.TorchNeuralNet",
"cv2.rectangle",
"numpy.sqrt",
"matplotlib.cm.Set1",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"numpy.sin",
"numpy.multiply",
"argparse.ArgumentParser",
"cv2.line",
"cv2.addWeighted",
"numpy.linspace",
"cv2.waitKey",
"dlib.correlation_tracker",
"cv2.putText",
"cv2.circle",
"numpy.cos",
"cv2.cvtColor",
"openface.AlignDlib",
"time.time",
"cv2.namedWindow",
"numpy.set_printoptions",
"numpy.copy",
"cv2.flip",
"os.path.join",
"numpy.arcsin",
"os.path.realpath",
"cv2.VideoCapture",
"numpy.full"
] |
[((92, 103), 'time.time', 'time.time', ([], {}), '()\n', (101, 103), False, 'import time\n'), ((174, 206), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (193, 206), True, 'import numpy as np\n'), ((316, 353), 'os.path.join', 'os.path.join', (['fileDir', '""".."""', '"""models"""'], {}), "(fileDir, '..', 'models')\n", (328, 353), False, 'import os\n'), ((369, 399), 'os.path.join', 'os.path.join', (['modelDir', '"""dlib"""'], {}), "(modelDir, 'dlib')\n", (381, 399), False, 'import os\n'), ((277, 303), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (293, 303), False, 'import os\n'), ((434, 445), 'time.time', 'time.time', ([], {}), '()\n', (443, 445), False, 'import time\n'), ((537, 576), 'cv2.cvtColor', 'cv2.cvtColor', (['bgrImg', 'cv2.COLOR_BGR2RGB'], {}), '(bgrImg, cv2.COLOR_BGR2RGB)\n', (549, 576), False, 'import cv2\n'), ((773, 784), 'time.time', 'time.time', ([], {}), '()\n', (782, 784), False, 'import time\n'), ((1084, 1095), 'time.time', 'time.time', ([], {}), '()\n', (1093, 1095), False, 'import time\n'), ((1528, 1539), 'time.time', 'time.time', ([], {}), '()\n', (1537, 1539), False, 'import time\n'), ((1968, 1988), 'numpy.array', 'np.array', (['[1.5, 0.5]'], {}), '([1.5, 0.5])\n', (1976, 1988), True, 'import numpy as np\n'), ((2049, 2073), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (2056, 2073), True, 'import numpy as np\n'), ((2377, 2420), 'numpy.full', 'np.full', (['(cSz, cSz, 3)', '(255)'], {'dtype': 'np.uint8'}), '((cSz, cSz, 3), 255, dtype=np.uint8)\n', (2384, 2420), True, 'import numpy as np\n'), ((2435, 2456), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(9)'], {}), '(-1, 1, 9)\n', (2446, 2456), True, 'import numpy as np\n'), ((2735, 2756), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(9)'], {}), '(-1, 1, 9)\n', (2746, 2756), True, 'import numpy as np\n'), ((3224, 3276), 'cv2.line', 'cv2.line', (['cFrame', 'x', 'y'], {'color': '(0, 0, 0)', 'thickness': '(4)'}), '(cFrame, x, y, color=(0, 0, 0), thickness=4)\n', (3232, 3276), False, 'import cv2\n'), ((3353, 3405), 'cv2.line', 'cv2.line', (['cFrame', 'x', 'y'], {'color': '(0, 0, 0)', 'thickness': '(4)'}), '(cFrame, x, y, color=(0, 0, 0), thickness=4)\n', (3361, 3405), False, 'import cv2\n'), ((3482, 3534), 'cv2.line', 'cv2.line', (['cFrame', 'x', 'y'], {'color': '(0, 0, 0)', 'thickness': '(4)'}), '(cFrame, x, y, color=(0, 0, 0), thickness=4)\n', (3490, 3534), False, 'import cv2\n'), ((4086, 4111), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4109, 4111), False, 'import argparse\n'), ((5400, 5442), 'openface.AlignDlib', 'openface.AlignDlib', (['args.dlibFacePredictor'], {}), '(args.dlibFacePredictor)\n', (5418, 5442), False, 'import openface\n'), ((5453, 5531), 'openface.TorchNeuralNet', 'openface.TorchNeuralNet', (['args.networkModel'], {'imgDim': 'args.imgDim', 'cuda': 'args.cuda'}), '(args.networkModel, imgDim=args.imgDim, cuda=args.cuda)\n', (5476, 5531), False, 'import openface\n'), ((5648, 5684), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.captureDevice'], {}), '(args.captureDevice)\n', (5664, 5684), False, 'import cv2\n'), ((5765, 5808), 'cv2.namedWindow', 'cv2.namedWindow', (['"""video"""', 'cv2.WINDOW_NORMAL'], {}), "('video', cv2.WINDOW_NORMAL)\n", (5780, 5808), False, 'import cv2\n'), ((8917, 8940), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8938, 8940), False, 'import cv2\n'), ((2470, 2491), 'numpy.sqrt', 'np.sqrt', (['(1.0 - z ** 2)'], 
{}), '(1.0 - z ** 2)\n', (2477, 2491), True, 'import numpy as np\n'), ((2530, 2559), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(50)'], {}), '(0, 2 * np.pi, 50)\n', (2541, 2559), True, 'import numpy as np\n'), ((2799, 2828), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(50)'], {}), '(0, 2 * np.pi, 50)\n', (2810, 2828), True, 'import numpy as np\n'), ((3783, 3835), 'cv2.line', 'cv2.line', (['cFrame', 'fPt', 'fPt_noz'], {'color': 'c', 'thickness': '(2)'}), '(cFrame, fPt, fPt_noz, color=c, thickness=2)\n', (3791, 3835), False, 'import cv2\n'), ((3844, 3901), 'cv2.line', 'cv2.line', (['cFrame', 'fPt_noz', 'fPt_nozy'], {'color': 'c', 'thickness': '(2)'}), '(cFrame, fPt_noz, fPt_nozy, color=c, thickness=2)\n', (3852, 3901), False, 'import cv2\n'), ((3910, 3967), 'cv2.line', 'cv2.line', (['cFrame', 'fPt_noz', 'fPt_nozx'], {'color': 'c', 'thickness': '(2)'}), '(cFrame, fPt_noz, fPt_nozx, color=c, thickness=2)\n', (3918, 3967), False, 'import cv2\n'), ((3976, 4025), 'cv2.circle', 'cv2.circle', (['cFrame', 'fPt', '(5)'], {'color': 'c', 'thickness': '(-1)'}), '(cFrame, fPt, 5, color=c, thickness=-1)\n', (3986, 4025), False, 'import cv2\n'), ((6496, 6514), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (6504, 6514), False, 'import cv2\n'), ((8445, 8459), 'numpy.copy', 'np.copy', (['frame'], {}), '(frame)\n', (8452, 8459), True, 'import numpy as np\n'), ((8570, 8685), 'cv2.putText', 'cv2.putText', (['sphere', '"""CMU OpenFace"""', '(50, 30)', 'cv2.FONT_HERSHEY_COMPLEX_SMALL', '(2.0)', '(0, 0, 0)', '(1)', 'cv2.cv.CV_AA'], {}), "(sphere, 'CMU OpenFace', (50, 30), cv2.\n FONT_HERSHEY_COMPLEX_SMALL, 2.0, (0, 0, 0), 1, cv2.cv.CV_AA)\n", (8581, 8685), False, 'import cv2\n'), ((8728, 8783), 'cv2.addWeighted', 'cv2.addWeighted', (['frame', 'alpha', 'sphere', 'beta', '(0.0)', 'frame'], {}), '(frame, alpha, sphere, beta, 0.0, frame)\n', (8743, 8783), False, 'import cv2\n'), ((8792, 8818), 'cv2.imshow', 'cv2.imshow', (['"""video"""', 'frame'], {}), "('video', frame)\n", (8802, 8818), False, 'import cv2\n'), ((2147, 2165), 'numpy.arcsin', 'np.arcsin', (['(y / rho)'], {}), '(y / rho)\n', (2156, 2165), True, 'import numpy as np\n'), ((2846, 2867), 'numpy.sqrt', 'np.sqrt', (['(1.0 - x ** 2)'], {}), '(1.0 - x ** 2)\n', (2853, 2867), True, 'import numpy as np\n'), ((4249, 4316), 'os.path.join', 'os.path.join', (['dlibModelDir', '"""shape_predictor_68_face_landmarks.dat"""'], {}), "(dlibModelDir, 'shape_predictor_68_face_landmarks.dat')\n", (4261, 4316), False, 'import os\n'), ((5893, 5919), 'dlib.correlation_tracker', 'dlib.correlation_tracker', ([], {}), '()\n', (5917, 5919), False, 'import dlib\n'), ((7158, 7174), 'matplotlib.cm.Set1', 'cm.Set1', (['centerI'], {}), '(centerI)\n', (7165, 7174), False, 'from matplotlib import cm\n'), ((7402, 7459), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'bl', 'tr'], {'color': 'color_cv', 'thickness': '(3)'}), '(frame, bl, tr, color=color_cv, thickness=3)\n', (7415, 7459), False, 'import cv2\n'), ((2657, 2699), 'cv2.line', 'cv2.line', (['cFrame', 'x', 'last'], {'color': '(0, 0, 0)'}), '(cFrame, x, last, color=(0, 0, 0))\n', (2665, 2699), False, 'import cv2\n'), ((2885, 2898), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2891, 2898), True, 'import numpy as np\n'), ((2919, 2932), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2925, 2932), True, 'import numpy as np\n'), ((3074, 3116), 'cv2.line', 'cv2.line', (['cFrame', 'p', 'last'], {'color': '(0, 0, 0)'}), '(cFrame, p, last, color=(0, 0, 0))\n', (3082, 3116), False, 
'import cv2\n'), ((7203, 7233), 'numpy.multiply', 'np.multiply', (['color_np[:3]', '(255)'], {}), '(color_np[:3], 255)\n', (7214, 7233), True, 'import numpy as np\n'), ((8831, 8845), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8842, 8845), False, 'import cv2\n'), ((738, 749), 'time.time', 'time.time', ([], {}), '()\n', (747, 749), False, 'import time\n'), ((1049, 1060), 'time.time', 'time.time', ([], {}), '()\n', (1058, 1060), False, 'import time\n'), ((1493, 1504), 'time.time', 'time.time', ([], {}), '()\n', (1502, 1504), False, 'import time\n'), ((1741, 1752), 'time.time', 'time.time', ([], {}), '()\n', (1750, 1752), False, 'import time\n'), ((2193, 2211), 'numpy.arcsin', 'np.arcsin', (['(y / rho)'], {}), '(y / rho)\n', (2202, 2211), True, 'import numpy as np\n'), ((1829, 1841), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (1836, 1841), True, 'import numpy as np\n'), ((1850, 1863), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1856, 1863), True, 'import numpy as np\n'), ((1866, 1879), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1872, 1879), True, 'import numpy as np\n'), ((1921, 1934), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1927, 1934), True, 'import numpy as np\n'), ((1937, 1950), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1943, 1950), True, 'import numpy as np\n')]
|
# The dataset code has been adapted from:
# https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
# from https://github.com/pytorch/tutorials
# which has been distributed under the following license:
################################################################################
# BSD 3-Clause License
#
# Copyright (c) 2017, Pytorch contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
# For the Avalanche data loader adaptation:
################################################################################
# Copyright (c) 2022 ContinualAI #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 21-03-2022 #
# Author: <NAME> #
# #
# E-mail: <EMAIL> #
# Website: www.continualai.org #
################################################################################
from pathlib import Path
from typing import Union
import numpy as np
import torch
from PIL import Image
from torchvision.datasets.folder import default_loader
from avalanche.benchmarks.datasets import (
SimpleDownloadableDataset,
default_dataset_location,
)
from avalanche.benchmarks.datasets.penn_fudan.penn_fudan_data import (
penn_fudan_data,
)
def default_mask_loader(mask_path):
return Image.open(mask_path)
class PennFudanDataset(SimpleDownloadableDataset):
"""
The Penn-Fudan Pedestrian detection and segmentation dataset
Adapted from the "TorchVision Object Detection Finetuning Tutorial":
https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
"""
def __init__(
self,
root: Union[str, Path] = None,
*,
transform=None,
loader=default_loader,
mask_loader=default_mask_loader,
download=True
):
"""
Creates an instance of the Penn-Fudan dataset.
:param root: The directory where the dataset can be found or downloaded.
Defaults to None, which means that the default location for
"pennfudanped" will be used.
:param transform: The transformation to apply to (img, annotations)
values.
:param loader: The image loader to use.
:param mask_loader: The mask image loader to use.
:param download: If True, the dataset will be downloaded if needed.
"""
if root is None:
root = default_dataset_location("pennfudanped")
self.imgs = None
self.masks = None
self.targets = None
self.transform = transform
self.loader = loader
self.mask_loader = mask_loader
super().__init__(
root,
penn_fudan_data[0],
penn_fudan_data[1],
download=download,
verbose=True,
)
self._load_dataset()
def _load_metadata(self):
# load all image files, sorting them to
# ensure that they are aligned
self.imgs = (self.root / "PennFudanPed" / "PNGImages").iterdir()
self.masks = (self.root / "PennFudanPed" / "PedMasks").iterdir()
self.imgs = list(sorted(self.imgs))
self.masks = list(sorted(self.masks))
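        # Note (descriptive, inferred from the code below): a full detection target is
        # pre-computed for every image once at load time, so __getitem__ only has to
        # load and transform the image.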
self.targets = [self.make_targets(i) for i in range(len(self.imgs))]
return Path(self.imgs[0]).exists() and Path(self.masks[0]).exists()
def make_targets(self, idx):
# load images and masks
mask_path = self.masks[idx]
# note that we haven't converted the mask to RGB,
# because each color corresponds to a different instance
# with 0 being background
mask = self.mask_loader(mask_path)
# convert the PIL Image into a numpy array
mask = np.array(mask)
# instances are encoded as different colors
obj_ids = np.unique(mask)
# first id is the background, so remove it
obj_ids = obj_ids[1:]
# split the color-encoded mask into a set
# of binary masks
masks = mask == obj_ids[:, None, None]
# get bounding box coordinates for each mask
num_objs = len(obj_ids)
boxes = []
for i in range(num_objs):
pos = np.where(masks[i])
xmin = np.min(pos[1])
xmax = np.max(pos[1])
ymin = np.min(pos[0])
ymax = np.max(pos[0])
boxes.append([xmin, ymin, xmax, ymax])
# convert everything into a torch.Tensor
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# there is only one class
labels = torch.ones((num_objs,), dtype=torch.int64)
masks = torch.as_tensor(masks, dtype=torch.uint8)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
# suppose all instances are not crowd
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
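        # Pack everything into the detection-style target dict used by the torchvision
        # tutorial this class is adapted from: boxes (N, 4) float32, labels (N,) int64
        # (a single "pedestrian" class), masks (N, H, W) uint8, image_id (1,),
        # area (N,) and iscrowd (N,).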
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["masks"] = masks
target["image_id"] = image_id
target["area"] = area
target["iscrowd"] = iscrowd
return target
def __getitem__(self, idx):
target = self.targets[idx]
img_path = self.imgs[idx]
img = self.loader(img_path)
if self.transform is not None:
img, target = self.transform(img, target)
return img, target
def __len__(self):
return len(self.imgs)
if __name__ == "__main__":
# this little example script can be used to visualize the first image
# loaded from the dataset.
from torch.utils.data.dataloader import DataLoader
import matplotlib.pyplot as plt
from torchvision import transforms
import torch
train_data = PennFudanDataset(
transform=lambda im, ann: (transforms.ToTensor()(im), ann)
)
dataloader = DataLoader(train_data, batch_size=1)
for batch_data in dataloader:
x, y = batch_data
plt.imshow(transforms.ToPILImage()(torch.squeeze(x)))
plt.show()
print(x.shape)
print(y)
break
__all__ = ["PennFudanDataset"]
|
[
"torch.squeeze",
"torch.as_tensor",
"PIL.Image.open",
"numpy.unique",
"matplotlib.pyplot.show",
"avalanche.benchmarks.datasets.default_dataset_location",
"numpy.where",
"torchvision.transforms.ToPILImage",
"pathlib.Path",
"torch.utils.data.dataloader.DataLoader",
"numpy.max",
"numpy.array",
"torch.tensor",
"numpy.min",
"torchvision.transforms.ToTensor",
"torch.zeros",
"torch.ones"
] |
[((3270, 3291), 'PIL.Image.open', 'Image.open', (['mask_path'], {}), '(mask_path)\n', (3280, 3291), False, 'from PIL import Image\n'), ((7804, 7840), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['train_data'], {'batch_size': '(1)'}), '(train_data, batch_size=1)\n', (7814, 7840), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((5686, 5700), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (5694, 5700), True, 'import numpy as np\n'), ((5771, 5786), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (5780, 5786), True, 'import numpy as np\n'), ((6421, 6464), 'torch.as_tensor', 'torch.as_tensor', (['boxes'], {'dtype': 'torch.float32'}), '(boxes, dtype=torch.float32)\n', (6436, 6464), False, 'import torch\n'), ((6516, 6558), 'torch.ones', 'torch.ones', (['(num_objs,)'], {'dtype': 'torch.int64'}), '((num_objs,), dtype=torch.int64)\n', (6526, 6558), False, 'import torch\n'), ((6575, 6616), 'torch.as_tensor', 'torch.as_tensor', (['masks'], {'dtype': 'torch.uint8'}), '(masks, dtype=torch.uint8)\n', (6590, 6616), False, 'import torch\n'), ((6637, 6656), 'torch.tensor', 'torch.tensor', (['[idx]'], {}), '([idx])\n', (6649, 6656), False, 'import torch\n'), ((6794, 6837), 'torch.zeros', 'torch.zeros', (['(num_objs,)'], {'dtype': 'torch.int64'}), '((num_objs,), dtype=torch.int64)\n', (6805, 6837), False, 'import torch\n'), ((7972, 7982), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7980, 7982), True, 'import matplotlib.pyplot as plt\n'), ((4378, 4418), 'avalanche.benchmarks.datasets.default_dataset_location', 'default_dataset_location', (['"""pennfudanped"""'], {}), "('pennfudanped')\n", (4402, 4418), False, 'from avalanche.benchmarks.datasets import SimpleDownloadableDataset, default_dataset_location\n'), ((6149, 6167), 'numpy.where', 'np.where', (['masks[i]'], {}), '(masks[i])\n', (6157, 6167), True, 'import numpy as np\n'), ((6187, 6201), 'numpy.min', 'np.min', (['pos[1]'], {}), '(pos[1])\n', (6193, 6201), True, 'import numpy as np\n'), ((6221, 6235), 'numpy.max', 'np.max', (['pos[1]'], {}), '(pos[1])\n', (6227, 6235), True, 'import numpy as np\n'), ((6255, 6269), 'numpy.min', 'np.min', (['pos[0]'], {}), '(pos[0])\n', (6261, 6269), True, 'import numpy as np\n'), ((6289, 6303), 'numpy.max', 'np.max', (['pos[0]'], {}), '(pos[0])\n', (6295, 6303), True, 'import numpy as np\n'), ((7921, 7944), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (7942, 7944), False, 'from torchvision import transforms\n'), ((7945, 7961), 'torch.squeeze', 'torch.squeeze', (['x'], {}), '(x)\n', (7958, 7961), False, 'import torch\n'), ((5256, 5274), 'pathlib.Path', 'Path', (['self.imgs[0]'], {}), '(self.imgs[0])\n', (5260, 5274), False, 'from pathlib import Path\n'), ((5288, 5307), 'pathlib.Path', 'Path', (['self.masks[0]'], {}), '(self.masks[0])\n', (5292, 5307), False, 'from pathlib import Path\n'), ((7749, 7770), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7768, 7770), False, 'from torchvision import transforms\n')]
|
import sys
import functools
import signal
import select
import socket
import numpy as np
import pickle
import matplotlib.pyplot as plt
import time
import datetime
from multiprocessing import Process, Queue
sys.path.append('../dhmsw/')
import interface
import telemetry_iface_ag
import struct
PLOT = True
headerStruct = struct.Struct('III')
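# Wire format (inferred from how headerStruct is unpacked below): every message starts
# with a 12-byte header of three unsigned 32-bit integers (msg_id, srcid, totalbytes).
# The receive loop adds the header size to totalbytes to know how many bytes make up
# one complete framed message.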
class guiclient(object):
def __init__(self):
self.sock = None
self.displaythread = Process(target=self.DisplayThread)
self.displaythread.daemon = True
self.displayQ = Queue()
self.exit = False
self.maxlen = 150995023
for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
signal.signal(sig, self.signal_handler)
def signal_handler(self, signal, frame):
self.exit = True
self.displayQ.put(None)
    def restore_frame(self, data, meta, z):
        """Re-expand a frame that was decimated by 'compval': every compval-th pixel
        of the full w*h buffer z is filled from the received data array."""
        w, h, compval, val, size, actualsize, ts, gain, ccdtemp = meta
        dtidx = 0
        for i in range(0, w * h, compval):
            z[i] = data[dtidx]
            dtidx += 1
def DisplayThread(self):
if PLOT:
f, axes = plt.subplots(sharex=True)
for i in range(1):
#axes[i].imshow(z, extent=[0,2448,0,2050], aspect="auto", cmap='gray')
axes.clear()
#axes.imshow(z, extent=[0,2448,0,2050], aspect="auto", cmap='gray')
reconst_telemetry = telemetry_iface_ag.Reconstruction_Telemetry()
heartbeat_telemetry = telemetry_iface_ag.Heartbeat_Telemetry()
framesource_telemetry = telemetry_iface_ag.Framesource_Telemetry()
datalogger_telemetry = telemetry_iface_ag.Datalogger_Telemetry()
guiserver_telemetry = telemetry_iface_ag.Guiserver_Telemetry()
session_telemetry = telemetry_iface_ag.Session_Telemetry()
hologram_telemetry = telemetry_iface_ag.Hologram_Telemetry()
fouriermask_telemetry = telemetry_iface_ag.Fouriermask_Telemetry()
while True:
msg = self.displayQ.get()
if msg is None:
break
#print("**************** Display Thread")
msgid, srcid, totalbytes= headerStruct.unpack(msg[0:struct.calcsize(headerStruct.format)])
meta = (msgid, srcid, totalbytes)
offset = struct.calcsize(headerStruct.format)
#print('offset=%d'%(offset))
data = None
if srcid == interface.SRCID_TELEMETRY_RECONSTRUCTION:
print('Received RECONSTRUCTION Telemetry')
data = reconst_telemetry.unpack_from(msg, offset=offset)
elif srcid == interface.SRCID_TELEMETRY_HEARTBEAT:
data = heartbeat_telemetry.unpack_from(msg, offset=offset)
elif srcid == interface.SRCID_TELEMETRY_FRAMESOURCE:
data = framesource_telemetry.unpack_from(msg, offset=offset)
print('Framesource state: ', data.state)
elif srcid == interface.SRCID_TELEMETRY_SESSION:
data = session_telemetry.unpack_from(msg, offset=offset)
elif srcid == interface.SRCID_TELEMETRY_DATALOGGER:
data = datalogger_telemetry.unpack_from(msg, offset=offset)
elif srcid == interface.SRCID_TELEMETRY_HOLOGRAM:
data = hologram_telemetry.unpack_from(msg, offset=offset)
elif srcid == interface.SRCID_TELEMETRY_GUISERVER:
data = guiserver_telemetry.unpack_from(msg, offset=offset)
elif srcid == interface.SRCID_TELEMETRY_FOURIERMASK:
data = fouriermask_telemetry.unpack_from(msg, offset=offset)
if PLOT:
mask = np.frombuffer(data.mask, dtype=np.uint8).reshape((2048,2048))
#mask = np.asarray(data.mask,dtype=np.int8).reshape((2048,2048))
axes.clear()
#axes.imshow(mask[:,:], extent=[2048,0,0,2048], aspect="auto")
axes.imshow(mask[:,:], aspect="auto")
plt.suptitle(repr(time.time()))
#axes.set_ylim(axes.get_ylim()[::-1])
plt.draw()
plt.pause(0.001)
else:
print('Unknown Telemetry')
if data and srcid != interface.SRCID_TELEMETRY_HEARTBEAT:
print(time.time(), datetime.datetime.now())
print(data)
pass
print('End of DisplayThread')
def connect_to_server(self, server, port):
#headerStruct = struct.Struct('HHBIIIHH')
totlen = 0
count = 0
        ### Continuous receive of data
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((server, port))
self.readfds = [self.sock]
### Start Display Thread
self.displaythread.start()
length = None
buf = b''
data = b''
msg=b''
lasttime = time.time()
meta = None
totalbytes = 0
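        # Re-framing loop (descriptive of the code below): accumulate raw TCP bytes in
        # `data`, parse the 12-byte header once enough bytes have arrived, then slice
        # complete messages of `totalbytes` bytes off the front of the buffer and hand
        # each one to the display thread via displayQ.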
while True:
infds, outfds, errfds = select.select(self.readfds, [], [], 5)
if not (infds or outfds or errfds):
continue
if self.exit: break
for s in infds:
if s is self.sock:
### Get as much data as we can
packet = self.sock.recv(255)
if not packet:
self.exit = True
self.displayQ.put_nowait(None)
break
data += packet
datalen = len(data)
#print('len packet= %d'%(len(packet)))
                    ### If we haven't parsed the header/meta yet, do it now.
#if meta is None and datalen > struct.calcsize(headerStruct.format)+25:
if meta is None and datalen >= struct.calcsize(headerStruct.format):
#packet = self.sock.recv(12)
#print("Recieve: %s"%(':'.join("{:02x}".format(ord(c)) for c in packet[0:50])))
msg_id, srcid, totalbytes = headerStruct.unpack(data[0:struct.calcsize(headerStruct.format)])
totalbytes += struct.calcsize(headerStruct.format)
meta = (msg_id, srcid)
#print('msg_id=%d, srcid=%d, totalbytes=%d'%(msg_id, srcid, totalbytes))
if datalen >= totalbytes: ### We have a complete packet stored.
msg = data[:totalbytes]
data = data[totalbytes:]
meta = None
totalbytes = 0
#print('%.2f Hz'%(1/(time.time()-lasttime)))
lasttime = time.time()
#plt.show(block=False)
count+=1
self.displayQ.put_nowait(msg)
#print('Full message received after getting meta: datalen=%d, datalen after=%d'%(datalen, len(data)))
else:
if datalen < totalbytes:
continue
### We have a complete message
msg = data[:totalbytes]
data = data[totalbytes:]
#print('Full message received: datalen=%d, datalen after=%d'%(datalen, len(data)))
meta = None
totalbytes = 0
self.displayQ.put_nowait(msg)
#print('%.2f Hz'%(1/(time.time()-lasttime)))
lasttime = time.time()
count+=1
if self.exit: break
self.sock.close()
if __name__ == "__main__":
a = guiclient()
host= socket.gethostbyname('localhost')
port = 9996
print("Client host: %s: port: %d"%(host, port))
a.connect_to_server(host, port)
|
[
"struct.calcsize",
"telemetry_iface_ag.Framesource_Telemetry",
"multiprocessing.Process",
"telemetry_iface_ag.Session_Telemetry",
"sys.path.append",
"telemetry_iface_ag.Hologram_Telemetry",
"telemetry_iface_ag.Heartbeat_Telemetry",
"telemetry_iface_ag.Datalogger_Telemetry",
"numpy.frombuffer",
"telemetry_iface_ag.Reconstruction_Telemetry",
"select.select",
"matplotlib.pyplot.pause",
"struct.Struct",
"multiprocessing.Queue",
"matplotlib.pyplot.draw",
"time.time",
"socket.gethostbyname",
"signal.signal",
"telemetry_iface_ag.Guiserver_Telemetry",
"socket.socket",
"datetime.datetime.now",
"telemetry_iface_ag.Fouriermask_Telemetry",
"matplotlib.pyplot.subplots"
] |
[((206, 234), 'sys.path.append', 'sys.path.append', (['"""../dhmsw/"""'], {}), "('../dhmsw/')\n", (221, 234), False, 'import sys\n'), ((320, 340), 'struct.Struct', 'struct.Struct', (['"""III"""'], {}), "('III')\n", (333, 340), False, 'import struct\n'), ((7944, 7977), 'socket.gethostbyname', 'socket.gethostbyname', (['"""localhost"""'], {}), "('localhost')\n", (7964, 7977), False, 'import socket\n'), ((445, 479), 'multiprocessing.Process', 'Process', ([], {'target': 'self.DisplayThread'}), '(target=self.DisplayThread)\n', (452, 479), False, 'from multiprocessing import Process, Queue\n'), ((545, 552), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (550, 552), False, 'from multiprocessing import Process, Queue\n'), ((1471, 1516), 'telemetry_iface_ag.Reconstruction_Telemetry', 'telemetry_iface_ag.Reconstruction_Telemetry', ([], {}), '()\n', (1514, 1516), False, 'import telemetry_iface_ag\n'), ((1547, 1587), 'telemetry_iface_ag.Heartbeat_Telemetry', 'telemetry_iface_ag.Heartbeat_Telemetry', ([], {}), '()\n', (1585, 1587), False, 'import telemetry_iface_ag\n'), ((1620, 1662), 'telemetry_iface_ag.Framesource_Telemetry', 'telemetry_iface_ag.Framesource_Telemetry', ([], {}), '()\n', (1660, 1662), False, 'import telemetry_iface_ag\n'), ((1694, 1735), 'telemetry_iface_ag.Datalogger_Telemetry', 'telemetry_iface_ag.Datalogger_Telemetry', ([], {}), '()\n', (1733, 1735), False, 'import telemetry_iface_ag\n'), ((1766, 1806), 'telemetry_iface_ag.Guiserver_Telemetry', 'telemetry_iface_ag.Guiserver_Telemetry', ([], {}), '()\n', (1804, 1806), False, 'import telemetry_iface_ag\n'), ((1835, 1873), 'telemetry_iface_ag.Session_Telemetry', 'telemetry_iface_ag.Session_Telemetry', ([], {}), '()\n', (1871, 1873), False, 'import telemetry_iface_ag\n'), ((1903, 1942), 'telemetry_iface_ag.Hologram_Telemetry', 'telemetry_iface_ag.Hologram_Telemetry', ([], {}), '()\n', (1940, 1942), False, 'import telemetry_iface_ag\n'), ((1975, 2017), 'telemetry_iface_ag.Fouriermask_Telemetry', 'telemetry_iface_ag.Fouriermask_Telemetry', ([], {}), '()\n', (2015, 2017), False, 'import telemetry_iface_ag\n'), ((4739, 4788), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (4752, 4788), False, 'import socket\n'), ((5029, 5040), 'time.time', 'time.time', ([], {}), '()\n', (5038, 5040), False, 'import time\n'), ((708, 747), 'signal.signal', 'signal.signal', (['sig', 'self.signal_handler'], {}), '(sig, self.signal_handler)\n', (721, 747), False, 'import signal\n'), ((1185, 1210), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'sharex': '(True)'}), '(sharex=True)\n', (1197, 1210), True, 'import matplotlib.pyplot as plt\n'), ((2355, 2391), 'struct.calcsize', 'struct.calcsize', (['headerStruct.format'], {}), '(headerStruct.format)\n', (2370, 2391), False, 'import struct\n'), ((5141, 5179), 'select.select', 'select.select', (['self.readfds', '[]', '[]', '(5)'], {}), '(self.readfds, [], [], 5)\n', (5154, 5179), False, 'import select\n'), ((4416, 4427), 'time.time', 'time.time', ([], {}), '()\n', (4425, 4427), False, 'import time\n'), ((4429, 4452), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4450, 4452), False, 'import datetime\n'), ((2249, 2285), 'struct.calcsize', 'struct.calcsize', (['headerStruct.format'], {}), '(headerStruct.format)\n', (2264, 2285), False, 'import struct\n'), ((6315, 6351), 'struct.calcsize', 'struct.calcsize', (['headerStruct.format'], {}), '(headerStruct.format)\n', (6330, 6351), False, 'import struct\n'), ((7780, 
7791), 'time.time', 'time.time', ([], {}), '()\n', (7789, 7791), False, 'import time\n'), ((5965, 6001), 'struct.calcsize', 'struct.calcsize', (['headerStruct.format'], {}), '(headerStruct.format)\n', (5980, 6001), False, 'import struct\n'), ((6886, 6897), 'time.time', 'time.time', ([], {}), '()\n', (6895, 6897), False, 'import time\n'), ((6238, 6274), 'struct.calcsize', 'struct.calcsize', (['headerStruct.format'], {}), '(headerStruct.format)\n', (6253, 6274), False, 'import struct\n'), ((4190, 4200), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4198, 4200), True, 'import matplotlib.pyplot as plt\n'), ((4221, 4237), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (4230, 4237), True, 'import matplotlib.pyplot as plt\n'), ((3739, 3779), 'numpy.frombuffer', 'np.frombuffer', (['data.mask'], {'dtype': 'np.uint8'}), '(data.mask, dtype=np.uint8)\n', (3752, 3779), True, 'import numpy as np\n'), ((4098, 4109), 'time.time', 'time.time', ([], {}), '()\n', (4107, 4109), False, 'import time\n')]
|
# coding: utf-8
import numpy as np
from typing import List, Union
from collections import OrderedDict
from datetime import datetime
from .objects import Direction, BI, FakeBI, Signal
from .enum import Freq
from .utils.ta import MACD, SMA, KDJ
from .cobra.utils import kdj_gold_cross
from . import analyze
def check_three_bi(bis: List[Union[BI, FakeBI]], freq: Freq, di: int = 1) -> Signal:
"""识别由远及近的三笔形态
:param freq: K线周期,也可以称为级别
:param bis: 由远及近的三笔形态
:param di: 最近一笔为倒数第i笔
:return:
"""
di_name = f"倒{di}笔"
v = Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='其他', v2='其他', v3='其他')
if len(bis) != 3:
return v
bi1, bi2, bi3 = bis
if not (bi1.direction == bi3.direction):
print(f"1,3 的 direction 不一致,无法识别三笔形态,{bi3}")
return v
assert bi3.direction in [Direction.Down, Direction.Up], "direction 的取值错误"
if bi3.direction == Direction.Down:
# 向下不重合
if bi3.low > bi1.high:
return Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='向下不重合')
# 向下奔走型
if bi2.low < bi3.low < bi1.high < bi2.high:
return Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='向下奔走型')
# 向下收敛
if bi1.high > bi3.high and bi1.low < bi3.low:
return Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='向下收敛')
if bi1.high < bi3.high and bi1.low > bi3.low:
return Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='向下扩张')
if bi3.low < bi1.low and bi3.high < bi1.high:
if bi3.power < bi1.power:
return Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='向下盘背')
else:
return Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='向下无背')
if bi3.direction == Direction.Up:
if bi3.high < bi1.low:
return Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='向上不重合')
if bi2.low < bi1.low < bi3.high < bi2.high:
return Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='向上奔走型')
if bi1.high > bi3.high and bi1.low < bi3.low:
return Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='向上收敛')
if bi1.high < bi3.high and bi1.low > bi3.low:
return Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='向上扩张')
if bi3.low > bi1.low and bi3.high > bi1.high:
if bi3.power < bi1.power:
return Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='向上盘背')
else:
return Signal(k1=freq.value, k2=di_name, k3='三笔形态', v1='向上无背')
return v
def check_five_bi(bis: List[Union[BI, FakeBI]], freq: Freq, di: int = 1) -> Signal:
"""识别五笔形态
:param freq: K线周期,也可以称为级别
:param bis: 由远及近的五笔
:param di: 最近一笔为倒数第i笔
:return:
"""
di_name = f"倒{di}笔"
v = Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='其他', v2='其他', v3='其他')
if len(bis) != 5:
return v
bi1, bi2, bi3, bi4, bi5 = bis
if not (bi1.direction == bi3.direction == bi5.direction):
print(f"1,3,5 的 direction 不一致,无法识别五段形态;{bi1}{bi3}{bi5}")
return v
direction = bi1.direction
max_high = max([x.high for x in bis])
min_low = min([x.low for x in bis])
assert direction in [Direction.Down, Direction.Up], "direction 的取值错误"
if direction == Direction.Down:
# aAb式底背驰
if min(bi2.high, bi4.high) > max(bi2.low, bi4.low) and max_high == bi1.high and bi5.power < bi1.power:
if (min_low == bi3.low and bi5.low < bi1.low) or (min_low == bi5.low):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='底背驰', v2='五笔aAb式')
# 类趋势底背驰
if max_high == bi1.high and min_low == bi5.low and bi4.high < bi2.low and bi5.power < max(bi3.power, bi1.power):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='底背驰', v2='五笔类趋势')
# 上颈线突破
if (min_low == bi1.low and bi5.high > min(bi1.high, bi2.high) > bi5.low > bi1.low) \
or (min_low == bi3.low and bi5.high > bi3.high > bi5.low > bi3.low):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='上颈线突破', v2='五笔')
        # Five-bi third buy; requires bi5.high to be the highest point
if max_high == bi5.high > bi5.low > max(bi1.high, bi3.high) \
> min(bi1.high, bi3.high) > max(bi1.low, bi3.low) > min_low:
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='类三买', v2='五笔')
if direction == Direction.Up:
# aAb式类一卖
if min(bi2.high, bi4.high) > max(bi2.low, bi4.low) and min_low == bi1.low and bi5.power < bi1.power:
if (max_high == bi3.high and bi5.high > bi1.high) or (max_high == bi5.high):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='顶背驰', v2='五笔aAb式')
# 类趋势类一卖
if min_low == bi1.low and max_high == bi5.high and bi5.power < max(bi1.power, bi3.power) and bi4.low > bi2.high:
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='顶背驰', v2='五笔类趋势')
# 下颈线突破
if (max_high == bi1.high and bi5.low < max(bi1.low, bi2.low) < bi5.high < max_high) \
or (max_high == bi3.high and bi5.low < bi3.low < bi5.high < max_high):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='下颈线突破', v2='五笔')
        # Five-bi third sell; requires bi5.low to be the lowest point
if min_low == bi5.low < bi5.high < min(bi1.low, bi3.low) \
< max(bi1.low, bi3.low) < min(bi1.high, bi3.high) < max_high:
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='类三卖', v2='五笔')
return v
def check_seven_bi(bis: List[Union[BI, FakeBI]], freq: Freq, di: int = 1) -> Signal:
"""识别七笔形态
:param freq: K线周期,也可以称为级别
:param bis: 由远及近的七笔
:param di: 最近一笔为倒数第i笔
"""
di_name = f"倒{di}笔"
v = Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='其他', v2='其他', v3='其他')
if len(bis) != 7:
return v
bi1, bi2, bi3, bi4, bi5, bi6, bi7 = bis
max_high = max([x.high for x in bis])
min_low = min([x.low for x in bis])
direction = bi7.direction
assert direction in [Direction.Down, Direction.Up], "direction 的取值错误"
if direction == Direction.Down:
if bi1.high == max_high and bi7.low == min_low:
# aAbcd式底背驰
if min(bi2.high, bi4.high) > max(bi2.low, bi4.low) > bi6.high and bi7.power < bi5.power:
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='底背驰', v2='七笔aAbcd式')
# abcAd式底背驰
if bi2.low > min(bi4.high, bi6.high) > max(bi4.low, bi6.low) and bi7.power < (bi1.high - bi3.low):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='底背驰', v2='七笔abcAd式')
# aAb式底背驰
if min(bi2.high, bi4.high, bi6.high) > max(bi2.low, bi4.low, bi6.low) and bi7.power < bi1.power:
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='底背驰', v2='七笔aAb式')
# 类趋势底背驰
if bi2.low > bi4.high and bi4.low > bi6.high and bi7.power < max(bi5.power, bi3.power, bi1.power):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='底背驰', v2='七笔类趋势')
# 向上中枢完成
if bi4.low == min_low and min(bi1.high, bi3.high) > max(bi1.low, bi3.low) \
and min(bi5.high, bi7.high) > max(bi5.low, bi7.low) \
and max(bi4.high, bi6.high) > min(bi3.high, bi4.high):
if max(bi1.low, bi3.low) < max(bi5.high, bi7.high):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='向上中枢完成', v2='七笔')
        # Seven-bi third buy: bis 1~3 form the pivot, the low lies in 1~3, the high lies in 5~7, and the lowest point of 5~7 is above the highest point of 1~3
if min(bi1.low, bi3.low) == min_low and max(bi5.high, bi7.high) == max_high \
and min(bi5.low, bi7.low) > max(bi1.high, bi3.high) \
and min(bi1.high, bi3.high) > max(bi1.low, bi3.low):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='类三买', v2='七笔')
if direction == Direction.Up:
# 顶背驰
if bi1.low == min_low and bi7.high == max_high:
# aAbcd式顶背驰
if bi6.low > min(bi2.high, bi4.high) > max(bi2.low, bi4.low) and bi7.power < bi5.power:
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='顶背驰', v2='七笔aAbcd式')
# abcAd式顶背驰
if min(bi4.high, bi6.high) > max(bi4.low, bi6.low) > bi2.high and bi7.power < (bi3.high - bi1.low):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='顶背驰', v2='七笔abcAd式')
# aAb式顶背驰
if min(bi2.high, bi4.high, bi6.high) > max(bi2.low, bi4.low, bi6.low) and bi7.power < bi1.power:
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='顶背驰', v2='七笔aAb式')
# 类趋势顶背驰
if bi2.high < bi4.low and bi4.high < bi6.low and bi7.power < max(bi5.power, bi3.power, bi1.power):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='顶背驰', v2='七笔类趋势')
# 向下中枢完成
if bi4.high == max_high and min(bi1.high, bi3.high) > max(bi1.low, bi3.low) \
and min(bi5.high, bi7.high) > max(bi5.low, bi7.low) \
and min(bi4.low, bi6.low) < max(bi3.low, bi4.low):
if min(bi1.high, bi3.high) > min(bi5.low, bi7.low):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='向下中枢完成', v2='七笔')
        # Seven-bi third sell: bis 1~3 form the pivot, the high lies in 1~3, the low lies in 5~7, and the highest point of 5~7 is below the lowest point of 1~3
if min(bi5.low, bi7.low) == min_low and max(bi1.high, bi3.high) == max_high \
and max(bi7.high, bi5.high) < min(bi1.low, bi3.low) \
and min(bi1.high, bi3.high) > max(bi1.low, bi3.low):
return Signal(k1=freq.value, k2=di_name, k3='基础形态', v1='类三卖', v2='七笔')
return v
def check_nine_bi(bis: List[Union[BI, FakeBI]], freq: Freq, di: int = 1) -> Signal:
"""识别九笔形态
:param freq: K线周期,也可以称为级别
:param bis: 由远及近的九笔
:param di: 最近一笔为倒数第i笔
"""
di_name = f"倒{di}笔"
v = Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='其他', v2='其他', v3='其他')
if len(bis) != 9:
return v
direction = bis[-1].direction
bi1, bi2, bi3, bi4, bi5, bi6, bi7, bi8, bi9 = bis
max_high = max([x.high for x in bis])
min_low = min([x.low for x in bis])
assert direction in [Direction.Down, Direction.Up], "direction 的取值错误"
if direction == Direction.Down:
if min_low == bi9.low and max_high == bi1.high:
# aAb式类一买
if min(bi2.high, bi4.high, bi6.high, bi8.high) > max(bi2.low, bi4.low, bi6.low, bi8.low) \
and bi9.power < bi1.power and bi3.low >= bi1.low and bi7.high <= bi9.high:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一买', v2='九笔aAb式')
# aAbcd式类一买
if min(bi2.high, bi4.high, bi6.high) > max(bi2.low, bi4.low, bi6.low) > bi8.high \
and bi9.power < bi7.power:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一买', v2='九笔aAbcd式')
# ABC式类一买
if bi3.low < bi1.low and bi7.high > bi9.high \
and min(bi4.high, bi6.high) > max(bi4.low, bi6.low) \
and (bi1.high - bi3.low) > (bi7.high - bi9.low):
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一买', v2='九笔ABC式')
# 类趋势一买
if bi8.high < bi6.low < bi6.high < bi4.low < bi4.high < bi2.low \
and bi9.power < max([bi1.power, bi3.power, bi5.power, bi7.power]):
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一买', v2='九笔类趋势')
        # Nine-bi first-buy-like point (bis 2~4 form pivot A, bis 6~8 form pivot B, bi 9 shows divergence)
if max_high == max(bi1.high, bi3.high) and min_low == bi9.low \
and min(bi2.high, bi4.high) > max(bi2.low, bi4.low) \
and min(bi2.low, bi4.low) > max(bi6.high, bi8.high) \
and min(bi6.high, bi8.high) > max(bi6.low, bi8.low) \
and bi9.power < bi5.power:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一买', v2='九笔aAbBc式')
        # Third-buy-like point (bis 1/3/5/7 form the pivot; the lowest point is on bi 3 or bi 5)
if max_high == bi9.high > bi9.low \
> max([x.high for x in [bi1, bi3, bi5, bi7]]) \
> min([x.high for x in [bi1, bi3, bi5, bi7]]) \
> max([x.low for x in [bi1, bi3, bi5, bi7]]) \
> min([x.low for x in [bi3, bi5]]) == min_low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类三买', v2='九笔GG三买')
        # Third-buy-like point (bis 3/5/7 form the pivot, bi 8 is weaker than bi 2, and the bi 9 pullback holds above GG)
if bi8.power < bi2.power and max_high == bi9.high > bi9.low \
> max([x.high for x in [bi3, bi5, bi7]]) \
> min([x.high for x in [bi3, bi5, bi7]]) \
> max([x.low for x in [bi3, bi5, bi7]]) > bi1.low == min_low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类三买', v2='九笔GG三买')
if min_low == bi5.low and max_high == bi1.high and bi4.high < bi2.low: # 前五笔构成向下类趋势
zd = max([x.low for x in [bi5, bi7]])
zg = min([x.high for x in [bi5, bi7]])
gg = max([x.high for x in [bi5, bi7]])
if zg > zd and bi8.high > gg: # 567构成中枢,且8的高点大于gg
if bi9.low > zg:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类三买', v2='九笔ZG三买')
# 类二买
if bi9.high > gg > zg > bi9.low > zd:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类二买', v2='九笔')
if direction == Direction.Up:
if max_high == bi9.high and min_low == bi1.low:
# aAbBc式类一卖
if bi6.low > min(bi2.high, bi4.high) > max(bi2.low, bi4.low) \
and min(bi6.high, bi8.high) > max(bi6.low, bi8.low) \
and max(bi2.high, bi4.high) < min(bi6.low, bi8.low) \
and bi9.power < bi5.power:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一卖', v2='九笔aAbBc式')
# aAb式类一卖
if min(bi2.high, bi4.high, bi6.high, bi8.high) > max(bi2.low, bi4.low, bi6.low, bi8.low) \
and bi9.power < bi1.power and bi3.high <= bi1.high and bi7.low >= bi9.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一卖', v2='九笔aAb式')
# aAbcd式类一卖
if bi8.low > min(bi2.high, bi4.high, bi6.high) > max(bi2.low, bi4.low, bi6.low) \
and bi9.power < bi7.power:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一卖', v2='九笔aAbcd式')
# ABC式类一卖
if bi3.high > bi1.high and bi7.low < bi9.low \
and min(bi4.high, bi6.high) > max(bi4.low, bi6.low) \
and (bi3.high - bi1.low) > (bi9.high - bi7.low):
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一卖', v2='九笔ABC式')
# 类趋势一卖
if bi8.low > bi6.high > bi6.low > bi4.high > bi4.low > bi2.high \
and bi9.power < max([bi1.power, bi3.power, bi5.power, bi7.power]):
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一卖', v2='九笔类趋势')
# 九笔三卖
if max_high == bi1.high and min_low == bi9.low \
and bi9.high < max([x.low for x in [bi3, bi5, bi7]]) < min([x.high for x in [bi3, bi5, bi7]]):
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类三卖', v2='九笔')
if min_low == bi1.low and max_high == bi5.high and bi2.high < bi4.low: # 前五笔构成向上类趋势
zd = max([x.low for x in [bi5, bi7]])
zg = min([x.high for x in [bi5, bi7]])
dd = min([x.low for x in [bi5, bi7]])
if zg > zd and bi8.low < dd: # 567构成中枢,且8的低点小于dd
if bi9.high < zd:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类三卖', v2='九笔ZD三卖')
# 类二卖
if dd < zd <= bi9.high < zg:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类二卖', v2='九笔')
return v
def check_eleven_bi(bis: List[Union[BI, FakeBI]], freq: Freq, di: int = 1) -> Signal:
"""识别十一笔形态
:param freq: K线周期,也可以称为级别
:param bis: 由远及近的十一笔
:param di: 最近一笔为倒数第i笔
"""
di_name = f"倒{di}笔"
v = Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='其他', v2='其他', v3='其他')
if len(bis) != 11:
return v
direction = bis[-1].direction
bi1, bi2, bi3, bi4, bi5, bi6, bi7, bi8, bi9, bi10, bi11 = bis
max_high = max([x.high for x in bis])
min_low = min([x.low for x in bis])
assert direction in [Direction.Down, Direction.Up], "direction 的取值错误"
if direction == Direction.Down:
if min_low == bi11.low and max_high == bi1.high:
# ABC式类一买,A5B3C3
if bi5.low == min([x.low for x in [bi1, bi3, bi5]]) \
and bi9.low > bi11.low and bi9.high > bi11.high \
and bi8.high > bi6.low and bi1.high - bi5.low > bi9.high - bi11.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一买', v2="11笔A5B3C3式")
# ABC式类一买,A3B3C5
if bi1.high > bi3.high and bi1.low > bi3.low \
and bi7.high == max([x.high for x in [bi7, bi9, bi11]]) \
and bi6.high > bi4.low and bi1.high - bi3.low > bi7.high - bi11.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一买', v2="11笔A3B3C5式")
# ABC式类一买,A3B5C3
if bi1.low > bi3.low and min(bi4.high, bi6.high, bi8.high) > max(bi4.low, bi6.low, bi8.low) \
and bi9.high > bi11.high and bi1.high - bi3.low > bi9.high - bi11.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一买', v2="11笔A3B5C3式")
# a1Ab式类一买,a1(1~7构成的类趋势)
if bi2.low > bi4.high > bi4.low > bi6.high > bi5.low > bi7.low and bi10.high > bi8.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一买', v2="11笔a1Ab式")
            # Second-buy-like point (bis 1~7 form a consolidation divergence, 2/4/6 form a falling pivot, 9/11 form a rising pivot, and the rising pivot's GG is above the falling pivot's ZG)
if bi7.power < bi1.power and min_low == bi7.low < max([x.low for x in [bi2, bi4, bi6]]) \
< min([x.high for x in [bi2, bi4, bi6]]) < max([x.high for x in [bi9, bi11]]) < bi1.high == max_high \
and bi11.low > min([x.low for x in [bi2, bi4, bi6]]) \
and min([x.high for x in [bi9, bi11]]) > max([x.low for x in [bi9, bi11]]):
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类二买', v2="11笔")
            # Second-buy-like point (bis 1~7 set the range extremes, 9~11 form a rising pivot whose GG exceeds the maximum of 4~6 and whose DD exceeds the minimum of 4~6)
if max_high == bi1.high and min_low == bi7.low \
and min(bi9.high, bi11.high) > max(bi9.low, bi11.low) \
and max(bi11.high, bi9.high) > max(bi4.high, bi6.high) \
and min(bi9.low, bi11.low) > min(bi4.low, bi6.low):
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类二买', v2="11笔")
            # Third-buy-like point (bis 1~9 form a higher-level pivot, bi 10 leaves it, and the bi 11 pullback holds above GG)
gg = max([x.high for x in [bi1, bi2, bi3]])
zg = min([x.high for x in [bi1, bi2, bi3]])
zd = max([x.low for x in [bi1, bi2, bi3]])
dd = min([x.low for x in [bi1, bi2, bi3]])
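            # Pivot terminology used here (matches how the four values above are computed):
            # GG = highest high, ZG = lowest high, ZD = highest low, DD = lowest low of the
            # bis that form the pivot.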
if max_high == bi11.high and bi11.low > zg > zd \
and gg > bi5.low and gg > bi7.low and gg > bi9.low \
and dd < bi5.high and dd < bi7.high and dd < bi9.high:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类三买', v2="11笔GG三买")
if direction == Direction.Up:
if max_high == bi11.high and min_low == bi1.low:
# ABC式类一卖,A5B3C3
if bi5.high == max([bi1.high, bi3.high, bi5.high]) and bi9.low < bi11.low and bi9.high < bi11.high \
and bi8.low < bi6.high and bi11.high - bi9.low < bi5.high - bi1.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一卖', v2="11笔A5B3C3式")
# ABC式类一卖,A3B3C5
if bi7.low == min([bi11.low, bi9.low, bi7.low]) and bi1.high < bi3.high and bi1.low < bi3.low \
and bi6.low < bi4.high and bi11.high - bi7.low < bi3.high - bi1.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一卖', v2="11笔A3B3C5式")
# ABC式类一卖,A3B5C3
if bi1.high < bi3.high and min(bi4.high, bi6.high, bi8.high) > max(bi4.low, bi6.low, bi8.low) \
and bi9.low < bi11.low and bi3.high - bi1.low > bi11.high - bi9.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一卖', v2="11笔A3B5C3式")
            # Second-sell-like point: bis 1~9 form a trend-like move and bi 11 fails to make a new high
if max_high == bi9.high > bi8.low > bi6.high > bi6.low > bi4.high > bi4.low > bi2.high > bi1.low == min_low \
and bi11.high < bi9.high:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类二卖', v2="11笔")
return v
def check_thirteen_bi(bis: List[Union[BI, FakeBI]], freq: Freq, di: int = 1) -> Signal:
"""识别十三笔形态
:param freq: K线周期,也可以称为级别
:param bis: 由远及近的十三笔
:param di: 最近一笔为倒数第i笔
"""
di_name = f"倒{di}笔"
v = Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='其他', v2='其他', v3='其他')
if len(bis) != 13:
return v
direction = bis[-1].direction
bi1, bi2, bi3, bi4, bi5, bi6, bi7, bi8, bi9, bi10, bi11, bi12, bi13 = bis
max_high = max([x.high for x in bis])
min_low = min([x.low for x in bis])
assert direction in [Direction.Down, Direction.Up], "direction 的取值错误"
if direction == Direction.Down:
if min_low == bi13.low and max_high == bi1.high:
# ABC式类一买,A5B3C5
if bi5.low < min(bi1.low, bi3.low) and bi9.high > max(bi11.high, bi13.high) \
and bi8.high > bi6.low and bi1.high - bi5.low > bi9.high - bi13.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一买', v2="13笔A5B3C5式")
# ABC式类一买,A3B5C5
if bi3.low < min(bi1.low, bi5.low) and bi9.high > max(bi11.high, bi13.high) \
and min(bi4.high, bi6.high, bi8.high) > max(bi4.low, bi6.low, bi8.low) \
and bi1.high - bi3.low > bi9.high - bi13.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一买', v2="13笔A3B5C5式")
# ABC式类一买,A5B5C3
if bi5.low < min(bi1.low, bi3.low) and bi11.high > max(bi9.high, bi13.high) \
and min(bi6.high, bi8.high, bi10.high) > max(bi6.low, bi8.low, bi10.low) \
and bi1.high - bi5.low > bi11.high - bi13.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一买', v2="13笔A5B5C3式")
if direction == Direction.Up:
if max_high == bi13.high and min_low == bi1.low:
# ABC式类一卖,A5B3C5
if bi5.high > max(bi3.high, bi1.high) and bi9.low < min(bi11.low, bi13.low) \
and bi8.low < bi6.high and bi5.high - bi1.low > bi13.high - bi9.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一卖', v2="13笔A5B3C5式")
# ABC式类一卖,A3B5C5
if bi3.high > max(bi5.high, bi1.high) and bi9.low < min(bi11.low, bi13.low) \
and min(bi4.high, bi6.high, bi8.high) > max(bi4.low, bi6.low, bi8.low) \
and bi3.high - bi1.low > bi13.high - bi9.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一卖', v2="13笔A3B5C5式")
# ABC式类一卖,A5B5C3
if bi5.high > max(bi3.high, bi1.high) and bi11.low < min(bi9.low, bi13.low) \
and min(bi6.high, bi8.high, bi10.high) > max(bi6.low, bi8.low, bi10.low) \
and bi5.high - bi1.low > bi13.high - bi11.low:
return Signal(k1=freq.value, k2=di_name, k3='类买卖点', v1='类一卖', v2="13笔A5B5C3式")
return v
# The functions above are signal-computation helpers, mainly for pattern recognition.
# ----------------------------------------------------------------------------------------------------------------------
# The functions below compute signals (their names are prefixed with get_s).
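# Note on the Signal convention used throughout this module (inferred from how Signal is
# used below, not an authoritative spec): every get_s_* function returns an OrderedDict
# mapping Signal.key -> Signal.value, where k1 is the K-line frequency, k2 encodes the
# position being examined (e.g. the di-th last bi or K-line), k3 names the signal family,
# and v1/v2/v3 describe the matched pattern, with "其他" (other) as the default value.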
def get_s_three_bi(c: analyze.CZSC, di: int = 1) -> OrderedDict:
"""倒数第i笔的三笔形态信号
:param c: CZSC 对象
:param di: 最近一笔为倒数第i笔
:return: 信号字典
"""
assert di >= 1
bis = c.finished_bis
freq: Freq = c.freq
s = OrderedDict()
v = Signal(k1=str(freq.value), k2=f"倒{di}笔", k3="三笔形态", v1="其他", v2='其他', v3='其他')
s[v.key] = v.value
if not bis:
return s
if di == 1:
three_bi = bis[-3:]
else:
three_bi = bis[-3 - di + 1: -di + 1]
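    # The slice above picks the three consecutive finished bis that end with the di-th
    # last bi; the other get_s_* functions below use the same windowing convention.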
v = check_three_bi(three_bi, freq, di)
s[v.key] = v.value
return s
def get_s_base_xt(c: analyze.CZSC, di: int = 1) -> OrderedDict:
"""倒数第i笔的基础形态信号
:param c: CZSC 对象
:param di: 最近一笔为倒数第i笔
:return: 信号字典
"""
assert di >= 1
bis = c.finished_bis
freq: Freq = c.freq
s = OrderedDict()
v = Signal(k1=str(freq.value), k2=f"倒{di}笔", k3="基础形态", v1="其他", v2='其他', v3='其他')
s[v.key] = v.value
if not bis:
return s
if di == 1:
five_bi = bis[-5:]
seven_bi = bis[-7:]
else:
five_bi = bis[-5 - di + 1: -di + 1]
seven_bi = bis[-7 - di + 1: -di + 1]
for v in [check_five_bi(five_bi, freq, di), check_seven_bi(seven_bi, freq, di)]:
if "其他" not in v.value:
s[v.key] = v.value
return s
def get_s_like_bs(c: analyze.CZSC, di: int = 1) -> OrderedDict:
"""倒数第i笔的类买卖点信号
:param c: CZSC 对象
:param di: 最近一笔为倒数第i笔
:return: 信号字典
"""
assert di >= 1
bis = c.finished_bis
freq: Freq = c.freq
s = OrderedDict()
v = Signal(k1=str(freq.value), k2=f"倒{di}笔", k3="类买卖点", v1="其他", v2='其他', v3='其他')
s[v.key] = v.value
if not bis:
return s
if di == 1:
nine_bi = bis[-9:]
eleven_bi = bis[-11:]
thirteen_bi = bis[-13:]
else:
nine_bi = bis[-9 - di + 1: -di + 1]
eleven_bi = bis[-11 - di + 1: -di + 1]
thirteen_bi = bis[-13 - di + 1: -di + 1]
for v in [check_nine_bi(nine_bi, freq, di), check_eleven_bi(eleven_bi, freq, di),
check_thirteen_bi(thirteen_bi, freq, di)]:
if "其他" not in v.value:
s[v.key] = v.value
return s
def get_s_bi_status(c: analyze.CZSC) -> OrderedDict:
"""倒数第1笔的表里关系信号
:param c: CZSC 对象
:return: 信号字典
"""
freq: Freq = c.freq
s = OrderedDict()
v = Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="其他", v2='其他', v3='其他')
s[v.key] = v.value
if c.bi_list:
        # For the definition of the biao-li (表里) relationship, see: http://blog.sina.com.cn/s/blog_486e105c01007wc1.html
min_ubi = min([x.low for x in c.bars_ubi])
max_ubi = max([x.high for x in c.bars_ubi])
last_bi = c.bi_list[-1]
v = None
if last_bi.direction == Direction.Down:
if min_ubi < last_bi.low:
v = Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="向下延伸")
else:
v = Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="底分完成")
if last_bi.direction == Direction.Up:
if max_ubi > last_bi.high:
v = Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="向上延伸")
else:
v = Signal(k1=str(freq.value), k2="倒1笔", k3="表里关系", v1="顶分完成")
if v and "其他" not in v.value:
s[v.key] = v.value
return s
def get_s_d0_bi(c: analyze.CZSC) -> OrderedDict:
"""倒数第0笔信号
:param c: CZSC 对象
:return: 信号字典
"""
freq: Freq = c.freq
s = OrderedDict()
default_signals = [
Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="其他", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="其他", v2='其他', v3='其他'),
]
for signal in default_signals:
s[signal.key] = signal.value
bis = c.finished_bis
if bis:
# 倒0笔方向
last_bi = bis[-1]
if last_bi.direction == Direction.Down:
v = Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向上")
elif last_bi.direction == Direction.Up:
v = Signal(k1=str(freq.value), k2="倒0笔", k3="方向", v1="向下")
else:
raise ValueError
if v and "其他" not in v.value:
s[v.key] = v.value
# 倒0笔长度
bars_ubi = [x for x in c.bars_raw[-20:] if x.dt >= bis[-1].fx_b.elements[0].dt]
if len(bars_ubi) >= 9:
v = Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="9根K线以上")
elif 9 > len(bars_ubi) > 5:
v = Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="5到9根K线")
else:
v = Signal(k1=str(freq.value), k2="倒0笔", k3="长度", v1="5根K线以下")
if "其他" not in v.value:
s[v.key] = v.value
return s
def get_s_di_bi(c: analyze.CZSC, di: int = 1) -> OrderedDict:
"""倒数第i笔的表里关系信号
:param c: CZSC 对象
:param di: 最近一笔为倒数第i笔
:return: 信号字典
"""
assert di >= 1
freq: Freq = c.freq
s = OrderedDict()
k1 = str(freq.value)
k2 = f"倒{di}笔"
default_signals = [
Signal(k1=k1, k2=k2, k3="方向", v1="其他", v2='其他', v3='其他'),
Signal(k1=k1, k2=k2, k3="长度", v1="其他", v2='其他', v3='其他'),
Signal(k1=k1, k2=k2, k3="拟合优度", v1="其他", v2='其他', v3='其他'),
]
for signal in default_signals:
s[signal.key] = signal.value
bis = c.finished_bis
if not bis:
return s
last_bi = bis[-di]
# 方向
v1 = Signal(k1=k1, k2=k2, k3="方向", v1=last_bi.direction.value)
s[v1.key] = v1.value
# 长度
if len(last_bi.bars) >= 15:
v = Signal(k1=k1, k2=k2, k3="长度", v1="15根K线以上")
    elif 15 > len(last_bi.bars) > 9:
v = Signal(k1=k1, k2=k2, k3="长度", v1="9到15根K线")
else:
v = Signal(k1=k1, k2=k2, k3="长度", v1="9根K线以下")
if "其他" not in v.value:
s[v.key] = v.value
# 拟合优度
if last_bi.rsq > 0.8:
v = Signal(k1=k1, k2=k2, k3="拟合优度", v1="大于0.8")
elif last_bi.rsq < 0.2:
v = Signal(k1=k1, k2=k2, k3="拟合优度", v1="小于0.2")
else:
v = Signal(k1=k1, k2=k2, k3="拟合优度", v1="0.2到0.8之间")
if "其他" not in v.value:
s[v.key] = v.value
return s
def get_s_three_k(c: analyze.CZSC, di: int = 1) -> OrderedDict:
"""倒数第i根K线的三K形态信号
:param c: CZSC 对象
:param di: 最近一根K线为倒数第i根
:return: 信号字典
"""
assert di >= 1
freq: Freq = c.freq
k1 = str(freq.value)
k2 = f"倒{di}K"
s = OrderedDict()
v = Signal(k1=k1, k2=k2, k3="三K形态", v1="其他", v2='其他', v3='其他')
s[v.key] = v.value
if len(c.bars_ubi) < 3:
return s
if di == 1:
tri = c.bars_ubi[-3:]
else:
tri = c.bars_ubi[-3 - di + 1:-di + 1]
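    # The branches below classify the three K-lines purely by their highs: a local dip
    # is labelled 底分型 (bottom fractal), a local peak 顶分型 (top fractal), and otherwise
    # the run is reported as going up ("向上走") or down ("向下走").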
if tri[0].high > tri[1].high < tri[2].high:
v = Signal(k1=k1, k2=k2, k3="三K形态", v1="底分型")
elif tri[0].high < tri[1].high < tri[2].high:
v = Signal(k1=k1, k2=k2, k3="三K形态", v1="向上走")
elif tri[0].high < tri[1].high > tri[2].high:
v = Signal(k1=k1, k2=k2, k3="三K形态", v1="顶分型")
elif tri[0].high > tri[1].high > tri[2].high:
v = Signal(k1=k1, k2=k2, k3="三K形态", v1="向下走")
else:
v = None
if v and "其他" not in v.value:
s[v.key] = v.value
return s
def get_s_macd(c: analyze.CZSC, di: int = 1) -> OrderedDict:
"""获取倒数第i根K线的MACD相关信号"""
freq: Freq = c.freq
s = OrderedDict()
k1 = str(freq.value)
k2 = f"倒{di}K"
default_signals = [
Signal(k1=k1, k2=k2, k3="DIF多空", v1="其他", v2='其他', v3='其他'),
Signal(k1=k1, k2=k2, k3="DIF方向", v1="其他", v2='其他', v3='其他'),
Signal(k1=k1, k2=k2, k3="DEA多空", v1="其他", v2='其他', v3='其他'),
Signal(k1=k1, k2=k2, k3="DEA方向", v1="其他", v2='其他', v3='其他'),
Signal(k1=k1, k2=k2, k3="MACD多空", v1="其他", v2='其他', v3='其他'),
Signal(k1=k1, k2=k2, k3="MACD方向", v1="其他", v2='其他', v3='其他'),
]
for signal in default_signals:
s[signal.key] = signal.value
if len(c.bars_raw) < 100:
return s
if di == 1:
close = np.array([x.close for x in c.bars_raw[-100:]])
else:
close = np.array([x.close for x in c.bars_raw[-100-di+1:-di+1]])
dif, dea, macd = MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)
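    # The long/short classification below compares DIF (and later DEA) against a small
    # noise band: the mean absolute change over the last three steps. Values inside the
    # band are reported as "模糊" (ambiguous) rather than long or short.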
# DIF 多空信号
dif_base = sum([abs(dif[-2] - dif[-1]), abs(dif[-3] - dif[-2]), abs(dif[-4] - dif[-3])]) / 3
if dif[-1] > dif_base:
v = Signal(k1=k1, k2=k2, k3="DIF多空", v1="多头")
elif dif[-1] < -dif_base:
v = Signal(k1=k1, k2=k2, k3="DIF多空", v1="空头")
else:
v = Signal(k1=k1, k2=k2, k3="DIF多空", v1="模糊")
s[v.key] = v.value
if dif[-1] > dif[-2] > dif[-3]:
v = Signal(k1=k1, k2=k2, k3="DIF方向", v1="向上")
elif dif[-1] < dif[-2] < dif[-3]:
v = Signal(k1=k1, k2=k2, k3="DIF方向", v1="向下")
else:
v = Signal(k1=k1, k2=k2, k3="DIF方向", v1="模糊")
s[v.key] = v.value
# DEA 多空信号
dea_base = sum([abs(dea[-2] - dea[-1]), abs(dea[-3] - dea[-2]), abs(dea[-4] - dea[-3])]) / 3
if dea[-1] > dea_base:
v = Signal(k1=k1, k2=k2, k3="DEA多空", v1="多头")
elif dea[-1] < -dea_base:
v = Signal(k1=k1, k2=k2, k3="DEA多空", v1="空头")
else:
v = Signal(k1=k1, k2=k2, k3="DEA多空", v1="模糊")
s[v.key] = v.value
# DEA 方向信号
if dea[-1] > dea[-2]:
v = Signal(k1=k1, k2=k2, k3="DEA方向", v1="向上")
elif dea[-1] < dea[-2]:
v = Signal(k1=k1, k2=k2, k3="DEA方向", v1="向下")
else:
v = Signal(k1=k1, k2=k2, k3="DEA方向", v1="模糊")
s[v.key] = v.value
# MACD 多空信号
if macd[-1] >= 0:
v = Signal(k1=k1, k2=k2, k3="MACD多空", v1="多头")
else:
v = Signal(k1=k1, k2=k2, k3="MACD多空", v1="空头")
s[v.key] = v.value
# MACD 方向信号
if macd[-1] > macd[-2] > macd[-3]:
v = Signal(k1=k1, k2=k2, k3="MACD方向", v1="向上")
elif macd[-1] < macd[-2] < macd[-3]:
v = Signal(k1=k1, k2=k2, k3="MACD方向", v1="向下")
else:
v = Signal(k1=k1, k2=k2, k3="MACD方向", v1="模糊")
s[v.key] = v.value
return s
def get_s_sma(c: analyze.CZSC, di: int = 1, t_seq=(5, 10, 20, 60)) -> OrderedDict:
"""获取倒数第i根K线的SMA相关信号"""
freq: Freq = c.freq
s = OrderedDict()
k1 = str(freq.value)
k2 = f"倒{di}K"
for t in t_seq:
x1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="其他", v2='其他', v3='其他')
x2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="其他", v2='其他', v3='其他')
s[x1.key] = x1.value
s[x2.key] = x2.value
if len(c.bars_raw) < 100:
return s
if di == 1:
close = np.array([x.close for x in c.bars_raw[-100:]])
else:
close = np.array([x.close for x in c.bars_raw[-100-di+1:-di+1]])
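    # For each moving-average period, two signals are emitted: long/short depending on
    # whether the close is above the SMA, and direction depending on whether the SMA
    # rose versus its previous value.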
for t in t_seq:
sma = SMA(close, timeperiod=t)
if close[-1] >= sma[-1]:
v1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="多头")
else:
v1 = Signal(k1=k1, k2=k2, k3=f"SMA{t}多空", v1="空头")
s[v1.key] = v1.value
if sma[-1] >= sma[-2]:
v2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="向上")
else:
v2 = Signal(k1=k1, k2=k2, k3=f"SMA{t}方向", v1="向下")
s[v2.key] = v2.value
return s
def get_s_bar_end(c: analyze.CZSC) -> OrderedDict:
"""K线结束时间判断"""
freq: Freq = c.freq
if freq != Freq.F1:
return OrderedDict()
s = OrderedDict()
default_signals = [
Signal(k1="5分钟", k2="倒1K", k3="结束", v1="其他", v2='其他', v3='其他'),
Signal(k1="15分钟", k2="倒1K", k3="结束", v1="其他", v2='其他', v3='其他'),
Signal(k1="30分钟", k2="倒1K", k3="结束", v1="其他", v2='其他', v3='其他'),
Signal(k1="60分钟", k2="倒1K", k3="结束", v1="其他", v2='其他', v3='其他'),
Signal(k1="日线", k2="倒1K", k3="结束", v1="其他", v2='其他', v3='其他'),
Signal(k1="股票", k2="开仓", k3="时间范围A", v1="其他", v2='其他', v3='其他'),
Signal(k1="股票", k2="开仓", k3="时间范围B", v1="其他", v2='其他', v3='其他'),
Signal(k1="股票", k2="开仓", k3="时间范围C", v1="其他", v2='其他', v3='其他'),
Signal(k1="股票", k2="开仓", k3="时间范围D", v1="其他", v2='其他', v3='其他'),
]
for signal in default_signals:
s[signal.key] = signal.value
dt: datetime = c.bars_raw[-1].dt
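    # Everything below is derived from the timestamp of the latest 1-minute bar: a
    # 5/15/30/60-minute bar is considered finished when the minute is a multiple of that
    # period, the daily close is approximated by the 14:46-14:55 window, and the stock
    # entry time-range flags compare against fixed HH:MM windows.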
for i in [5, 15, 30, 60]:
if dt.minute % i == 0:
v = Signal(k1=f"{i}分钟", k2="倒1K", k3="结束", v1="是")
else:
v = Signal(k1=f"{i}分钟", k2="倒1K", k3="结束", v1="否")
s[v.key] = v.value
if dt.hour == 14 and 45 < dt.minute < 56:
v = Signal(k1="日线", k2="倒1K", k3="结束", v1="是")
s[v.key] = v.value
if "10:00" <= dt.strftime("%H:%M") <= "14:59":
v = Signal(k1="股票", k2="开仓", k3="时间范围A", v1="上午十点", v2='下午三点')
s[v.key] = v.value
if "11:00" <= dt.strftime("%H:%M") <= "14:59":
v = Signal(k1="股票", k2="开仓", k3="时间范围B", v1="上午十一点", v2='下午三点')
s[v.key] = v.value
if "13:30" <= dt.strftime("%H:%M") <= "14:59":
v = Signal(k1="股票", k2="开仓", k3="时间范围C", v1="下午一点半", v2='下午三点')
s[v.key] = v.value
if "14:30" <= dt.strftime("%H:%M") <= "14:59":
v = Signal(k1="股票", k2="开仓", k3="时间范围D", v1="下午两点半", v2='下午三点')
s[v.key] = v.value
return s
def get_s_k(c: analyze.CZSC, di: int = 1) -> OrderedDict:
"""获取倒数第i根K线的信号"""
if c.freq not in [Freq.D, Freq.W]:
return OrderedDict()
if len(c.bars_raw) < di:
return OrderedDict()
s = OrderedDict()
freq: Freq = c.freq
k1 = str(freq.value)
default_signals = [
Signal(k1=k1, k2=f"倒{di}K", k3="状态", v1="其他", v2='其他', v3='其他'),
]
for signal in default_signals:
s[signal.key] = signal.value
k = c.bars_raw[-di]
if k.close > k.open:
v = Signal(k1=k1, k2=f"倒{di}K", k3="状态", v1="上涨")
else:
v = Signal(k1=k1, k2=f"倒{di}K", k3="状态", v1="下跌")
s[v.key] = v.value
return s
# ----------------------------------------------------------------------------------------------------------------------
# The functions below are signal functions that can be passed as parameters to a CZSC object.
def get_default_signals(c: analyze.CZSC) -> OrderedDict:
"""在 CZSC 对象上计算信号,这个是标准函数,主要用于研究。
实盘时可以按照自己的需要自定义计算哪些信号。
:param c: CZSC 对象
:return: 信号字典
"""
s = OrderedDict({"symbol": c.symbol, "dt": c.bars_raw[-1].dt, "close": c.bars_raw[-1].close})
s.update(get_s_d0_bi(c))
s.update(get_s_three_k(c, 1))
s.update(get_s_di_bi(c, 1))
s.update(get_s_macd(c, 1))
s.update(get_s_k(c, 1))
s.update(get_s_bi_status(c))
for di in range(1, 8):
s.update(get_s_three_bi(c, di))
for di in range(1, 8):
s.update(get_s_base_xt(c, di))
for di in range(1, 8):
s.update(get_s_like_bs(c, di))
return s
def get_selector_signals(c: analyze.CZSC) -> OrderedDict:
"""在 CZSC 对象上计算选股信号
:param c: CZSC 对象
:return: 信号字典
"""
freq: Freq = c.freq
s = OrderedDict({"symbol": c.symbol, "dt": c.bars_raw[-1].dt, "close": c.bars_raw[-1].close})
s.update(get_s_three_k(c, 1))
s.update(get_s_bi_status(c))
for di in range(1, 3):
s.update(get_s_three_bi(c, di))
for di in range(1, 3):
s.update(get_s_base_xt(c, di))
for di in range(1, 3):
s.update(get_s_like_bs(c, di))
default_signals = [
        # The following are technical-indicator-related signals
Signal(k1=str(freq.value), k2="成交量", v1="其他", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="MA5状态", v1="其他", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="KDJ状态", v1="其他", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="MACD状态", v1="其他", v2='其他', v3='其他'),
Signal(k1=str(freq.value), k2="倒0笔", k3="潜在三买", v1="其他", v2='其他', v3='其他'),
]
for signal in default_signals:
s[signal.key] = signal.value
if not c.bi_list:
return s
if len(c.bars_raw) > 30 and c.freq == Freq.D:
last_vols = [k_.open * k_.vol for k_ in c.bars_raw[-10:]]
if sum(last_vols) > 15e8 and min(last_vols) > 1e7:
v = Signal(k1=str(freq.value), k2="成交量", v1="近10个交易日累计成交金额大于15亿", v2='近10个交易日最低成交额大于1亿')
s[v.key] = v.value
if len(c.bars_raw) > 30 and c.freq in [Freq.W, Freq.M]:
if kdj_gold_cross(c.bars_raw, just=False):
v = Signal(k1=str(freq.value), k2="KDJ状态", v1="金叉")
s[v.key] = v.value
if len(c.bars_raw) > 100:
close = np.array([x.close for x in c.bars_raw[-100:]])
ma5 = SMA(close, timeperiod=5)
if c.bars_raw[-1].close >= ma5[-1]:
v = Signal(k1=str(freq.value), k2="MA5状态", v1="收盘价在MA5上方", v2='')
s[v.key] = v.value
if ma5[-1] > ma5[-2] > ma5[-3]:
v = Signal(k1=str(freq.value), k2="MA5状态", v1='收盘价在MA5上方', v2="向上趋势")
s[v.key] = v.value
diff, dea, macd = MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)
if diff[-3:-1].mean() > 0 and dea[-3:-1].mean() > 0 and macd[-3] < macd[-2] < macd[-1]:
v = Signal(k1=str(freq.value), k2="MACD状态", v1="DIFF大于0", v2='DEA大于0', v3='柱子增大')
s[v.key] = v.value
    # Potential third buy on the current (0-th) stroke
if len(c.bi_list) >= 5:
if c.bi_list[-1].direction == Direction.Down:
gg = max(c.bi_list[-1].high, c.bi_list[-3].high)
zg = min(c.bi_list[-1].high, c.bi_list[-3].high)
zd = max(c.bi_list[-1].low, c.bi_list[-3].low)
else:
gg = min(c.bi_list[-2].high, c.bi_list[-4].high)
zg = min(c.bi_list[-2].high, c.bi_list[-4].high)
zd = max(c.bi_list[-2].low, c.bi_list[-4].low)
if zg > zd:
v = Signal(k1=str(freq.value), k2="倒0笔", k3="潜在三买", v1="构成中枢")
if gg * 1.1 > min([x.low for x in c.bars_raw[-3:]]) > zg > zd:
v = Signal(k1=str(freq.value), k2="倒0笔", k3="潜在三买", v1="构成中枢", v2="近3K在中枢上沿附近")
if max([x.high for x in c.bars_raw[-7:-3]]) > gg:
v = Signal(k1=str(freq.value), k2="倒0笔", k3="潜在三买",
v1="构成中枢", v2="近3K在中枢上沿附近", v3='近7K突破中枢GG')
if v and "其他" not in v.value:
s[v.key] = v.value
return s
|
[
"numpy.array",
"collections.OrderedDict"
] |
[((23899, 23912), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23910, 23912), False, 'from collections import OrderedDict\n'), ((24475, 24488), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24486, 24488), False, 'from collections import OrderedDict\n'), ((25203, 25216), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (25214, 25216), False, 'from collections import OrderedDict\n'), ((25993, 26006), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (26004, 26006), False, 'from collections import OrderedDict\n'), ((27111, 27124), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (27122, 27124), False, 'from collections import OrderedDict\n'), ((28522, 28535), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (28533, 28535), False, 'from collections import OrderedDict\n'), ((29956, 29969), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (29967, 29969), False, 'from collections import OrderedDict\n'), ((30851, 30864), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (30862, 30864), False, 'from collections import OrderedDict\n'), ((33620, 33633), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (33631, 33633), False, 'from collections import OrderedDict\n'), ((34756, 34769), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (34767, 34769), False, 'from collections import OrderedDict\n'), ((36758, 36771), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (36769, 36771), False, 'from collections import OrderedDict\n'), ((37538, 37632), 'collections.OrderedDict', 'OrderedDict', (["{'symbol': c.symbol, 'dt': c.bars_raw[-1].dt, 'close': c.bars_raw[-1].close}"], {}), "({'symbol': c.symbol, 'dt': c.bars_raw[-1].dt, 'close': c.\n bars_raw[-1].close})\n", (37549, 37632), False, 'from collections import OrderedDict\n'), ((38196, 38290), 'collections.OrderedDict', 'OrderedDict', (["{'symbol': c.symbol, 'dt': c.bars_raw[-1].dt, 'close': c.bars_raw[-1].close}"], {}), "({'symbol': c.symbol, 'dt': c.bars_raw[-1].dt, 'close': c.\n bars_raw[-1].close})\n", (38207, 38290), False, 'from collections import OrderedDict\n'), ((31511, 31557), 'numpy.array', 'np.array', (['[x.close for x in c.bars_raw[-100:]]'], {}), '([x.close for x in c.bars_raw[-100:]])\n', (31519, 31557), True, 'import numpy as np\n'), ((31584, 31646), 'numpy.array', 'np.array', (['[x.close for x in c.bars_raw[-100 - di + 1:-di + 1]]'], {}), '([x.close for x in c.bars_raw[-100 - di + 1:-di + 1]])\n', (31592, 31646), True, 'import numpy as np\n'), ((33992, 34038), 'numpy.array', 'np.array', (['[x.close for x in c.bars_raw[-100:]]'], {}), '([x.close for x in c.bars_raw[-100:]])\n', (34000, 34038), True, 'import numpy as np\n'), ((34065, 34127), 'numpy.array', 'np.array', (['[x.close for x in c.bars_raw[-100 - di + 1:-di + 1]]'], {}), '([x.close for x in c.bars_raw[-100 - di + 1:-di + 1]])\n', (34073, 34127), True, 'import numpy as np\n'), ((34733, 34746), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (34744, 34746), False, 'from collections import OrderedDict\n'), ((36676, 36689), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (36687, 36689), False, 'from collections import OrderedDict\n'), ((36735, 36748), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (36746, 36748), False, 'from collections import OrderedDict\n'), ((39666, 39712), 'numpy.array', 'np.array', (['[x.close for x in c.bars_raw[-100:]]'], {}), '([x.close for x in c.bars_raw[-100:]])\n', (39674, 39712), True, 'import numpy 
as np\n')]
|
# This module contains the code to create maps
import numpy as np
from itertools import combinations
import matplotlib.pyplot as plt
import itertools
# For plotting
import seaborn as sns
import logging
import statistics
import time
def manhattan(coords_ind1, coords_ind2):
return abs(coords_ind1[0] - coords_ind2[0]) + abs(coords_ind1[1] - coords_ind2[1])
class IlluminationAxisDefinition:
"""
    Data structure that models one axis of the map. In general a map can have multiple axes, even if we visualize
    only a subset of them. One axis usually corresponds to a feature to explore.
    For the moment we assume that each axis is equally split into `num_cells` cells.
"""
def __init__(self, feature_name, min_value, max_value, num_cells):
self.logger = logging.getLogger('illumination_map.IlluminationAxisDefinition')
self.logger.debug('Creating an instance of IlluminationAxisDefinition for feature %s', feature_name)
self.feature_name = feature_name
self.min_value = min_value
self.max_value = max_value
self.num_cells = num_cells
# Definition of the inner map, values might fall outside it if less than min
self.original_bins = np.linspace(min_value, max_value, num_cells)
# Definition of the outer map
# Include the default boundary conditions. Note that we do not add np.PINF, but the max value.
# Check: https://stackoverflow.com/questions/4355132/numpy-digitize-returns-values-out-of-range
self.bins = np.concatenate(([np.NINF], self.original_bins, [max_value+0.001]))
def get_bins_labels(self):
"""
Note that here we return explicitly the last bin
Returns: All the bins plus the default
"""
return self.original_bins
def get_coordinate_for(self, sample):
"""
Return the coordinate of this sample according to the definition of this axis. It triggers exception if the
sample does not declare a field with the name of this axis, i.e., the sample lacks this feature
Args:
sample:
Returns:
an integer representing the coordinate of the sample in this dimension
Raises:
an exception is raised if the sample does not contain the feature
"""
# TODO Check whether the sample has the feature
value = sample[self.feature_name]
if value < self.min_value:
self.logger.warning("Sample %s has value %s below the min value %s for feature %s",
sample.id, value, self.min_value, self.feature_name)
elif value > self.max_value:
self.logger.warning("Sample %s has value %s above the max value %s for feature %s",
sample.id, value, self.max_value, self.feature_name)
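        # np.digitize returns the bin index of the value; IlluminationMap later shifts it to a 0-based cell coordinate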
return np.digitize(value, self.original_bins, right=False)
def is_outlier(self, sample):
value = sample[self.feature_name]
return value < self.min_value or value > self.max_value
def to_dict(self):
the_dict = {
"name" : self.feature_name,
"min-value" : self.min_value,
"max-value": self.max_value,
"num-cells": self.num_cells
}
return the_dict
class IlluminationMap:
"""
Data structure that represent a map. The map is defined in terms of its axes
"""
def __init__(self, feature1: IlluminationAxisDefinition, feature2: IlluminationAxisDefinition):
"""
Note that axes are positional, the first [0] is x, the second[1] is y, the third [2] is z, etc.
Args:
axes:
"""
self.logger = logging.getLogger('illumination_map.IlluminationMapDefinition')
self.feature_x = feature1
self.feature_y = feature2
self.samples = set()
        # Since we consider only input features we do not care about misbehaviors here
self.coverage_data = np.zeros(shape=(feature1.num_cells, feature2.num_cells), dtype=int)
# def _compute_maps_data(self, feature1, feature2, samples):
# """
# Create the raw data for the map by placing the samples on the map and counting for each cell how many samples
# are there and how many misbehaviors
# Args:
# feature1:
# feature2:
# samples:
#
# Returns:
# coverage_map, misbehavior_map
# coverage_outer_map, misbehavior_outer_map
# """
# # TODO Refactor:
#
# # Reshape the data as ndimensional array. But account for the lower and upper bins.
# coverage_data = np.zeros(shape=(feature1.num_cells, feature2.num_cells), dtype=int)
# misbehaviour_data = np.zeros(shape=(feature1.num_cells, feature2.num_cells), dtype=int)
#
# coverage_outer_data = np.zeros(shape=(feature1.num_cells + 2, feature2.num_cells + 2), dtype=int)
# misbehaviour_outer_data = np.zeros(shape=(feature1.num_cells + 2, feature2.num_cells + 2), dtype=int)
#
# for sample in samples:
# self.add_sample(sample)
#
#
# return coverage_data, misbehaviour_data, coverage_outer_data, misbehaviour_outer_data
#
# def visualize_probability(self, tags=None, feature_selector=None, sample_selector=None):
# """
# Visualize the probability of finding a misbehavior in a give cell, computed as the total of misbehavior over
# the total samples in each cell. This is defined only for cells that have samples in them. Also store
# the probability data so they can be post-processed (e.g., average across run/configuration)
# """
# # Prepare the data by selecting samples and features
#
# filtered_samples = self.samples
# self.logger.debug("All samples: %s", len(filtered_samples))
# if sample_selector is not None:
# filtered_samples = sample_selector(self.samples)
# self.logger.debug("Filtered samples: %s", len(filtered_samples))
#
# filtered_features = self.axes
# if feature_selector is not None:
# filtered_features = feature_selector(self.axes)
#
# figures = []
# # Might be redundant if we store also misbehaviour_maps and coverage_maps
# probability_maps = []
# # To compute confidence intervals and possibly other metrics on the map
# misbehaviour_maps = []
# coverage_maps = []
#
# total_samples_in_the_map = filtered_samples
#
# # Create one visualization for each pair of self.axes selected in order
# for feature1, feature2 in itertools.combinations(filtered_features, 2):
#
# # Make sure we reset this for each feature combination
# filtered_samples = total_samples_in_the_map
# # Remove samples that are outliers for this map
# if self.drop_outliers:
# filtered_samples = drop_outliers_for(feature1, filtered_samples)
# filtered_samples = drop_outliers_for(feature2, filtered_samples)
#
# coverage_data, misbehaviour_data, _, _ = self._compute_maps_data(feature1, feature2, filtered_samples)
#
# # figure
# fig, ax = plt.subplots(figsize=(8, 8))
#
# cmap = sns.cubehelix_palette(dark=0.1, light=0.9, as_cmap=True)
# # Cells have a value between 0.0 and 1.0 since they represent probabilities
#
# # Set the color for the under the limit to be white (0.0) so empty cells are not visualized
# # cmap.set_under('0.0')
# # Plot NaN in white
# cmap.set_bad(color='white')
#
# # Coverage data might be zero, so this produces Nan. We convert that to 0.0
# # probability_data = np.nan_to_num(misbehaviour_data / coverage_data)
# raw_probability_data = misbehaviour_data / coverage_data
#
# # For some weird reason the data in the heatmap are shown with the first dimension on the y and the
# # second on the x. So we transpose
# probability_data = np.transpose(raw_probability_data)
#
# sns.heatmap(probability_data, vmin=0.0, vmax=1.0, square=True, cmap=cmap)
#
# xtickslabel = [round(the_bin, 1) for the_bin in feature1.get_bins_labels()]
# ytickslabel = [round(the_bin, 1) for the_bin in feature2.get_bins_labels()]
# #
# ax.set_xticklabels(xtickslabel)
# plt.xticks(rotation=45)
# ax.set_yticklabels(ytickslabel)
# plt.yticks(rotation=0)
#
# tool_name = str(self._get_tool(filtered_samples))
# run_id = str(self._get_run_id(filtered_samples)).zfill(3)
#
# title_tokens = ["Mishbehavior Probability", "\n"]
# title_tokens.extend(["Tool:", tool_name, "--", "Run ID:", run_id])
#
# if tags is not None and len(tags) > 0:
# title_tokens.extend(["\n", "Tags:"])
# title_tokens.extend([str(t) for t in tags])
#
# the_title = " ".join(title_tokens)
#
# fig.suptitle(the_title, fontsize=16)
#
# # Plot small values of y below.
# # We need this to have the y axis start from zero at the bottom
# ax.invert_yaxis()
#
# # axis labels
# plt.xlabel(feature1.feature_name)
# plt.ylabel(feature2.feature_name)
#
# # Include data to store the file with same prefix
#
# # Add the store_to attribute to the figure and maps object
# setattr(fig, "store_to", "-".join(["probability", tool_name, run_id, feature1.feature_name, feature2.feature_name]))
# figures.append(fig)
#
# probability_maps.append({
# "data": raw_probability_data,
# "store_to": "-".join(["probability", tool_name, run_id, feature1.feature_name, feature2.feature_name])
# })
#
# misbehaviour_maps.append({
# "data": misbehaviour_data,
# "store_to": "-".join(["misbehaviour", tool_name, run_id, feature1.feature_name, feature2.feature_name])
# })
#
# coverage_maps.append({
# "data": coverage_data,
# "store_to": "-".join(["coverage", tool_name, run_id, feature1.feature_name, feature2.feature_name])
# })
#
#
# return figures, probability_maps, misbehaviour_maps, coverage_maps
def is_cell_free(self, sample):
# Coordinates reason in terms of bins 1, 2, 3, while data is 0-indexed
x_coord = self.feature_x.get_coordinate_for(sample) - 1
y_coord = self.feature_y.get_coordinate_for(sample) - 1
return self.coverage_data[x_coord, y_coord] == 0
def add_sample(self, sample):
# Coordinates reason in terms of bins 1, 2, 3, while data is 0-indexed
x_coord = self.feature_x.get_coordinate_for(sample) - 1
y_coord = self.feature_y.get_coordinate_for(sample) - 1
# Increment the coverage cell
self.coverage_data[x_coord, y_coord] += 1
def visualize(self):
"""
        Visualize the current map and the features on a map. The map cells contain the number of samples for each
        cell, so empty cells (0) are white, cells with few elements have a light color, while cells with more
        elements have a darker color. This gives an intuition on the distribution of the misbehaviors and the
collisions
Args:
Returns:
figure
"""
# Compute data
#coverage_data, misbehaviour_data, _, _ = self._compute_maps_data(feature1, feature2, self.samples)
# figure
figure, ax = plt.subplots(figsize=(8, 8))
# Set the heatmap
cmap = sns.cubehelix_palette(dark=0.5, light=0.9, as_cmap=True)
# Set the color for the under the limit to be white (so they are not visualized)
cmap.set_under('1.0')
# For some weird reason the data in the heatmap are shown with the first dimension on the y and the
# second on the x. So we transpose
coverage_data = np.transpose(self.coverage_data)
sns.heatmap(coverage_data, vmin=1, vmax=20, square=True, cmap=cmap)
xtickslabel = [round(the_bin, 1) for the_bin in self.feature_x.get_bins_labels()]
ytickslabel = [round(the_bin, 1) for the_bin in self.feature_y.get_bins_labels()]
#
ax.set_xticklabels(xtickslabel)
plt.xticks(rotation=45)
ax.set_yticklabels(ytickslabel)
plt.yticks(rotation=0)
figure.suptitle("Feature Map Coverage", fontsize=16)
# Plot small values of y below.
# We need this to have the y axis start from zero at the bottom
ax.invert_yaxis()
# axis labels
plt.xlabel(self.feature_x.feature_name)
plt.ylabel(self.feature_y.feature_name)
return figure
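# Minimal usage sketch (added for illustration; not part of the original module). The feature
# names, value ranges and random samples below are made-up placeholders that only show how the
# axis and map classes fit together.
if __name__ == "__main__":
    import random
    axis_x = IlluminationAxisDefinition("feature_a", 0.0, 1.0, 10)
    axis_y = IlluminationAxisDefinition("feature_b", 0.0, 100.0, 10)
    the_map = IlluminationMap(axis_x, axis_y)
    for _ in range(50):
        the_map.add_sample({"feature_a": random.random(), "feature_b": random.uniform(0.0, 100.0)})
    fig = the_map.visualize()
    fig.savefig("coverage_map.png")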
|
[
"logging.getLogger",
"seaborn.cubehelix_palette",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.digitize",
"matplotlib.pyplot.xlabel",
"seaborn.heatmap",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pyplot.yticks",
"numpy.concatenate",
"numpy.transpose",
"matplotlib.pyplot.subplots"
] |
[((792, 856), 'logging.getLogger', 'logging.getLogger', (['"""illumination_map.IlluminationAxisDefinition"""'], {}), "('illumination_map.IlluminationAxisDefinition')\n", (809, 856), False, 'import logging\n'), ((1227, 1271), 'numpy.linspace', 'np.linspace', (['min_value', 'max_value', 'num_cells'], {}), '(min_value, max_value, num_cells)\n', (1238, 1271), True, 'import numpy as np\n'), ((1537, 1605), 'numpy.concatenate', 'np.concatenate', (['([np.NINF], self.original_bins, [max_value + 0.001])'], {}), '(([np.NINF], self.original_bins, [max_value + 0.001]))\n', (1551, 1605), True, 'import numpy as np\n'), ((2871, 2922), 'numpy.digitize', 'np.digitize', (['value', 'self.original_bins'], {'right': '(False)'}), '(value, self.original_bins, right=False)\n', (2882, 2922), True, 'import numpy as np\n'), ((3715, 3778), 'logging.getLogger', 'logging.getLogger', (['"""illumination_map.IlluminationMapDefinition"""'], {}), "('illumination_map.IlluminationMapDefinition')\n", (3732, 3778), False, 'import logging\n'), ((3996, 4063), 'numpy.zeros', 'np.zeros', ([], {'shape': '(feature1.num_cells, feature2.num_cells)', 'dtype': 'int'}), '(shape=(feature1.num_cells, feature2.num_cells), dtype=int)\n', (4004, 4063), True, 'import numpy as np\n'), ((11976, 12004), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (11988, 12004), True, 'import matplotlib.pyplot as plt\n'), ((12047, 12103), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', ([], {'dark': '(0.5)', 'light': '(0.9)', 'as_cmap': '(True)'}), '(dark=0.5, light=0.9, as_cmap=True)\n', (12068, 12103), True, 'import seaborn as sns\n'), ((12399, 12431), 'numpy.transpose', 'np.transpose', (['self.coverage_data'], {}), '(self.coverage_data)\n', (12411, 12431), True, 'import numpy as np\n'), ((12441, 12508), 'seaborn.heatmap', 'sns.heatmap', (['coverage_data'], {'vmin': '(1)', 'vmax': '(20)', 'square': '(True)', 'cmap': 'cmap'}), '(coverage_data, vmin=1, vmax=20, square=True, cmap=cmap)\n', (12452, 12508), True, 'import seaborn as sns\n'), ((12748, 12771), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (12758, 12771), True, 'import matplotlib.pyplot as plt\n'), ((12820, 12842), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'rotation': '(0)'}), '(rotation=0)\n', (12830, 12842), True, 'import matplotlib.pyplot as plt\n'), ((13075, 13114), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['self.feature_x.feature_name'], {}), '(self.feature_x.feature_name)\n', (13085, 13114), True, 'import matplotlib.pyplot as plt\n'), ((13123, 13162), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.feature_y.feature_name'], {}), '(self.feature_y.feature_name)\n', (13133, 13162), True, 'import matplotlib.pyplot as plt\n')]
|
import os
import warnings
from typing import Optional, Tuple, Union, List
import joblib
import numpy as np
from ConfigSpace import Configuration
from sklearn import clone
from sklearn.base import is_classifier
from sklearn.model_selection import check_cv
from sklearn.model_selection._validation import _fit_and_predict, _check_is_permutation
from sklearn.utils import indexable
from sklearn.utils.validation import _num_samples
from dswizard.components.base import EstimatorComponent
from dswizard.core.config_cache import ConfigCache
from dswizard.core.logger import ProcessLogger
from dswizard.core.model import CandidateId, ConfigKey, Dataset
from dswizard.core.worker import Worker
from dswizard.pipeline.pipeline import FlexiblePipeline
from dswizard.util import util
from dswizard.util.util import model_file
warnings.filterwarnings("ignore", category=UserWarning)
class SklearnWorker(Worker):
def compute(self,
ds: Dataset,
cid: CandidateId,
config: Optional[Configuration],
cfg_cache: Optional[ConfigCache],
cfg_keys: Optional[List[ConfigKey]],
pipeline: FlexiblePipeline,
process_logger: ProcessLogger) -> List[float]:
if config is None:
# Derive configuration on complete data set. Test performance via CV
cloned_pipeline = clone(pipeline)
cloned_pipeline.cfg_cache = cfg_cache
cloned_pipeline.cfg_keys = cfg_keys
cloned_pipeline.fit(ds.X, ds.y, logger=process_logger)
config = process_logger.get_config(cloned_pipeline)
cloned_pipeline = clone(pipeline)
cloned_pipeline.set_hyperparameters(config.get_dictionary())
score, _, _, models = self._score(ds, cloned_pipeline)
self._store_models(cid, models)
return score
def transform_dataset(self, ds: Dataset, cid: CandidateId, component: EstimatorComponent,
config: Configuration) -> Tuple[np.ndarray, Optional[float]]:
component.set_hyperparameters(config.get_dictionary())
if is_classifier(component):
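            # stack the out-of-fold class probabilities and predicted labels onto the original features as new columns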
score, y_pred, y_prob, models = self._score(ds, component)
try:
y_pred = y_pred.astype(float)
except ValueError:
pass
X = np.hstack((ds.X, y_prob, np.reshape(y_pred, (-1, 1))))
else:
models = [component.fit(ds.X, ds.y)]
X = models[0].transform(ds.X)
score = None
self._store_models(cid, models)
return X, score
def _score(self, ds: Dataset, estimator: Union[EstimatorComponent, FlexiblePipeline], n_folds: int = 4):
y = ds.y
y_pred, y_prob, models = self._cross_val_predict(estimator, ds.X, y, cv=n_folds)
# Meta-learning only considers f1. Calculate f1 score for structure search
score = [util.score(y, y_prob, y_pred, ds.metric), util.score(y, y_prob, y_pred, 'f1')]
return score, y_pred, y_prob, models
@staticmethod
def _cross_val_predict(pipeline, X, y=None, cv=None):
X, y, groups = indexable(X, y, None)
cv = check_cv(cv, y, classifier=is_classifier(pipeline))
prediction_blocks = []
probability_blocks = []
fitted_pipelines = []
for train, test in cv.split(X, y, groups):
cloned_pipeline = clone(pipeline)
probability_blocks.append(_fit_and_predict(cloned_pipeline, X, y, train, test, 0, {}, 'predict_proba'))
prediction_blocks.append(cloned_pipeline.predict(X))
fitted_pipelines.append(cloned_pipeline)
# Concatenate the predictions
probabilities = [prob_block_i for prob_block_i, _ in probability_blocks]
predictions = [pred_block_i for pred_block_i in prediction_blocks]
test_indices = np.concatenate([indices_i for _, indices_i in probability_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
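        # the per-fold predictions were concatenated in test-fold order; invert that permutation to restore the original sample order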
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
probabilities = np.concatenate(probabilities)
predictions = np.concatenate(predictions)
if isinstance(predictions, list):
return [p[inv_test_indices] for p in predictions], [p[inv_test_indices] for p in
probabilities], fitted_pipelines
else:
return predictions[inv_test_indices], probabilities[inv_test_indices], fitted_pipelines
def _store_models(self, cid: CandidateId, models: List[EstimatorComponent]):
name = model_file(cid)
file = os.path.join(self.workdir, name)
with open(file, 'wb') as f:
joblib.dump(models, f)
|
[
"dswizard.util.util.model_file",
"dswizard.util.util.score",
"numpy.reshape",
"sklearn.base.is_classifier",
"os.path.join",
"sklearn.model_selection._validation._fit_and_predict",
"sklearn.utils.indexable",
"sklearn.utils.validation._num_samples",
"numpy.concatenate",
"sklearn.clone",
"joblib.dump",
"warnings.filterwarnings"
] |
[((819, 874), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (842, 874), False, 'import warnings\n'), ((1661, 1676), 'sklearn.clone', 'clone', (['pipeline'], {}), '(pipeline)\n', (1666, 1676), False, 'from sklearn import clone\n'), ((2127, 2151), 'sklearn.base.is_classifier', 'is_classifier', (['component'], {}), '(component)\n', (2140, 2151), False, 'from sklearn.base import is_classifier\n'), ((3145, 3166), 'sklearn.utils.indexable', 'indexable', (['X', 'y', 'None'], {}), '(X, y, None)\n', (3154, 3166), False, 'from sklearn.utils import indexable\n'), ((3875, 3941), 'numpy.concatenate', 'np.concatenate', (['[indices_i for _, indices_i in probability_blocks]'], {}), '([indices_i for _, indices_i in probability_blocks])\n', (3889, 3941), True, 'import numpy as np\n'), ((4250, 4279), 'numpy.concatenate', 'np.concatenate', (['probabilities'], {}), '(probabilities)\n', (4264, 4279), True, 'import numpy as np\n'), ((4302, 4329), 'numpy.concatenate', 'np.concatenate', (['predictions'], {}), '(predictions)\n', (4316, 4329), True, 'import numpy as np\n'), ((4774, 4789), 'dswizard.util.util.model_file', 'model_file', (['cid'], {}), '(cid)\n', (4784, 4789), False, 'from dswizard.util.util import model_file\n'), ((4805, 4837), 'os.path.join', 'os.path.join', (['self.workdir', 'name'], {}), '(self.workdir, name)\n', (4817, 4837), False, 'import os\n'), ((1389, 1404), 'sklearn.clone', 'clone', (['pipeline'], {}), '(pipeline)\n', (1394, 1404), False, 'from sklearn import clone\n'), ((2921, 2961), 'dswizard.util.util.score', 'util.score', (['y', 'y_prob', 'y_pred', 'ds.metric'], {}), '(y, y_prob, y_pred, ds.metric)\n', (2931, 2961), False, 'from dswizard.util import util\n'), ((2963, 2998), 'dswizard.util.util.score', 'util.score', (['y', 'y_prob', 'y_pred', '"""f1"""'], {}), "(y, y_prob, y_pred, 'f1')\n", (2973, 2998), False, 'from dswizard.util import util\n'), ((3407, 3422), 'sklearn.clone', 'clone', (['pipeline'], {}), '(pipeline)\n', (3412, 3422), False, 'from sklearn import clone\n'), ((4886, 4908), 'joblib.dump', 'joblib.dump', (['models', 'f'], {}), '(models, f)\n', (4897, 4908), False, 'import joblib\n'), ((3207, 3230), 'sklearn.base.is_classifier', 'is_classifier', (['pipeline'], {}), '(pipeline)\n', (3220, 3230), False, 'from sklearn.base import is_classifier\n'), ((3461, 3537), 'sklearn.model_selection._validation._fit_and_predict', '_fit_and_predict', (['cloned_pipeline', 'X', 'y', 'train', 'test', '(0)', '{}', '"""predict_proba"""'], {}), "(cloned_pipeline, X, y, train, test, 0, {}, 'predict_proba')\n", (3477, 3537), False, 'from sklearn.model_selection._validation import _fit_and_predict, _check_is_permutation\n'), ((3994, 4009), 'sklearn.utils.validation._num_samples', '_num_samples', (['X'], {}), '(X)\n', (4006, 4009), False, 'from sklearn.utils.validation import _num_samples\n'), ((2380, 2407), 'numpy.reshape', 'np.reshape', (['y_pred', '(-1, 1)'], {}), '(y_pred, (-1, 1))\n', (2390, 2407), True, 'import numpy as np\n')]
|
"""
Module realizes VLImage - a structure for storing an image in a special format.
"""
from enum import Enum
from pathlib import Path
from typing import Optional, Union
import requests
from FaceEngine import FormatType, Image as CoreImage # pylint: disable=E0611,E0401
import numpy as np
from PIL.Image import Image as PilImage
from PIL import Image as pilImage
from ..errors.errors import LunaVLError
from ..errors.exceptions import LunaSDKException
from .geometry import Rect
class ImageFormat(Enum):
"""
Enum for image format
"""
#: jpg
JPEG = "jpg"
#: png
PNG = "png"
#: ppm
PPM = "ppm"
#: tif
TIFF = "tif"
#: bmp
BMP = "bmp"
class ColorFormat(Enum):
"""
Enum for vl luna color formats
"""
#: 3 channel, 8 bit per channel, B-G-R color order format;
B8G8R8 = "B8G8R8"
#: 3 channel, 8 bit per channel, B-G-R color order format with 8 bit padding before next pixel;
B8G8R8X8 = "B8G8R8X8"
#: 3 channel, 8 bit per channel format with InfraRed semantics
IR_X8X8X8 = "IR_X8X8X8"
#: 1 channel, 16 bit per channel format;
R16 = "R16"
#: 1 channel, 8 bit per channel format;
R8 = "R8"
#: 3 channel, 8 bit per channel, R-G-B color order format;
R8G8B8 = "R8G8B8"
#: 3 channel, 8 bit per channel, R-G-B color order format with 8 bit padding before next pixel;
R8G8B8X8 = "R8G8B8X8"
#: unknown format
Unknown = "Unknown"
@property
def coreFormat(self) -> FormatType:
"""
Convert format to luna core format.
Returns:
luna core format
"""
return getattr(FormatType, self.value)
@staticmethod
def convertCoreFormat(imageFormat: FormatType) -> "ColorFormat":
"""
Convert FormatType to Format
Args:
imageFormat: core image format
Returns:
corresponding lunavl image format
"""
return getattr(ColorFormat, imageFormat.name)
@classmethod
def load(cls, colorFormat: str) -> "ColorFormat":
"""
Load color format from known sources:
1. some PIL image "mode" https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
2. FSDK color format
Args:
colorFormat: input color format
Returns:
corresponding lunavl image format
Raises:
NotImplementedError if color format is not supported
"""
if colorFormat == "RGB":
return cls.R8G8B8
if colorFormat == "RGBa":
return cls.R8G8B8X8
if colorFormat == "RGBA":
return cls.R8G8B8X8
if colorFormat == "RGBX":
return cls.R8G8B8X8
if colorFormat == "BGR":
return cls.B8G8R8
if colorFormat == "BGRa":
return cls.B8G8R8X8
if colorFormat == "BGRx":
return cls.B8G8R8X8
if colorFormat == "RGB":
return cls.R8G8B8
if colorFormat in "LP":
return cls.R8
try:
return getattr(cls, colorFormat)
except AttributeError:
pass
raise ValueError(f"Cannot load '{colorFormat}' color format.")
class VLImage:
"""
Class image.
Attributes:
coreImage (CoreFE.Image): core image object
source (Union[bytes, bytearray, PilImage, CoreImage]): body of image
filename (str): filename of the file which is source of image
"""
__slots__ = ("coreImage", "source", "filename")
def __init__(
self,
body: Union[bytes, bytearray, PilImage, CoreImage],
colorFormat: Optional[ColorFormat] = None,
filename: str = "",
):
"""
Init.
Args:
            body: body of image - bytes, bytearray, pillow image or core image
colorFormat: img format to cast into
filename: user mark a source of image
Raises:
TypeError: if body has incorrect type
LunaSDKException: if failed to load image to sdk Image
"""
if isinstance(body, bytearray):
body = bytes(body)
if isinstance(body, CoreImage):
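            # reuse the passed core image directly when no conversion is requested or it is already in the target format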
if colorFormat is None or colorFormat.coreFormat == body.getFormat():
self.coreImage = body
else:
error, self.coreImage = body.convert(colorFormat.coreFormat)
if error.isError:
raise LunaSDKException(LunaVLError.fromSDKError(error))
elif isinstance(body, bytes):
self.coreImage = CoreImage()
imgFormat = (colorFormat or ColorFormat.R8G8B8).coreFormat
error = self.coreImage.loadFromMemory(body, len(body), imgFormat)
if error.isError:
raise LunaSDKException(LunaVLError.fromSDKError(error))
elif isinstance(body, PilImage):
array = np.array(body)
colorFormat = ColorFormat.load(body.mode)
self.coreImage = self._coreImageFromNumpyArray(
ndarray=array, inputColorFormat=colorFormat, colorFormat=colorFormat
)
else:
raise TypeError(f"Bad image type: {type(body)}")
self.source = body
self.filename = filename
@classmethod
def load(
cls, *, filename: Optional[str] = None, url: Optional[str] = None, colorFormat: Optional[ColorFormat] = None
) -> "VLImage":
"""
Load image from numpy array or file or url.
Args:
*: for positional argument removal
filename: filename
url: url
colorFormat: img format to cast into
Returns:
vl image
Raises:
ValueError: if no one argument did not set.
>>> VLImage.load(url='https://st.kp.yandex.net/im/kadr/3/1/4/kinopoisk.ru-Keira-Knightley-3142930.jpg').rect
x = 0, y = 0, width = 1000, height = 1288
todo: more doc test
"""
if filename is not None:
path = Path(filename)
with path.open("rb") as file:
body = file.read()
img = cls(body, colorFormat)
img.filename = path.name
return img
if url is not None:
response = requests.get(url=url)
if response.status_code == 200:
img = cls(response.content, colorFormat)
img.filename = url
return img
raise ValueError
@staticmethod
def _coreImageFromNumpyArray(
ndarray: np.ndarray, inputColorFormat: ColorFormat, colorFormat: Optional[ColorFormat] = None
) -> CoreImage:
"""
Load VLImage from numpy array into `self`.
Args:
ndarray: numpy pixel array
inputColorFormat: numpy pixel array format
colorFormat: pixel format to cast into
Returns:
core image instance
"""
baseCoreImage = CoreImage()
baseCoreImage.setData(ndarray, inputColorFormat.coreFormat)
if colorFormat is None or baseCoreImage.getFormat() == colorFormat.coreFormat:
return baseCoreImage
error, convertedCoreImage = baseCoreImage.convert(colorFormat.coreFormat)
if error.isError:
raise LunaSDKException(LunaVLError.fromSDKError(error))
return convertedCoreImage
@classmethod
def fromNumpyArray(
cls,
arr: np.ndarray,
inputColorFormat: Union[str, ColorFormat],
colorFormat: Optional[ColorFormat] = None,
filename: str = "",
) -> "VLImage":
"""
Load VLImage from numpy array.
Args:
arr: numpy pixel array
inputColorFormat: input numpy pixel array format
colorFormat: pixel format to cast into
filename: optional filename
Returns:
vl image
"""
if isinstance(inputColorFormat, str):
inputColorFormat = ColorFormat.load(inputColorFormat)
coreImage = cls._coreImageFromNumpyArray(ndarray=arr, inputColorFormat=inputColorFormat)
return cls(coreImage, filename=filename, colorFormat=colorFormat)
@property
def format(self) -> ColorFormat:
""" getFormat(self: FaceEngine.Image) -> FaceEngine.FormatType
>>> image = VLImage.load(url='https://st.kp.yandex.net/im/kadr/3/1/4/kinopoisk.ru-Keira-Knightley-3142930.jpg')
>>> image.format.value
'R8G8B8'
"""
return ColorFormat.convertCoreFormat(self.coreImage.getFormat())
@property
def rect(self) -> Rect:
"""
Get rect of image.
Returns:
rect of the image
"""
return Rect.fromCoreRect(self.coreImage.getRect())
def computePitch(self, rowWidth) -> int:
"""
Compute row size in bytes
Args:
rowWidth: row width in pixels.
Returns:
row size in bytes.
"""
return self.coreImage.computePitch(rowWidth)
@property
def bitDepth(self) -> int:
"""
Get number of bits per pixel.
Returns:
number of bits per pixel.
"""
return self.coreImage.getBitDepth()
@property
def getByteDepth(self) -> int:
"""
Get number of bytes per pixel.
Returns:
number of bytes per pixel.
"""
return self.coreImage.getByteDepth()
@property
def channelCount(self) -> int:
"""
        Get channel count of the image.
Returns:
channel count.
>>> img = VLImage.load(url='https://st.kp.yandex.net/im/kadr/3/1/4/kinopoisk.ru-Keira-Knightley-3142930.jpg')
>>> img.channelCount
3
"""
return self.coreImage.getChannelCount()
@property
def channelSize(self) -> int:
"""
        Get the size of one channel in bits.
        Returns:
            channel size in bits.
>>> img = VLImage.load(url='https://st.kp.yandex.net/im/kadr/3/1/4/kinopoisk.ru-Keira-Knightley-3142930.jpg')
>>> img.channelSize
8
"""
return self.coreImage.getChannelSize()
@property
def channelStep(self) -> int:
"""
Get chanel step.
todo: more description
Returns:
channel size in bytes.
Notes:
padding bytes are considered spare channels.
>>> img = VLImage.load(url='https://st.kp.yandex.net/im/kadr/3/1/4/kinopoisk.ru-Keira-Knightley-3142930.jpg')
>>> img.channelStep
3
"""
return self.coreImage.getChannelStep()
def asNPArray(self) -> np.ndarray:
"""
Get image as numpy array.
!!!WARNING!!! Does NOT return the same image as in the self.coreImage.
Returns:
numpy array
todo: doctest
"""
if self.format == ColorFormat.R16:
return self.coreImage.getDataR16()
return self.coreImage.getData()
def asPillow(self) -> PilImage:
"""
Get image as pillow image.
!!!WARNING!!! Does NOT return the same image as in the self.coreImage.
Returns:
pillow image
todo: doctest
"""
imageArray = self.asNPArray()
return pilImage.fromarray(imageArray)
def isBGR(self) -> bool:
"""
Check whether format image is bgr or not.
Returns:
True if the image is bgr image otherwise False
Notes:
padding is ignored for padded channels.
>>> VLImage.load(url='https://st.kp.yandex.net/im/kadr/3/1/4/kinopoisk.ru-Keira-Knightley-3142930.jpg').isBGR()
False
"""
return self.coreImage.isBGR()
def isPadded(self) -> bool:
"""
Determinate image format has padding bytes or not.
Returns:
true if image format has padding bytes.
todo examples
"""
return self.coreImage.isPadded()
def save(self, filename: str, colorFormat: Optional[ColorFormat] = None):
"""
Save image to disk. Support image format: *ppm, jpg, png, tif*.
Args:
filename: filename
colorFormat: color format to save image in
Raises:
LunaSDKException: if failed to save image to sdk Image
"""
if colorFormat is None:
saveRes = self.coreImage.save(filename)
else:
saveRes = self.coreImage.save(filename, colorFormat.coreFormat)
if saveRes.isError:
raise LunaSDKException(LunaVLError.fromSDKError(saveRes))
def convertToBinaryImg(self, imageFormat: ImageFormat = ImageFormat.PPM) -> bytes:
"""
Convert VL image to binary image
Args:
imageFormat: format
Returns:
bytes
"""
        # not implemented yet; raise explicitly rather than silently returning None
        raise NotImplementedError("convertToBinaryImg is not implemented yet")
def isValid(self) -> bool:
"""
Check image is valid loaded to core image or not
Returns:
True if image is valid otherwise False
"""
return self.coreImage.isValid()
def convert(self, colorFormat: ColorFormat) -> "VLImage":
"""
Convert current VLImage into image with another color format.
Args:
colorFormat: color format to convert into
Returns:
converted vl image
Raises:
LunaSDKException: if failed to convert image
"""
error, coreImage = self.coreImage.convert(colorFormat.coreFormat)
if error.isError:
raise LunaSDKException(LunaVLError.fromSDKError(error))
return self.__class__(body=coreImage, filename=self.filename)
|
[
"PIL.Image.fromarray",
"pathlib.Path",
"FaceEngine.Image",
"requests.get",
"numpy.array"
] |
[((6990, 7001), 'FaceEngine.Image', 'CoreImage', ([], {}), '()\n', (6999, 7001), True, 'from FaceEngine import FormatType, Image as CoreImage\n'), ((11343, 11373), 'PIL.Image.fromarray', 'pilImage.fromarray', (['imageArray'], {}), '(imageArray)\n', (11361, 11373), True, 'from PIL import Image as pilImage\n'), ((6039, 6053), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (6043, 6053), False, 'from pathlib import Path\n'), ((6296, 6317), 'requests.get', 'requests.get', ([], {'url': 'url'}), '(url=url)\n', (6308, 6317), False, 'import requests\n'), ((4581, 4592), 'FaceEngine.Image', 'CoreImage', ([], {}), '()\n', (4590, 4592), True, 'from FaceEngine import FormatType, Image as CoreImage\n'), ((4905, 4919), 'numpy.array', 'np.array', (['body'], {}), '(body)\n', (4913, 4919), True, 'import numpy as np\n')]
|
# modified from PointGroup
# Written by <NAME>
import os
import os.path as osp
import logging
from typing import Optional
from operator import itemgetter
from copy import deepcopy
import gorilla
import torch
import numpy as np
import open3d as o3d
COLORSEMANTIC = np.array([
[171, 198, 230], # rgb(171, 198, 230)
[143, 223, 142], # rgb(143, 223, 142)
[0, 120, 177], # rgb(0, 120, 177)
[255, 188, 126], # rgb(255, 188, 126)
[189, 189, 57], # rgb(189, 189, 57)
[144, 86, 76], # rgb(144, 86, 76)
[255, 152, 153], # rgb(255, 152, 153)
[222, 40, 47], # rgb(222, 40, 47)
[197, 176, 212], # rgb(197, 176, 212)
[150, 103, 185], # rgb(150, 103, 185)
[200, 156, 149], # rgb(200, 156, 149)
[0, 190, 206], # rgb(0, 190, 206)
[252, 183, 210], # rgb(252, 183, 210)
[219, 219, 146], # rgb(219, 219, 146)
[255, 127, 43], # rgb(255, 127, 43)
[234, 119, 192], # rgb(234, 119, 192)
[150, 218, 228], # rgb(150, 218, 228)
[0, 160, 55], # rgb(0, 160, 55)
[110, 128, 143], # rgb(110, 128, 143)
[80, 83, 160] # rgb(80, 83, 160)
])
COLOR20 = np.array([
[230, 25, 75], # rgb(230, 25, 75)
[60, 180, 75], # rgb(60, 180, 75)
[255, 225, 25], # rgb(255, 225, 25)
[0, 130, 200], # rgb(0, 130, 200)
[245, 130, 48], # rgb(245, 130, 48)
[145, 30, 180], # rgb(145, 30, 180)
[70, 240, 240], # rgb(70, 240, 240)
[240, 50, 230], # rgb(240, 50, 230)
[210, 245, 60], # rgb(210, 245, 60)
[250, 190, 190], # rgb(250, 190, 190)
[0, 128, 128], # rgb(0, 128, 128)
[230, 190, 255], # rgb(230, 190, 255)
[170, 110, 40], # rgb(170, 110, 40)
[255, 250, 200], # rgb(255, 250, 200)
[128, 0, 0], # rgb(128, 0, 0)
[170, 255, 195], # rgb(170, 255, 195)
[128, 128, 0], # rgb(128, 128, 0)
[255, 215, 180], # rgb(255, 215, 180)
[0, 0, 128], # rgb(0, 0, 128)
[128, 128, 128] # rgb(128, 128, 128)
])
COLOR40 = np.array([
[88, 170, 108], # rgb(88,170,108)
[174, 105, 226], # rgb(174,105,226)
[78, 194, 83], # rgb(78,194,83)
[198, 62, 165], # rgb(198,62,165)
[133, 188, 52], # rgb(133,188,52)
[97, 101, 219], # rgb(97,101,219)
[190, 177, 52], # rgb(190,177,52)
[139, 65, 168], # rgb(139,65,168)
[75, 202, 137], # rgb(75,202,137)
[225, 66, 129], # rgb(225,66,129)
[68, 135, 42], # rgb(68,135,42)
[226, 116, 210], # rgb(226,116,210)
[146, 186, 98], # rgb(146,186,98)
[68, 105, 201], # rgb(68,105,201)
[219, 148, 53], # rgb(219,148,53)
[85, 142, 235], # rgb(85,142,235)
[212, 85, 42], # rgb(212,85,42)
[78, 176, 223], # rgb(78,176,223)
[221, 63, 77], # rgb(221,63,77)
[68, 195, 195], # rgb(68,195,195)
[175, 58, 119], # rgb(175,58,119)
[81, 175, 144], # rgb(81,175,144)
[184, 70, 74], # rgb(184,70,74)
[40, 116, 79], # rgb(40,116,79)
[184, 134, 219], # rgb(184,134,219)
[130, 137, 46], # rgb(130,137,46)
[110, 89, 164], # rgb(110,89,164)
[92, 135, 74], # rgb(92,135,74)
[220, 140, 190], # rgb(220,140,190)
[94, 103, 39], # rgb(94,103,39)
[144, 154, 219], # rgb(144,154,219)
[160, 86, 40], # rgb(160,86,40)
[67, 107, 165], # rgb(67,107,165)
    [194, 170, 104],  # rgb(194,170,104)
[162, 95, 150], # rgb(162,95,150)
[143, 110, 44], # rgb(143,110,44)
[146, 72, 105], # rgb(146,72,105)
[225, 142, 106], # rgb(225,142,106)
[162, 83, 86], # rgb(162,83,86)
[227, 124, 143] # rgb(227,124,143)
])
SEMANTIC_IDXS = np.array(
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
SEMANTIC_NAMES = np.array([
"wall", "floor", "cabinet", "bed", "chair", "sofa", "table", "door",
"window", "bookshelf", "picture", "counter", "desk", "curtain",
"refridgerator", "shower curtain", "toilet", "sink", "bathtub",
"otherfurniture"
])
CLASS_COLOR = {
"unannotated": [0, 0, 0],
"floor": [143, 223, 142],
"wall": [171, 198, 230],
"cabinet": [0, 120, 177],
"bed": [255, 188, 126],
"chair": [189, 189, 57],
"sofa": [144, 86, 76],
"table": [255, 152, 153],
"door": [222, 40, 47],
"window": [197, 176, 212],
"bookshelf": [150, 103, 185],
"picture": [200, 156, 149],
"counter": [0, 190, 206],
"desk": [252, 183, 210],
"curtain": [219, 219, 146],
"refridgerator": [255, 127, 43],
"bathtub": [234, 119, 192],
"shower curtain": [150, 218, 228],
"toilet": [0, 160, 55],
"sink": [110, 128, 143],
"otherfurniture": [80, 83, 160]
}
SEMANTIC_IDX2NAME = {
1: "wall",
2: "floor",
3: "cabinet",
4: "bed",
5: "chair",
6: "sofa",
7: "table",
8: "door",
9: "window",
10: "bookshelf",
11: "picture",
12: "counter",
14: "desk",
16: "curtain",
24: "refridgerator",
28: "shower curtain",
33: "toilet",
34: "sink",
36: "bathtub",
39: "otherfurniture"
}
def visualize_instance_mask(clusters: np.ndarray,
room_name: str,
visual_dir: str,
data_root: str,
cluster_scores: Optional[np.ndarray] = None,
semantic_pred: Optional[np.ndarray] = None,
color: int = 20,
**kwargs):
logger = gorilla.derive_logger(__name__)
assert color in [20, 40]
colors = globals()[f"COLOR{color}"]
mesh_file = osp.join(data_root, room_name, room_name + "_vh_clean_2.ply")
mesh = o3d.io.read_triangle_mesh(mesh_file)
pred_mesh = deepcopy(mesh)
points = np.array(pred_mesh.vertices)
inst_label_pred_rgb = np.zeros_like(points) # np.ones(rgb.shape) * 255 #
logger.info(f"room_name: {room_name}")
for cluster_id, cluster in enumerate(clusters):
if logger is not None:
# NOTE: remove the handlers are not FileHandler to avoid
# outputing this message on console(StreamHandler)
# and final will recover the handlers of logger
handler_storage = []
for handler in logger.handlers:
if not isinstance(handler, logging.FileHandler):
handler_storage.append(handler)
logger.removeHandler(handler)
message = f"{cluster_id:<4}: pointnum: {int(cluster.sum()):<7} "
if semantic_pred is not None:
semantic_label = np.argmax(
np.bincount(semantic_pred[np.where(cluster == 1)[0]]))
semantic_id = int(SEMANTIC_IDXS[semantic_label])
semantic_name = SEMANTIC_IDX2NAME[semantic_id]
message += f"semantic: {semantic_id:<3}-{semantic_name:<15} "
if cluster_scores is not None:
score = float(cluster_scores[cluster_id])
message += f"score: {score:.4f} "
logger.info(message)
for handler in handler_storage:
logger.addHandler(handler)
inst_label_pred_rgb[cluster == 1] = colors[cluster_id % len(colors)]
rgb = inst_label_pred_rgb
pred_mesh.vertex_colors = o3d.utility.Vector3dVector(rgb / 255)
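    # shift the predicted mesh along y so it does not overlap the original when both are merged below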
points[:, 1] += (points[:, 1].max() + 0.5)
pred_mesh.vertices = o3d.utility.Vector3dVector(points)
mesh += pred_mesh
o3d.io.write_triangle_mesh(osp.join(visual_dir, room_name + ".ply"), mesh)
# TODO: add the semantic visualization
def visualize_pts_rgb(rgb, room_name, data_root, output_dir, mode="test"):
if "test" in mode:
split = "scans_test"
else:
split = "scans"
mesh_file = osp.join(data_root, split, room_name,
room_name + "_vh_clean_2.ply")
mesh = o3d.io.read_triangle_mesh(mesh_file)
pred_mesh = deepcopy(mesh)
pred_mesh.vertex_colors = o3d.utility.Vector3dVector(rgb / 255)
points = np.array(pred_mesh.vertices)
# points[:, 2] += 3
points[:, 1] += (points[:, 1].max() + 0.5)
pred_mesh.vertices = o3d.utility.Vector3dVector(points)
mesh += pred_mesh
o3d.io.write_triangle_mesh(osp.join(output_dir, room_name + ".ply"), mesh)
def get_coords_color(data_root: str,
result_root: str,
room_split: str = "train",
room_name: str = "scene0000_00",
task: str = "instance_pred"):
input_file = os.path.join(data_root, room_split,
room_name + "_inst_nostuff.pth")
    assert os.path.isfile(input_file), f"File does not exist - {input_file}."
if "test" in room_split:
xyz, rgb, edges, scene_idx = torch.load(input_file)
else:
xyz, rgb, label, inst_label = torch.load(input_file)
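    # colors are stored in [-1, 1]; map them back to the 0-255 range for visualization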
rgb = (rgb + 1) * 127.5
if (task == "semantic_gt"):
assert "test" not in room_split
label = label.astype(np.int)
label_rgb = np.zeros(rgb.shape)
label_rgb[label >= 0] = np.array(
itemgetter(*SEMANTIC_NAMES[label[label >= 0]])(CLASS_COLOR))
rgb = label_rgb
elif (task == "instance_gt"):
assert "test" not in room_split
inst_label = inst_label.astype(np.int)
print(f"Instance number: {inst_label.max() + 1}")
inst_label_rgb = np.zeros(rgb.shape)
object_idx = (inst_label >= 0)
inst_label_rgb[object_idx] = COLOR20[inst_label[object_idx] %
len(COLOR20)]
rgb = inst_label_rgb
elif (task == "semantic_pred"):
assert room_split != "train"
semantic_file = os.path.join(result_root, room_split, "semantic",
room_name + ".npy")
assert os.path.isfile(
semantic_file), f"No semantic result - {semantic_file}."
label_pred = np.load(semantic_file).astype(np.int) # 0~19
label_pred_rgb = np.array(
itemgetter(*SEMANTIC_NAMES[label_pred])(CLASS_COLOR))
rgb = label_pred_rgb
elif (task == "instance_pred"):
assert room_split != "train"
instance_file = os.path.join(result_root, room_split,
room_name + ".txt")
assert os.path.isfile(
instance_file), f"No instance result - {instance_file}."
f = open(instance_file, "r")
masks = f.readlines()
masks = [mask.rstrip().split() for mask in masks]
inst_label_pred_rgb = np.zeros(rgb.shape) # np.ones(rgb.shape) * 255 #
for i in range(len(masks) - 1, -1, -1):
mask_path = os.path.join(result_root, room_split, masks[i][0])
assert os.path.isfile(mask_path), mask_path
if (float(masks[i][2]) < 0.09):
continue
mask = np.loadtxt(mask_path).astype(np.int)
print(
f"{i} {masks[i][2]}: {SEMANTIC_IDX2NAME[int(masks[i][1])]} pointnum: {mask.sum()}"
)
inst_label_pred_rgb[mask == 1] = COLOR20[i % len(COLOR20)]
rgb = inst_label_pred_rgb
if "test" not in room_split:
sem_valid = (label != -100)
xyz = xyz[sem_valid]
rgb = rgb[sem_valid]
return xyz, rgb
def visualize_instance_mask_lite(
clusters: np.ndarray,
points: np.ndarray,
visual_path: str,
color: int = 20,
):
assert color in [20, 40]
colors = globals()[f"COLOR{color}"]
inst_label_pred_rgb = np.zeros_like(points) # np.ones(rgb.shape) * 255 #
for cluster_id, cluster in enumerate(clusters):
inst_label_pred_rgb[cluster == 1] = colors[cluster_id % len(colors)]
rgb = inst_label_pred_rgb
pc = o3d.geometry.PointCloud()
pc.points = o3d.utility.Vector3dVector(points)
pc.colors = o3d.utility.Vector3dVector(rgb / 255)
o3d.io.write_point_cloud(visual_path, pc)
|
[
"gorilla.derive_logger",
"numpy.where",
"torch.load",
"os.path.join",
"os.path.isfile",
"open3d.io.read_triangle_mesh",
"numpy.array",
"open3d.io.write_point_cloud",
"numpy.zeros",
"open3d.geometry.PointCloud",
"copy.deepcopy",
"operator.itemgetter",
"numpy.loadtxt",
"numpy.load",
"numpy.zeros_like",
"open3d.utility.Vector3dVector"
] |
[((264, 617), 'numpy.array', 'np.array', (['[[171, 198, 230], [143, 223, 142], [0, 120, 177], [255, 188, 126], [189, \n 189, 57], [144, 86, 76], [255, 152, 153], [222, 40, 47], [197, 176, 212\n ], [150, 103, 185], [200, 156, 149], [0, 190, 206], [252, 183, 210], [\n 219, 219, 146], [255, 127, 43], [234, 119, 192], [150, 218, 228], [0, \n 160, 55], [110, 128, 143], [80, 83, 160]]'], {}), '([[171, 198, 230], [143, 223, 142], [0, 120, 177], [255, 188, 126],\n [189, 189, 57], [144, 86, 76], [255, 152, 153], [222, 40, 47], [197, \n 176, 212], [150, 103, 185], [200, 156, 149], [0, 190, 206], [252, 183, \n 210], [219, 219, 146], [255, 127, 43], [234, 119, 192], [150, 218, 228],\n [0, 160, 55], [110, 128, 143], [80, 83, 160]])\n', (272, 617), True, 'import numpy as np\n'), ((1118, 1461), 'numpy.array', 'np.array', (['[[230, 25, 75], [60, 180, 75], [255, 225, 25], [0, 130, 200], [245, 130, 48\n ], [145, 30, 180], [70, 240, 240], [240, 50, 230], [210, 245, 60], [250,\n 190, 190], [0, 128, 128], [230, 190, 255], [170, 110, 40], [255, 250, \n 200], [128, 0, 0], [170, 255, 195], [128, 128, 0], [255, 215, 180], [0,\n 0, 128], [128, 128, 128]]'], {}), '([[230, 25, 75], [60, 180, 75], [255, 225, 25], [0, 130, 200], [245,\n 130, 48], [145, 30, 180], [70, 240, 240], [240, 50, 230], [210, 245, 60\n ], [250, 190, 190], [0, 128, 128], [230, 190, 255], [170, 110, 40], [\n 255, 250, 200], [128, 0, 0], [170, 255, 195], [128, 128, 0], [255, 215,\n 180], [0, 0, 128], [128, 128, 128]])\n', (1126, 1461), True, 'import numpy as np\n'), ((1952, 2637), 'numpy.array', 'np.array', (['[[88, 170, 108], [174, 105, 226], [78, 194, 83], [198, 62, 165], [133, 188,\n 52], [97, 101, 219], [190, 177, 52], [139, 65, 168], [75, 202, 137], [\n 225, 66, 129], [68, 135, 42], [226, 116, 210], [146, 186, 98], [68, 105,\n 201], [219, 148, 53], [85, 142, 235], [212, 85, 42], [78, 176, 223], [\n 221, 63, 77], [68, 195, 195], [175, 58, 119], [81, 175, 144], [184, 70,\n 74], [40, 116, 79], [184, 134, 219], [130, 137, 46], [110, 89, 164], [\n 92, 135, 74], [220, 140, 190], [94, 103, 39], [144, 154, 219], [160, 86,\n 40], [67, 107, 165], [194, 170, 104], [162, 95, 150], [143, 110, 44], [\n 146, 72, 105], [225, 142, 106], [162, 83, 86], [227, 124, 143]]'], {}), '([[88, 170, 108], [174, 105, 226], [78, 194, 83], [198, 62, 165], [\n 133, 188, 52], [97, 101, 219], [190, 177, 52], [139, 65, 168], [75, 202,\n 137], [225, 66, 129], [68, 135, 42], [226, 116, 210], [146, 186, 98], [\n 68, 105, 201], [219, 148, 53], [85, 142, 235], [212, 85, 42], [78, 176,\n 223], [221, 63, 77], [68, 195, 195], [175, 58, 119], [81, 175, 144], [\n 184, 70, 74], [40, 116, 79], [184, 134, 219], [130, 137, 46], [110, 89,\n 164], [92, 135, 74], [220, 140, 190], [94, 103, 39], [144, 154, 219], [\n 160, 86, 40], [67, 107, 165], [194, 170, 104], [162, 95, 150], [143, \n 110, 44], [146, 72, 105], [225, 142, 106], [162, 83, 86], [227, 124, 143]])\n', (1960, 2637), True, 'import numpy as np\n'), ((3537, 3622), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36,\n 39])\n', (3545, 3622), True, 'import numpy as np\n'), ((3641, 3878), 'numpy.array', 'np.array', (["['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door',\n 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain',\n 'refridgerator', 'shower curtain', 'toilet', 'sink', 'bathtub',\n 'otherfurniture']"], {}), "(['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',\n 
'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain',\n 'refridgerator', 'shower curtain', 'toilet', 'sink', 'bathtub',\n 'otherfurniture'])\n", (3649, 3878), True, 'import numpy as np\n'), ((5368, 5399), 'gorilla.derive_logger', 'gorilla.derive_logger', (['__name__'], {}), '(__name__)\n', (5389, 5399), False, 'import gorilla\n'), ((5485, 5546), 'os.path.join', 'osp.join', (['data_root', 'room_name', "(room_name + '_vh_clean_2.ply')"], {}), "(data_root, room_name, room_name + '_vh_clean_2.ply')\n", (5493, 5546), True, 'import os.path as osp\n'), ((5558, 5594), 'open3d.io.read_triangle_mesh', 'o3d.io.read_triangle_mesh', (['mesh_file'], {}), '(mesh_file)\n', (5583, 5594), True, 'import open3d as o3d\n'), ((5611, 5625), 'copy.deepcopy', 'deepcopy', (['mesh'], {}), '(mesh)\n', (5619, 5625), False, 'from copy import deepcopy\n'), ((5639, 5667), 'numpy.array', 'np.array', (['pred_mesh.vertices'], {}), '(pred_mesh.vertices)\n', (5647, 5667), True, 'import numpy as np\n'), ((5694, 5715), 'numpy.zeros_like', 'np.zeros_like', (['points'], {}), '(points)\n', (5707, 5715), True, 'import numpy as np\n'), ((7173, 7210), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['(rgb / 255)'], {}), '(rgb / 255)\n', (7199, 7210), True, 'import open3d as o3d\n'), ((7283, 7317), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (7309, 7317), True, 'import open3d as o3d\n'), ((7639, 7707), 'os.path.join', 'osp.join', (['data_root', 'split', 'room_name', "(room_name + '_vh_clean_2.ply')"], {}), "(data_root, split, room_name, room_name + '_vh_clean_2.ply')\n", (7647, 7707), True, 'import os.path as osp\n'), ((7744, 7780), 'open3d.io.read_triangle_mesh', 'o3d.io.read_triangle_mesh', (['mesh_file'], {}), '(mesh_file)\n', (7769, 7780), True, 'import open3d as o3d\n'), ((7797, 7811), 'copy.deepcopy', 'deepcopy', (['mesh'], {}), '(mesh)\n', (7805, 7811), False, 'from copy import deepcopy\n'), ((7842, 7879), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['(rgb / 255)'], {}), '(rgb / 255)\n', (7868, 7879), True, 'import open3d as o3d\n'), ((7893, 7921), 'numpy.array', 'np.array', (['pred_mesh.vertices'], {}), '(pred_mesh.vertices)\n', (7901, 7921), True, 'import numpy as np\n'), ((8018, 8052), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (8044, 8052), True, 'import open3d as o3d\n'), ((8402, 8470), 'os.path.join', 'os.path.join', (['data_root', 'room_split', "(room_name + '_inst_nostuff.pth')"], {}), "(data_root, room_split, room_name + '_inst_nostuff.pth')\n", (8414, 8470), False, 'import os\n'), ((8512, 8538), 'os.path.isfile', 'os.path.isfile', (['input_file'], {}), '(input_file)\n', (8526, 8538), False, 'import os\n'), ((11390, 11411), 'numpy.zeros_like', 'np.zeros_like', (['points'], {}), '(points)\n', (11403, 11411), True, 'import numpy as np\n'), ((11611, 11636), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (11634, 11636), True, 'import open3d as o3d\n'), ((11653, 11687), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (11679, 11687), True, 'import open3d as o3d\n'), ((11704, 11741), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['(rgb / 255)'], {}), '(rgb / 255)\n', (11730, 11741), True, 'import open3d as o3d\n'), ((11746, 11787), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['visual_path', 'pc'], {}), '(visual_path, pc)\n', (11770, 11787), True, 'import open3d as 
o3d\n'), ((7371, 7411), 'os.path.join', 'osp.join', (['visual_dir', "(room_name + '.ply')"], {}), "(visual_dir, room_name + '.ply')\n", (7379, 7411), True, 'import os.path as osp\n'), ((8106, 8146), 'os.path.join', 'osp.join', (['output_dir', "(room_name + '.ply')"], {}), "(output_dir, room_name + '.ply')\n", (8114, 8146), True, 'import os.path as osp\n'), ((8640, 8662), 'torch.load', 'torch.load', (['input_file'], {}), '(input_file)\n', (8650, 8662), False, 'import torch\n'), ((8711, 8733), 'torch.load', 'torch.load', (['input_file'], {}), '(input_file)\n', (8721, 8733), False, 'import torch\n'), ((8892, 8911), 'numpy.zeros', 'np.zeros', (['rgb.shape'], {}), '(rgb.shape)\n', (8900, 8911), True, 'import numpy as np\n'), ((9256, 9275), 'numpy.zeros', 'np.zeros', (['rgb.shape'], {}), '(rgb.shape)\n', (9264, 9275), True, 'import numpy as np\n'), ((8966, 9012), 'operator.itemgetter', 'itemgetter', (['*SEMANTIC_NAMES[label[label >= 0]]'], {}), '(*SEMANTIC_NAMES[label[label >= 0]])\n', (8976, 9012), False, 'from operator import itemgetter\n'), ((9571, 9640), 'os.path.join', 'os.path.join', (['result_root', 'room_split', '"""semantic"""', "(room_name + '.npy')"], {}), "(result_root, room_split, 'semantic', room_name + '.npy')\n", (9583, 9640), False, 'import os\n'), ((9693, 9722), 'os.path.isfile', 'os.path.isfile', (['semantic_file'], {}), '(semantic_file)\n', (9707, 9722), False, 'import os\n'), ((10073, 10130), 'os.path.join', 'os.path.join', (['result_root', 'room_split', "(room_name + '.txt')"], {}), "(result_root, room_split, room_name + '.txt')\n", (10085, 10130), False, 'import os\n'), ((10183, 10212), 'os.path.isfile', 'os.path.isfile', (['instance_file'], {}), '(instance_file)\n', (10197, 10212), False, 'import os\n'), ((10423, 10442), 'numpy.zeros', 'np.zeros', (['rgb.shape'], {}), '(rgb.shape)\n', (10431, 10442), True, 'import numpy as np\n'), ((9799, 9821), 'numpy.load', 'np.load', (['semantic_file'], {}), '(semantic_file)\n', (9806, 9821), True, 'import numpy as np\n'), ((9892, 9931), 'operator.itemgetter', 'itemgetter', (['*SEMANTIC_NAMES[label_pred]'], {}), '(*SEMANTIC_NAMES[label_pred])\n', (9902, 9931), False, 'from operator import itemgetter\n'), ((10545, 10595), 'os.path.join', 'os.path.join', (['result_root', 'room_split', 'masks[i][0]'], {}), '(result_root, room_split, masks[i][0])\n', (10557, 10595), False, 'import os\n'), ((10615, 10640), 'os.path.isfile', 'os.path.isfile', (['mask_path'], {}), '(mask_path)\n', (10629, 10640), False, 'import os\n'), ((6529, 6551), 'numpy.where', 'np.where', (['(cluster == 1)'], {}), '(cluster == 1)\n', (6537, 6551), True, 'import numpy as np\n'), ((10740, 10761), 'numpy.loadtxt', 'np.loadtxt', (['mask_path'], {}), '(mask_path)\n', (10750, 10761), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
fs = 1000 # sampling frequency
fc = 6 # cut-off frequency
t = np.arange(1000)/fs
sga = np.sin(2*np.pi*2*t) # signal with f = 2
sgb = np.sin(2*np.pi*10*t) # signal with f = 10
sgo = sga + sgb #+ (np.random.random(1000) - 0.5)
w = fc/(fs/2)  # scipy's butter expects the cut-off normalised to the Nyquist frequency (fs/2)
b, a = signal.butter(4, w, 'low')
sgf1 = signal.lfilter(b, a, sgo)   # forward pass
sgf1 = sgf1[::-1]                  # reverse
sgf1 = signal.lfilter(b, a, sgf1)  # backward pass
sgf1 = sgf1[::-1]                  # restore the original time order
plt.plot(t, sgo, label = 'original')
plt.plot(t, sga, label = 'f = 2')
plt.plot(t, sgf1, 'r-', linewidth = 3, label = 'bidirectional')
sgf2 = signal.filtfilt(b, a, sgo)
sgf3 = signal.lfilter(b, a, sgo)
plt.plot(t, sgf2, label = 'filtfilt')
plt.plot(t, sgf3, 'k-', linewidth = 1.5, label = 'lfilter')
plt.legend()
plt.show()
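# Added illustration (not part of the original script): the manual
# forward-backward pass above and signal.filtfilt implement the same
# zero-phase filtering idea, so the two results should agree except
# near the edges, where filtfilt handles the transients more carefully.
print('max |bidirectional - filtfilt|:', np.max(np.abs(sgf1 - sgf2)))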
|
[
"numpy.arange",
"scipy.signal.filtfilt",
"matplotlib.pyplot.plot",
"scipy.signal.butter",
"scipy.signal.lfilter",
"numpy.sin",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((201, 226), 'numpy.sin', 'np.sin', (['(2 * np.pi * 2 * t)'], {}), '(2 * np.pi * 2 * t)\n', (207, 226), True, 'import numpy as np\n'), ((249, 275), 'numpy.sin', 'np.sin', (['(2 * np.pi * 10 * t)'], {}), '(2 * np.pi * 10 * t)\n', (255, 275), True, 'import numpy as np\n'), ((360, 386), 'scipy.signal.butter', 'signal.butter', (['(4)', 'w', '"""low"""'], {}), "(4, w, 'low')\n", (373, 386), False, 'from scipy import signal\n'), ((395, 420), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'sgo'], {}), '(b, a, sgo)\n', (409, 420), False, 'from scipy import signal\n'), ((449, 475), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'sgf1'], {}), '(b, a, sgf1)\n', (463, 475), False, 'from scipy import signal\n'), ((498, 532), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sgo'], {'label': '"""original"""'}), "(t, sgo, label='original')\n", (506, 532), True, 'import matplotlib.pyplot as plt\n'), ((535, 566), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sga'], {'label': '"""f = 2"""'}), "(t, sga, label='f = 2')\n", (543, 566), True, 'import matplotlib.pyplot as plt\n'), ((569, 628), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sgf1', '"""r-"""'], {'linewidth': '(3)', 'label': '"""bidirectional"""'}), "(t, sgf1, 'r-', linewidth=3, label='bidirectional')\n", (577, 628), True, 'import matplotlib.pyplot as plt\n'), ((641, 667), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'sgo'], {}), '(b, a, sgo)\n', (656, 667), False, 'from scipy import signal\n'), ((675, 700), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'sgo'], {}), '(b, a, sgo)\n', (689, 700), False, 'from scipy import signal\n'), ((701, 736), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sgf2'], {'label': '"""filtfilt"""'}), "(t, sgf2, label='filtfilt')\n", (709, 736), True, 'import matplotlib.pyplot as plt\n'), ((739, 794), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'sgf3', '"""k-"""'], {'linewidth': '(1.5)', 'label': '"""lfilter"""'}), "(t, sgf3, 'k-', linewidth=1.5, label='lfilter')\n", (747, 794), True, 'import matplotlib.pyplot as plt\n'), ((800, 812), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (810, 812), True, 'import matplotlib.pyplot as plt\n'), ((813, 823), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (821, 823), True, 'import matplotlib.pyplot as plt\n'), ((176, 191), 'numpy.arange', 'np.arange', (['(1000)'], {}), '(1000)\n', (185, 191), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Tests for module mosaic.immutable_model
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import unittest
import numpy as N
import immutable.np as IN
import mosaic.immutable_model as M
from mosaic.api import is_valid
def make_water_fragment(nsites=1):
return M.fragment("water", (),
(("H1", M.atom(M.element("H"), nsites)),
("H2", M.atom(M.element("H"), nsites)),
("O", M.atom(M.element("O"), nsites))),
(("H1", "O", "single"), ("H2", "O", "single")))
class AtomDescriptorTest(unittest.TestCase):
def test_singleton(self):
self.assertTrue(M.dummy() is M.dummy())
self.assertTrue(M.dummy('a') is M.dummy('a'))
self.assertTrue(M.dummy('a') is not M.dummy('b'))
self.assertTrue(M.dummy('a') is not M.unknown('a'))
self.assertTrue(M.dummy('C') is not M.element('C'))
self.assertTrue(M.element('C') is M.element('C'))
def test_name(self):
for name in ['a', 'b', 'c']:
self.assertEqual(M.unknown(name).name, name)
def test_type(self):
self.assertEqual(M.dummy().type, "dummy")
self.assertEqual(M.unknown().type, "")
self.assertEqual(M.element('O').type, "element")
self.assertEqual(M.cgparticle('ala').type, "cgparticle")
class WaterTest(unittest.TestCase):
def setUp(self):
self.mol = make_water_fragment()
def test_basics(self):
self.assertEqual(self.mol.number_of_atoms, 3)
self.assertEqual(self.mol.number_of_sites, 3)
self.assertEqual(self.mol.number_of_bonds, 2)
self.assertEqual(self.mol.species, "water")
def test_equality(self):
same_mol = make_water_fragment()
changed_bond_order = M.fragment("water", (),
(("H1", M.atom(M.element("H"))),
("H2", M.atom(M.element("H"))),
("O", M.atom(M.element("O")))),
(("O", "H2", "single"),
("O", "H1", "single")))
changed_atom_order = M.fragment("water", (),
(("O", M.atom(M.element("O"))),
("H1", M.atom(M.element("H"))),
("H2", M.atom(M.element("H")))),
(("O", "H1", "single"),
("O", "H2", "single")))
self.assertEqual(self.mol, self.mol)
self.assertEqual(self.mol, same_mol)
self.assertEqual(self.mol, changed_bond_order)
self.assertNotEqual(self.mol, changed_atom_order)
class PeptideTest(unittest.TestCase):
def _make_molecule(self):
C = M.element('C')
H = M.element('H')
N = M.element('N')
O = M.element('O')
peptide_group = M.fragment('peptide',
(),
(('CA', M.atom(C)),
('HA', M.atom(H)),
('H', M.atom(H)),
('N', M.atom(N)),
('C', M.atom(C)),
('O', M.atom(O))),
(('N', 'H', "single"),
('N', 'CA', "single"),
('CA', 'HA', "single"),
('CA', 'C', "single"),
('C', 'O', "double")))
ala_sidechain = M.fragment('ala_sidechain',
(),
(('CB', M.atom(C)),
('HB1', M.atom(H)),
('HB2', M.atom(H)),
('HB3', M.atom(H))),
(('CB', 'HB1', "single"),
('CB', 'HB2', "single"),
('CB', 'HB3', "single"),))
ala = M.fragment('alanine',
(('peptide', peptide_group),
('sidechain', ala_sidechain)),
(),
(('peptide.CA', 'sidechain.CB', "single"),))
return M.polymer('alanine_dipeptide',
(('ALA1', ala),
('ALA2', ala)),
(('ALA1.peptide.C', 'ALA2.peptide.N', "single"),),
'polypeptide')
def test_basic(self):
mol = self._make_molecule()
self.assertEqual(mol.number_of_atoms, 20)
self.assertEqual(mol.number_of_sites, 20)
self.assertEqual(mol.number_of_bonds, 19)
self.assertEqual(mol.polymer_type, "polypeptide")
def test_equality(self):
self.assertEqual(self._make_molecule(),
self._make_molecule())
def test_iterators(self):
mol = self._make_molecule()
mol_ref = M.FragmentRef('x', mol)
atoms = tuple(mol_ref.recursive_atom_iterator())
self.assertEqual(len(atoms), mol.number_of_atoms)
bonds = tuple(mol_ref.recursive_bond_iterator())
self.assertEqual(len(bonds), mol.number_of_bonds)
for a1, a2, order in bonds:
for a in a1, a2:
node = mol
for p in a.split('.'):
node = node[p]
self.assertTrue(isinstance(node, M.Atom))
paths = tuple(mol_ref.recursive_atom_path_iterator())
self.assertEqual(len(paths), mol.number_of_atoms)
for ap in paths:
node = mol
for p in ap.split('.'):
node = node[p]
self.assertTrue(isinstance(node, M.Atom))
class ErrorCheckingTest(unittest.TestCase):
def test_atom_descriptor(self):
self.assertRaises(TypeError, lambda: M.dummy(42))
self.assertRaises(ValueError, lambda: M.element(42))
self.assertRaises(ValueError, lambda: M.element("X"))
def test_atom(self):
carbon = M.element("C")
self.assertRaises(TypeError, lambda: M.atom('C', 1))
self.assertRaises(ValueError, lambda: M.atom(carbon, 0))
def test_fragment(self):
carbon = M.atom(M.element("C"))
# Illegal fragments
self.assertRaises(TypeError,
lambda: M.fragment('m', None, (("C", carbon),), ()))
self.assertRaises(TypeError,
lambda: M.fragment('m', [1, 2], (("C", carbon),), ()))
self.assertRaises(TypeError,
lambda: M.fragment('m', (("C", carbon),), (), ()))
# Illegal atoms
self.assertRaises(TypeError,
lambda: M.fragment('m', (), None, ()))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), [1, 2], ()))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (carbon,), ()))
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(("C", carbon),
("C", carbon)),
()))
# Illegal bond lists
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (("C", carbon),), None))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (("C", carbon),),
[1, 2, 3]))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (("C", carbon),),
(('X', 'X'))))
self.assertRaises(TypeError,
lambda: M.fragment('m', (), (("C", carbon),),
(['X', 'X', 'single'])))
def test_bonds(self):
carbon = M.atom(M.element("C"))
# Bond specified by only one atom
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(('C1', carbon), ('C2', carbon)),
(('C1', ),)))
# Bond specified by two atoms but no bond order
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(('C1', carbon), ('C2', carbon)),
(('C1', 'C2'),)))
# Bond specified by two identical atoms
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(('C1', carbon), ('C2', carbon)),
(('C1', 'C1', ''),)))
# Bond specified by an atom name that is undefined
self.assertRaises(ValueError,
lambda: M.fragment('m', (),
(('C1', carbon), ('C2', carbon)),
(('C1', 'C3', ''),)))
# Bond specified at the wrong fragment level
f = M.fragment('x', (), (('C1', carbon), ('C2', carbon)), ())
self.assertRaises(ValueError,
lambda: M.fragment('m', (('x', f),),
(('C3', carbon),),
(('x.C1', 'x.C2', ''),)))
def test_universe(self):
mol = M.fragment("water", (),
(("H1", M.atom(M.element("H"), 8)),
("H2", M.atom(M.element("H"), 8)),
("O", M.atom(M.element("O"), 2))),
(("H1", "O", "single"), ("H2", "O", "single")))
self.assertRaises(TypeError,
lambda: M.universe(0, [(mol, 'water', 10)]))
self.assertRaises(ValueError,
lambda: M.universe('strange', [(mol, 'water', 10)]))
self.assertRaises(ValueError,
lambda: M.universe('strange', [(mol, 10)]))
self.assertRaises(TypeError,
lambda: M.universe('infinite', mol))
self.assertRaises(ValueError,
lambda: M.universe('infinite', [("water", 10)]))
self.assertRaises(TypeError,
lambda: M.universe('infinite', [mol]))
self.assertRaises(ValueError,
lambda: M.universe('infinite', [(10, mol)]))
self.assertRaises(ValueError,
lambda: M.universe('infinite', [(mol, 'water', 10)],
[(IN.zeros((3,3), N.float64),
IN.zeros((3,), N.float64))]))
def test_configuration(self):
mol = make_water_fragment()
universe = M.universe('cube', [(mol, 'water', 10)])
# Missing data
self.assertRaises(TypeError,
lambda: M.Configuration(universe))
# Positions but no cell parameters
self.assertRaises(TypeError,
lambda: M.Configuration(universe,
IN.zeros((30, 3), N.float32)))
# Positions and cell parameters of different dtype
self.assertRaises(ValueError,
lambda: M.Configuration(universe,
IN.zeros((30, 3), N.float32),
N.float64(10.)))
# Positions not an array
self.assertRaises(TypeError,
lambda: M.Configuration(universe,
list(IN.zeros((30, 3),
N.float32)),
N.float32(10.)))
# Positions of wrong shape
self.assertRaises(ValueError,
lambda: M.Configuration(universe,
IN.zeros((25, 3), N.float32),
N.float32(10.)))
# Cell parameters of wrong shape
self.assertRaises(ValueError,
lambda: M.Configuration(universe,
IN.zeros((30, 3), N.float32),
IN.zeros((3,), N.float32)))
def test_selection(self):
mol = make_water_fragment(nsites=2)
universe = M.universe('cube', [(mol, 'water', 5)])
# Index occurs twice
self.assertRaises(ValueError,
lambda: M.AtomSelection(universe,
IN.zeros((2,), N.uint16)))
# Atom index too large
self.assertRaises(ValueError,
lambda: M.AtomSelection(universe,
IN.array([20], N.uint16)))
# Template atom index too large
self.assertRaises(ValueError,
lambda: M.TemplateAtomSelection(universe,
IN.array([3], N.uint8)))
# Site index too large
self.assertRaises(ValueError,
lambda: M.SiteSelection(universe,
IN.array([40], N.uint16)))
# Template site index too large
self.assertRaises(ValueError,
lambda: M.TemplateSiteSelection(universe,
IN.array([8], N.uint8)))
class UniverseTest(unittest.TestCase):
def setUp(self):
mol = make_water_fragment(2)
self.universe = M.universe('infinite', [(mol, 'water', 10)],
convention='my_own')
def test_basics(self):
self.assertTrue(is_valid(self.universe))
self.assertEqual(self.universe.number_of_molecules, 10)
self.assertEqual(self.universe.number_of_atoms, 30)
self.assertEqual(self.universe.number_of_sites, 60)
self.assertEqual(self.universe.number_of_bonds, 20)
self.assertEqual(self.universe.cell_shape, "infinite")
self.assertEqual(self.universe.convention, "my_own")
def test_properties(self):
masses = M.TemplateAtomProperty(self.universe,
"masses", "amu",
IN.array([1., 1., 16.], N.float32))
self.assertTrue(is_valid(masses))
self.assertEqual(masses.type, 'template_atom')
self.assertTrue(masses.universe == self.universe)
self.assertEqual(masses.element_shape, ())
self.assertEqual(masses.data.shape, (3,))
bead_masses = M.TemplateSiteProperty(self.universe,
"mass", "amu",
IN.array([1., 1.,
1., 1.,
8., 8.], N.float32))
self.assertTrue(is_valid(bead_masses))
self.assertEqual(bead_masses.type, 'template_site')
self.assertTrue(bead_masses.universe is self.universe)
self.assertEqual(bead_masses.element_shape, ())
self.assertEqual(bead_masses.data.shape, (6,))
velocities = M.SiteProperty(self.universe,
"velocity", "nm ps-1",
IN.zeros((60, 3), dtype=N.float64))
self.assertTrue(is_valid(velocities))
self.assertEqual(velocities.type, 'site')
self.assertTrue(velocities.universe is self.universe)
self.assertEqual(velocities.data.shape, (60, 3))
self.assertEqual(velocities.element_shape, (3,))
foo = M.AtomProperty(self.universe,
"foo", "",
IN.zeros((30, 2, 2), dtype=N.int16))
self.assertTrue(is_valid(foo))
self.assertEqual(foo.type, 'atom')
self.assertTrue(foo.universe is self.universe)
self.assertEqual(foo.data.shape, (30, 2, 2))
self.assertEqual(foo.element_shape, (2, 2))
def test_labels(self):
labels = tuple(a.name
for f, n in self.universe.molecules
for a in f.recursive_atom_iterator())
el = M.TemplateAtomLabel(self.universe, "element", labels)
self.assertTrue(is_valid(el))
self.assertEqual(el.name, "element")
self.assertTrue(el.universe == self.universe)
self.assertTrue(len(el.strings)
== self.universe.number_of_template_atoms)
for s1, s2 in zip(labels, el.strings):
self.assertEqual(s1, s2)
labels = tuple(a.name
for f, n in self.universe.molecules
for _ in range(n)
for a in f.recursive_atom_iterator())
el = M.AtomLabel(self.universe, "element", labels)
self.assertTrue(is_valid(el))
self.assertEqual(el.name, "element")
self.assertTrue(el.universe == self.universe)
self.assertTrue(len(el.strings)
== self.universe.number_of_atoms)
for s1, s2 in zip(labels, el.strings):
self.assertEqual(s1, s2)
labels = tuple(a.name
for f, n in self.universe.molecules
for a in f.recursive_atom_iterator()
for _ in range(a.number_of_sites))
el = M.TemplateSiteLabel(self.universe, "element", labels)
self.assertTrue(is_valid(el))
self.assertEqual(el.name, "element")
self.assertTrue(el.universe == self.universe)
self.assertTrue(len(el.strings)
== self.universe.number_of_template_sites)
for s1, s2 in zip(labels, el.strings):
self.assertEqual(s1, s2)
labels = tuple(a.name
for f, n in self.universe.molecules
for _ in range(n)
for a in f.recursive_atom_iterator()
for __ in range(a.number_of_sites))
el = M.SiteLabel(self.universe, "element", labels)
self.assertTrue(is_valid(el))
self.assertEqual(el.name, "element")
self.assertTrue(el.universe == self.universe)
self.assertTrue(len(el.strings)
== self.universe.number_of_sites)
for s1, s2 in zip(labels, el.strings):
self.assertEqual(s1, s2)
def test_bonds(self):
bonds = self.universe.bond_index_array()
self.assertEqual(len(bonds), self.universe.number_of_bonds)
self.assertTrue((bonds >= 0).all())
self.assertTrue((bonds < self.universe.number_of_atoms).all())
for i in range(10):
self.assertEqual(bonds[2*i, 0], 3*i)
self.assertEqual(bonds[2*i, 1], 3*i+2)
self.assertEqual(bonds[2*i+1, 0], 3*i+1)
self.assertEqual(bonds[2*i+1, 1], 3*i+2)
def test_index_mappings(self):
mol = self.universe.molecules[0][0]
s2a = mol.site_to_atom_index_mapping()
self.assertTrue((s2a == N.array([0, 0, 1, 1, 2, 2])).all())
s2a = self.universe.site_to_atom_index_mapping()
s2a_ref = N.repeat(N.arange(30), 2)
self.assertTrue((s2a == s2a_ref).all())
st2at = self.universe.template_site_to_template_atom_index_mapping()
st2at_ref = N.array([0, 0, 1, 1, 2, 2])
self.assertTrue((st2at == st2at_ref).all())
s2t = self.universe.site_to_template_index_mapping()
s2t_ref = N.resize(N.arange(mol.number_of_sites),
(self.universe.number_of_sites,))
self.assertTrue((s2t == s2t_ref).all())
a2t = self.universe.atom_to_template_index_mapping()
a2t_ref = N.resize(N.arange(mol.number_of_atoms),
(self.universe.number_of_atoms,))
self.assertTrue((a2t == a2t_ref).all())
a2s = self.universe.atom_to_site_index_mapping()
a2s_ref = 2*N.arange(self.universe.number_of_atoms)
self.assertTrue((a2s == a2s_ref).all())
def test_selections(self):
s = M.AtomSelection(self.universe, IN.array([0, 2], N.uint8))
self.assertEqual(s.number_of_atoms, 2)
self.assertEqual(s.number_of_sites, 4)
s = M.TemplateAtomSelection(self.universe, IN.array([1], N.uint8))
self.assertEqual(s.number_of_atoms, 10)
self.assertEqual(s.number_of_sites, 20)
s = M.SiteSelection(self.universe, [2])
self.assertEqual(s.number_of_sites, 1)
s = M.TemplateSiteSelection(self.universe, [0])
self.assertEqual(s.number_of_sites, 10)
class PBCTest(unittest.TestCase):
def setUp(self):
self.infinite = M.universe('infinite', ())
self.cube = M.universe('cube', ())
self.cuboid = M.universe('cuboid', ())
self.parallelepiped = M.universe('parallelepiped', ())
def test_lattice_vectors(self):
conf = M.Configuration(self.infinite,
IN.zeros((0, 3), N.float32),
None)
self.assertEqual(conf.lattice_vectors(), ())
self.assertEqual(conf.cell_volume(), None)
conf = M.Configuration(self.cube,
IN.zeros((0, 3), N.float32),
IN.array(1., N.float32))
lv = conf.lattice_vectors()
self.assertTrue((lv[0] == N.array([1., 0., 0.], N.float32)).all())
self.assertTrue((lv[1] == N.array([0., 1., 0.], N.float32)).all())
self.assertTrue((lv[2] == N.array([0., 0., 1.], N.float32)).all())
self.assertEqual(conf.cell_volume(), 1.)
conf = M.Configuration(self.cuboid,
IN.zeros((0, 3), N.float32),
IN.array([1., 2., 4.], N.float32))
lv = conf.lattice_vectors()
self.assertTrue((lv[0] == N.array([1., 0., 0.], N.float32)).all())
self.assertTrue((lv[1] == N.array([0., 2., 0.], N.float32)).all())
self.assertTrue((lv[2] == N.array([0., 0., 4.], N.float32)).all())
self.assertEqual(conf.cell_volume(), 8.)
conf = M.Configuration(self.parallelepiped,
IN.zeros((0, 3), N.float32),
IN.array([[1., 2., 4.],
[8., 4., 2.],
[16., 4., 8.]], N.float32))
lv = conf.lattice_vectors()
self.assertTrue((lv[0] == N.array([1., 2., 4.], N.float32)).all())
self.assertTrue((lv[1] == N.array([8., 4., 2.], N.float32)).all())
self.assertTrue((lv[2] == N.array([16., 4., 8.], N.float32)).all())
self.assertAlmostEqual(conf.cell_volume(), 168.)
def suite():
loader = unittest.TestLoader()
s = unittest.TestSuite()
s.addTest(loader.loadTestsFromTestCase(AtomDescriptorTest))
s.addTest(loader.loadTestsFromTestCase(WaterTest))
s.addTest(loader.loadTestsFromTestCase(PeptideTest))
s.addTest(loader.loadTestsFromTestCase(ErrorCheckingTest))
s.addTest(loader.loadTestsFromTestCase(UniverseTest))
s.addTest(loader.loadTestsFromTestCase(PBCTest))
return s
if __name__ == '__main__':
unittest.main()
|
[
"mosaic.immutable_model.SiteLabel",
"numpy.array",
"unittest.main",
"numpy.arange",
"unittest.TestSuite",
"mosaic.immutable_model.FragmentRef",
"numpy.float64",
"mosaic.immutable_model.fragment",
"mosaic.immutable_model.TemplateSiteLabel",
"mosaic.immutable_model.polymer",
"mosaic.immutable_model.AtomLabel",
"immutable.np.zeros",
"mosaic.immutable_model.atom",
"mosaic.immutable_model.SiteSelection",
"mosaic.immutable_model.element",
"mosaic.immutable_model.dummy",
"mosaic.immutable_model.unknown",
"mosaic.immutable_model.TemplateSiteSelection",
"mosaic.api.is_valid",
"mosaic.immutable_model.cgparticle",
"immutable.np.array",
"mosaic.immutable_model.universe",
"mosaic.immutable_model.TemplateAtomLabel",
"numpy.float32",
"unittest.TestLoader",
"mosaic.immutable_model.Configuration"
] |
[((23536, 23557), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (23555, 23557), False, 'import unittest\n'), ((23566, 23586), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (23584, 23586), False, 'import unittest\n'), ((23982, 23997), 'unittest.main', 'unittest.main', ([], {}), '()\n', (23995, 23997), False, 'import unittest\n'), ((3173, 3187), 'mosaic.immutable_model.element', 'M.element', (['"""C"""'], {}), "('C')\n", (3182, 3187), True, 'import mosaic.immutable_model as M\n'), ((3200, 3214), 'mosaic.immutable_model.element', 'M.element', (['"""H"""'], {}), "('H')\n", (3209, 3214), True, 'import mosaic.immutable_model as M\n'), ((3227, 3241), 'mosaic.immutable_model.element', 'M.element', (['"""N"""'], {}), "('N')\n", (3236, 3241), True, 'import mosaic.immutable_model as M\n'), ((3254, 3268), 'mosaic.immutable_model.element', 'M.element', (['"""O"""'], {}), "('O')\n", (3263, 3268), True, 'import mosaic.immutable_model as M\n'), ((4490, 4624), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""alanine"""', "(('peptide', peptide_group), ('sidechain', ala_sidechain))", '()', "(('peptide.CA', 'sidechain.CB', 'single'),)"], {}), "('alanine', (('peptide', peptide_group), ('sidechain',\n ala_sidechain)), (), (('peptide.CA', 'sidechain.CB', 'single'),))\n", (4500, 4624), True, 'import mosaic.immutable_model as M\n'), ((4737, 4870), 'mosaic.immutable_model.polymer', 'M.polymer', (['"""alanine_dipeptide"""', "(('ALA1', ala), ('ALA2', ala))", "(('ALA1.peptide.C', 'ALA2.peptide.N', 'single'),)", '"""polypeptide"""'], {}), "('alanine_dipeptide', (('ALA1', ala), ('ALA2', ala)), ((\n 'ALA1.peptide.C', 'ALA2.peptide.N', 'single'),), 'polypeptide')\n", (4746, 4870), True, 'import mosaic.immutable_model as M\n'), ((5449, 5472), 'mosaic.immutable_model.FragmentRef', 'M.FragmentRef', (['"""x"""', 'mol'], {}), "('x', mol)\n", (5462, 5472), True, 'import mosaic.immutable_model as M\n'), ((6530, 6544), 'mosaic.immutable_model.element', 'M.element', (['"""C"""'], {}), "('C')\n", (6539, 6544), True, 'import mosaic.immutable_model as M\n'), ((9671, 9728), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""x"""', '()', "(('C1', carbon), ('C2', carbon))", '()'], {}), "('x', (), (('C1', carbon), ('C2', carbon)), ())\n", (9681, 9728), True, 'import mosaic.immutable_model as M\n'), ((11406, 11446), 'mosaic.immutable_model.universe', 'M.universe', (['"""cube"""', "[(mol, 'water', 10)]"], {}), "('cube', [(mol, 'water', 10)])\n", (11416, 11446), True, 'import mosaic.immutable_model as M\n'), ((13118, 13157), 'mosaic.immutable_model.universe', 'M.universe', (['"""cube"""', "[(mol, 'water', 5)]"], {}), "('cube', [(mol, 'water', 5)])\n", (13128, 13157), True, 'import mosaic.immutable_model as M\n'), ((14355, 14420), 'mosaic.immutable_model.universe', 'M.universe', (['"""infinite"""', "[(mol, 'water', 10)]"], {'convention': '"""my_own"""'}), "('infinite', [(mol, 'water', 10)], convention='my_own')\n", (14365, 14420), True, 'import mosaic.immutable_model as M\n'), ((17017, 17070), 'mosaic.immutable_model.TemplateAtomLabel', 'M.TemplateAtomLabel', (['self.universe', '"""element"""', 'labels'], {}), "(self.universe, 'element', labels)\n", (17036, 17070), True, 'import mosaic.immutable_model as M\n'), ((17604, 17649), 'mosaic.immutable_model.AtomLabel', 'M.AtomLabel', (['self.universe', '"""element"""', 'labels'], {}), "(self.universe, 'element', labels)\n", (17615, 17649), True, 'import mosaic.immutable_model as M\n'), ((18190, 18243), 'mosaic.immutable_model.TemplateSiteLabel', 
'M.TemplateSiteLabel', (['self.universe', '"""element"""', 'labels'], {}), "(self.universe, 'element', labels)\n", (18209, 18243), True, 'import mosaic.immutable_model as M\n'), ((18835, 18880), 'mosaic.immutable_model.SiteLabel', 'M.SiteLabel', (['self.universe', '"""element"""', 'labels'], {}), "(self.universe, 'element', labels)\n", (18846, 18880), True, 'import mosaic.immutable_model as M\n'), ((20136, 20163), 'numpy.array', 'N.array', (['[0, 0, 1, 1, 2, 2]'], {}), '([0, 0, 1, 1, 2, 2])\n', (20143, 20163), True, 'import numpy as N\n'), ((21219, 21254), 'mosaic.immutable_model.SiteSelection', 'M.SiteSelection', (['self.universe', '[2]'], {}), '(self.universe, [2])\n', (21234, 21254), True, 'import mosaic.immutable_model as M\n'), ((21314, 21357), 'mosaic.immutable_model.TemplateSiteSelection', 'M.TemplateSiteSelection', (['self.universe', '[0]'], {}), '(self.universe, [0])\n', (21337, 21357), True, 'import mosaic.immutable_model as M\n'), ((21487, 21513), 'mosaic.immutable_model.universe', 'M.universe', (['"""infinite"""', '()'], {}), "('infinite', ())\n", (21497, 21513), True, 'import mosaic.immutable_model as M\n'), ((21534, 21556), 'mosaic.immutable_model.universe', 'M.universe', (['"""cube"""', '()'], {}), "('cube', ())\n", (21544, 21556), True, 'import mosaic.immutable_model as M\n'), ((21579, 21603), 'mosaic.immutable_model.universe', 'M.universe', (['"""cuboid"""', '()'], {}), "('cuboid', ())\n", (21589, 21603), True, 'import mosaic.immutable_model as M\n'), ((21634, 21666), 'mosaic.immutable_model.universe', 'M.universe', (['"""parallelepiped"""', '()'], {}), "('parallelepiped', ())\n", (21644, 21666), True, 'import mosaic.immutable_model as M\n'), ((6733, 6747), 'mosaic.immutable_model.element', 'M.element', (['"""C"""'], {}), "('C')\n", (6742, 6747), True, 'import mosaic.immutable_model as M\n'), ((8445, 8459), 'mosaic.immutable_model.element', 'M.element', (['"""C"""'], {}), "('C')\n", (8454, 8459), True, 'import mosaic.immutable_model as M\n'), ((14508, 14531), 'mosaic.api.is_valid', 'is_valid', (['self.universe'], {}), '(self.universe)\n', (14516, 14531), False, 'from mosaic.api import is_valid\n'), ((15085, 15122), 'immutable.np.array', 'IN.array', (['[1.0, 1.0, 16.0]', 'N.float32'], {}), '([1.0, 1.0, 16.0], N.float32)\n', (15093, 15122), True, 'import immutable.np as IN\n'), ((15145, 15161), 'mosaic.api.is_valid', 'is_valid', (['masses'], {}), '(masses)\n', (15153, 15161), False, 'from mosaic.api import is_valid\n'), ((15542, 15593), 'immutable.np.array', 'IN.array', (['[1.0, 1.0, 1.0, 1.0, 8.0, 8.0]', 'N.float32'], {}), '([1.0, 1.0, 1.0, 1.0, 8.0, 8.0], N.float32)\n', (15550, 15593), True, 'import immutable.np as IN\n'), ((15723, 15744), 'mosaic.api.is_valid', 'is_valid', (['bead_masses'], {}), '(bead_masses)\n', (15731, 15744), False, 'from mosaic.api import is_valid\n'), ((16126, 16160), 'immutable.np.zeros', 'IN.zeros', (['(60, 3)'], {'dtype': 'N.float64'}), '((60, 3), dtype=N.float64)\n', (16134, 16160), True, 'import immutable.np as IN\n'), ((16186, 16206), 'mosaic.api.is_valid', 'is_valid', (['velocities'], {}), '(velocities)\n', (16194, 16206), False, 'from mosaic.api import is_valid\n'), ((16547, 16582), 'immutable.np.zeros', 'IN.zeros', (['(30, 2, 2)'], {'dtype': 'N.int16'}), '((30, 2, 2), dtype=N.int16)\n', (16555, 16582), True, 'import immutable.np as IN\n'), ((16608, 16621), 'mosaic.api.is_valid', 'is_valid', (['foo'], {}), '(foo)\n', (16616, 16621), False, 'from mosaic.api import is_valid\n'), ((17095, 17107), 'mosaic.api.is_valid', 'is_valid', (['el'], {}), 
'(el)\n', (17103, 17107), False, 'from mosaic.api import is_valid\n'), ((17674, 17686), 'mosaic.api.is_valid', 'is_valid', (['el'], {}), '(el)\n', (17682, 17686), False, 'from mosaic.api import is_valid\n'), ((18268, 18280), 'mosaic.api.is_valid', 'is_valid', (['el'], {}), '(el)\n', (18276, 18280), False, 'from mosaic.api import is_valid\n'), ((18905, 18917), 'mosaic.api.is_valid', 'is_valid', (['el'], {}), '(el)\n', (18913, 18917), False, 'from mosaic.api import is_valid\n'), ((19973, 19985), 'numpy.arange', 'N.arange', (['(30)'], {}), '(30)\n', (19981, 19985), True, 'import numpy as N\n'), ((20305, 20334), 'numpy.arange', 'N.arange', (['mol.number_of_sites'], {}), '(mol.number_of_sites)\n', (20313, 20334), True, 'import numpy as N\n'), ((20534, 20563), 'numpy.arange', 'N.arange', (['mol.number_of_atoms'], {}), '(mol.number_of_atoms)\n', (20542, 20563), True, 'import numpy as N\n'), ((20752, 20791), 'numpy.arange', 'N.arange', (['self.universe.number_of_atoms'], {}), '(self.universe.number_of_atoms)\n', (20760, 20791), True, 'import numpy as N\n'), ((20915, 20940), 'immutable.np.array', 'IN.array', (['[0, 2]', 'N.uint8'], {}), '([0, 2], N.uint8)\n', (20923, 20940), True, 'import immutable.np as IN\n'), ((21087, 21109), 'immutable.np.array', 'IN.array', (['[1]', 'N.uint8'], {}), '([1], N.uint8)\n', (21095, 21109), True, 'import immutable.np as IN\n'), ((21781, 21808), 'immutable.np.zeros', 'IN.zeros', (['(0, 3)', 'N.float32'], {}), '((0, 3), N.float32)\n', (21789, 21808), True, 'import immutable.np as IN\n'), ((22024, 22051), 'immutable.np.zeros', 'IN.zeros', (['(0, 3)', 'N.float32'], {}), '((0, 3), N.float32)\n', (22032, 22051), True, 'import immutable.np as IN\n'), ((22084, 22108), 'immutable.np.array', 'IN.array', (['(1.0)', 'N.float32'], {}), '(1.0, N.float32)\n', (22092, 22108), True, 'import immutable.np as IN\n'), ((22494, 22521), 'immutable.np.zeros', 'IN.zeros', (['(0, 3)', 'N.float32'], {}), '((0, 3), N.float32)\n', (22502, 22521), True, 'import immutable.np as IN\n'), ((22554, 22590), 'immutable.np.array', 'IN.array', (['[1.0, 2.0, 4.0]', 'N.float32'], {}), '([1.0, 2.0, 4.0], N.float32)\n', (22562, 22590), True, 'import immutable.np as IN\n'), ((22982, 23009), 'immutable.np.zeros', 'IN.zeros', (['(0, 3)', 'N.float32'], {}), '((0, 3), N.float32)\n', (22990, 23009), True, 'import immutable.np as IN\n'), ((23042, 23115), 'immutable.np.array', 'IN.array', (['[[1.0, 2.0, 4.0], [8.0, 4.0, 2.0], [16.0, 4.0, 8.0]]', 'N.float32'], {}), '([[1.0, 2.0, 4.0], [8.0, 4.0, 2.0], [16.0, 4.0, 8.0]], N.float32)\n', (23050, 23115), True, 'import immutable.np as IN\n'), ((984, 993), 'mosaic.immutable_model.dummy', 'M.dummy', ([], {}), '()\n', (991, 993), True, 'import mosaic.immutable_model as M\n'), ((997, 1006), 'mosaic.immutable_model.dummy', 'M.dummy', ([], {}), '()\n', (1004, 1006), True, 'import mosaic.immutable_model as M\n'), ((1032, 1044), 'mosaic.immutable_model.dummy', 'M.dummy', (['"""a"""'], {}), "('a')\n", (1039, 1044), True, 'import mosaic.immutable_model as M\n'), ((1048, 1060), 'mosaic.immutable_model.dummy', 'M.dummy', (['"""a"""'], {}), "('a')\n", (1055, 1060), True, 'import mosaic.immutable_model as M\n'), ((1086, 1098), 'mosaic.immutable_model.dummy', 'M.dummy', (['"""a"""'], {}), "('a')\n", (1093, 1098), True, 'import mosaic.immutable_model as M\n'), ((1106, 1118), 'mosaic.immutable_model.dummy', 'M.dummy', (['"""b"""'], {}), "('b')\n", (1113, 1118), True, 'import mosaic.immutable_model as M\n'), ((1144, 1156), 'mosaic.immutable_model.dummy', 'M.dummy', (['"""a"""'], {}), 
"('a')\n", (1151, 1156), True, 'import mosaic.immutable_model as M\n'), ((1164, 1178), 'mosaic.immutable_model.unknown', 'M.unknown', (['"""a"""'], {}), "('a')\n", (1173, 1178), True, 'import mosaic.immutable_model as M\n'), ((1204, 1216), 'mosaic.immutable_model.dummy', 'M.dummy', (['"""C"""'], {}), "('C')\n", (1211, 1216), True, 'import mosaic.immutable_model as M\n'), ((1224, 1238), 'mosaic.immutable_model.element', 'M.element', (['"""C"""'], {}), "('C')\n", (1233, 1238), True, 'import mosaic.immutable_model as M\n'), ((1264, 1278), 'mosaic.immutable_model.element', 'M.element', (['"""C"""'], {}), "('C')\n", (1273, 1278), True, 'import mosaic.immutable_model as M\n'), ((1282, 1296), 'mosaic.immutable_model.element', 'M.element', (['"""C"""'], {}), "('C')\n", (1291, 1296), True, 'import mosaic.immutable_model as M\n'), ((1469, 1478), 'mosaic.immutable_model.dummy', 'M.dummy', ([], {}), '()\n', (1476, 1478), True, 'import mosaic.immutable_model as M\n'), ((1519, 1530), 'mosaic.immutable_model.unknown', 'M.unknown', ([], {}), '()\n', (1528, 1530), True, 'import mosaic.immutable_model as M\n'), ((1566, 1580), 'mosaic.immutable_model.element', 'M.element', (['"""O"""'], {}), "('O')\n", (1575, 1580), True, 'import mosaic.immutable_model as M\n'), ((1623, 1642), 'mosaic.immutable_model.cgparticle', 'M.cgparticle', (['"""ala"""'], {}), "('ala')\n", (1635, 1642), True, 'import mosaic.immutable_model as M\n'), ((6343, 6354), 'mosaic.immutable_model.dummy', 'M.dummy', (['(42)'], {}), '(42)\n', (6350, 6354), True, 'import mosaic.immutable_model as M\n'), ((6402, 6415), 'mosaic.immutable_model.element', 'M.element', (['(42)'], {}), '(42)\n', (6411, 6415), True, 'import mosaic.immutable_model as M\n'), ((6463, 6477), 'mosaic.immutable_model.element', 'M.element', (['"""X"""'], {}), "('X')\n", (6472, 6477), True, 'import mosaic.immutable_model as M\n'), ((6590, 6604), 'mosaic.immutable_model.atom', 'M.atom', (['"""C"""', '(1)'], {}), "('C', 1)\n", (6596, 6604), True, 'import mosaic.immutable_model as M\n'), ((6652, 6669), 'mosaic.immutable_model.atom', 'M.atom', (['carbon', '(0)'], {}), '(carbon, 0)\n', (6658, 6669), True, 'import mosaic.immutable_model as M\n'), ((6848, 6891), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', 'None', "(('C', carbon),)", '()'], {}), "('m', None, (('C', carbon),), ())\n", (6858, 6891), True, 'import mosaic.immutable_model as M\n'), ((6964, 7009), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '[1, 2]', "(('C', carbon),)", '()'], {}), "('m', [1, 2], (('C', carbon),), ())\n", (6974, 7009), True, 'import mosaic.immutable_model as M\n'), ((7082, 7123), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', "(('C', carbon),)", '()', '()'], {}), "('m', (('C', carbon),), (), ())\n", (7092, 7123), True, 'import mosaic.immutable_model as M\n'), ((7220, 7249), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '()', 'None', '()'], {}), "('m', (), None, ())\n", (7230, 7249), True, 'import mosaic.immutable_model as M\n'), ((7322, 7353), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '()', '[1, 2]', '()'], {}), "('m', (), [1, 2], ())\n", (7332, 7353), True, 'import mosaic.immutable_model as M\n'), ((7426, 7460), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '()', '(carbon,)', '()'], {}), "('m', (), (carbon,), ())\n", (7436, 7460), True, 'import mosaic.immutable_model as M\n'), ((7534, 7589), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '()', "(('C', carbon), ('C', carbon))", '()'], {}), 
"('m', (), (('C', carbon), ('C', carbon)), ())\n", (7544, 7589), True, 'import mosaic.immutable_model as M\n'), ((7827, 7870), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '()', "(('C', carbon),)", 'None'], {}), "('m', (), (('C', carbon),), None)\n", (7837, 7870), True, 'import mosaic.immutable_model as M\n'), ((7943, 7991), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '()', "(('C', carbon),)", '[1, 2, 3]'], {}), "('m', (), (('C', carbon),), [1, 2, 3])\n", (7953, 7991), True, 'import mosaic.immutable_model as M\n'), ((8109, 8158), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '()', "(('C', carbon),)", "('X', 'X')"], {}), "('m', (), (('C', carbon),), ('X', 'X'))\n", (8119, 8158), True, 'import mosaic.immutable_model as M\n'), ((8278, 8337), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '()', "(('C', carbon),)", "['X', 'X', 'single']"], {}), "('m', (), (('C', carbon),), ['X', 'X', 'single'])\n", (8288, 8337), True, 'import mosaic.immutable_model as M\n'), ((8575, 8640), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '()', "(('C1', carbon), ('C2', carbon))", "(('C1',),)"], {}), "('m', (), (('C1', carbon), ('C2', carbon)), (('C1',),))\n", (8585, 8640), True, 'import mosaic.immutable_model as M\n'), ((8861, 8931), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '()', "(('C1', carbon), ('C2', carbon))", "(('C1', 'C2'),)"], {}), "('m', (), (('C1', carbon), ('C2', carbon)), (('C1', 'C2'),))\n", (8871, 8931), True, 'import mosaic.immutable_model as M\n'), ((9143, 9217), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '()', "(('C1', carbon), ('C2', carbon))", "(('C1', 'C1', ''),)"], {}), "('m', (), (('C1', carbon), ('C2', carbon)), (('C1', 'C1', ''),))\n", (9153, 9217), True, 'import mosaic.immutable_model as M\n'), ((9440, 9514), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', '()', "(('C1', carbon), ('C2', carbon))", "(('C1', 'C3', ''),)"], {}), "('m', (), (('C1', carbon), ('C2', carbon)), (('C1', 'C3', ''),))\n", (9450, 9514), True, 'import mosaic.immutable_model as M\n'), ((9801, 9873), 'mosaic.immutable_model.fragment', 'M.fragment', (['"""m"""', "(('x', f),)", "(('C3', carbon),)", "(('x.C1', 'x.C2', ''),)"], {}), "('m', (('x', f),), (('C3', carbon),), (('x.C1', 'x.C2', ''),))\n", (9811, 9873), True, 'import mosaic.immutable_model as M\n'), ((10361, 10396), 'mosaic.immutable_model.universe', 'M.universe', (['(0)', "[(mol, 'water', 10)]"], {}), "(0, [(mol, 'water', 10)])\n", (10371, 10396), True, 'import mosaic.immutable_model as M\n'), ((10470, 10513), 'mosaic.immutable_model.universe', 'M.universe', (['"""strange"""', "[(mol, 'water', 10)]"], {}), "('strange', [(mol, 'water', 10)])\n", (10480, 10513), True, 'import mosaic.immutable_model as M\n'), ((10587, 10621), 'mosaic.immutable_model.universe', 'M.universe', (['"""strange"""', '[(mol, 10)]'], {}), "('strange', [(mol, 10)])\n", (10597, 10621), True, 'import mosaic.immutable_model as M\n'), ((10694, 10721), 'mosaic.immutable_model.universe', 'M.universe', (['"""infinite"""', 'mol'], {}), "('infinite', mol)\n", (10704, 10721), True, 'import mosaic.immutable_model as M\n'), ((10795, 10834), 'mosaic.immutable_model.universe', 'M.universe', (['"""infinite"""', "[('water', 10)]"], {}), "('infinite', [('water', 10)])\n", (10805, 10834), True, 'import mosaic.immutable_model as M\n'), ((10907, 10936), 'mosaic.immutable_model.universe', 'M.universe', (['"""infinite"""', '[mol]'], {}), "('infinite', [mol])\n", (10917, 10936), 
True, 'import mosaic.immutable_model as M\n'), ((11010, 11045), 'mosaic.immutable_model.universe', 'M.universe', (['"""infinite"""', '[(10, mol)]'], {}), "('infinite', [(10, mol)])\n", (11020, 11045), True, 'import mosaic.immutable_model as M\n'), ((11541, 11566), 'mosaic.immutable_model.Configuration', 'M.Configuration', (['universe'], {}), '(universe)\n', (11556, 11566), True, 'import mosaic.immutable_model as M\n'), ((659, 673), 'mosaic.immutable_model.element', 'M.element', (['"""H"""'], {}), "('H')\n", (668, 673), True, 'import mosaic.immutable_model as M\n'), ((722, 736), 'mosaic.immutable_model.element', 'M.element', (['"""H"""'], {}), "('H')\n", (731, 736), True, 'import mosaic.immutable_model as M\n'), ((785, 799), 'mosaic.immutable_model.element', 'M.element', (['"""O"""'], {}), "('O')\n", (794, 799), True, 'import mosaic.immutable_model as M\n'), ((1390, 1405), 'mosaic.immutable_model.unknown', 'M.unknown', (['name'], {}), '(name)\n', (1399, 1405), True, 'import mosaic.immutable_model as M\n'), ((3397, 3406), 'mosaic.immutable_model.atom', 'M.atom', (['C'], {}), '(C)\n', (3403, 3406), True, 'import mosaic.immutable_model as M\n'), ((3452, 3461), 'mosaic.immutable_model.atom', 'M.atom', (['H'], {}), '(H)\n', (3458, 3461), True, 'import mosaic.immutable_model as M\n'), ((3506, 3515), 'mosaic.immutable_model.atom', 'M.atom', (['H'], {}), '(H)\n', (3512, 3515), True, 'import mosaic.immutable_model as M\n'), ((3560, 3569), 'mosaic.immutable_model.atom', 'M.atom', (['N'], {}), '(N)\n', (3566, 3569), True, 'import mosaic.immutable_model as M\n'), ((3614, 3623), 'mosaic.immutable_model.atom', 'M.atom', (['C'], {}), '(C)\n', (3620, 3623), True, 'import mosaic.immutable_model as M\n'), ((3668, 3677), 'mosaic.immutable_model.atom', 'M.atom', (['O'], {}), '(O)\n', (3674, 3677), True, 'import mosaic.immutable_model as M\n'), ((4110, 4119), 'mosaic.immutable_model.atom', 'M.atom', (['C'], {}), '(C)\n', (4116, 4119), True, 'import mosaic.immutable_model as M\n'), ((4166, 4175), 'mosaic.immutable_model.atom', 'M.atom', (['H'], {}), '(H)\n', (4172, 4175), True, 'import mosaic.immutable_model as M\n'), ((4222, 4231), 'mosaic.immutable_model.atom', 'M.atom', (['H'], {}), '(H)\n', (4228, 4231), True, 'import mosaic.immutable_model as M\n'), ((4278, 4287), 'mosaic.immutable_model.atom', 'M.atom', (['H'], {}), '(H)\n', (4284, 4287), True, 'import mosaic.immutable_model as M\n'), ((11758, 11786), 'immutable.np.zeros', 'IN.zeros', (['(30, 3)', 'N.float32'], {}), '((30, 3), N.float32)\n', (11766, 11786), True, 'import immutable.np as IN\n'), ((11996, 12024), 'immutable.np.zeros', 'IN.zeros', (['(30, 3)', 'N.float32'], {}), '((30, 3), N.float32)\n', (12004, 12024), True, 'import immutable.np as IN\n'), ((12076, 12091), 'numpy.float64', 'N.float64', (['(10.0)'], {}), '(10.0)\n', (12085, 12091), True, 'import numpy as N\n'), ((12422, 12437), 'numpy.float32', 'N.float32', (['(10.0)'], {}), '(10.0)\n', (12431, 12437), True, 'import numpy as N\n'), ((12622, 12650), 'immutable.np.zeros', 'IN.zeros', (['(25, 3)', 'N.float32'], {}), '((25, 3), N.float32)\n', (12630, 12650), True, 'import immutable.np as IN\n'), ((12702, 12717), 'numpy.float32', 'N.float32', (['(10.0)'], {}), '(10.0)\n', (12711, 12717), True, 'import numpy as N\n'), ((12908, 12936), 'immutable.np.zeros', 'IN.zeros', (['(30, 3)', 'N.float32'], {}), '((30, 3), N.float32)\n', (12916, 12936), True, 'import immutable.np as IN\n'), ((12988, 13013), 'immutable.np.zeros', 'IN.zeros', (['(3,)', 'N.float32'], {}), '((3,), N.float32)\n', (12996, 13013), 
True, 'import immutable.np as IN\n'), ((13335, 13359), 'immutable.np.zeros', 'IN.zeros', (['(2,)', 'N.uint16'], {}), '((2,), N.uint16)\n', (13343, 13359), True, 'import immutable.np as IN\n'), ((13541, 13565), 'immutable.np.array', 'IN.array', (['[20]', 'N.uint16'], {}), '([20], N.uint16)\n', (13549, 13565), True, 'import immutable.np as IN\n'), ((13772, 13794), 'immutable.np.array', 'IN.array', (['[3]', 'N.uint8'], {}), '([3], N.uint8)\n', (13780, 13794), True, 'import immutable.np as IN\n'), ((13976, 14000), 'immutable.np.array', 'IN.array', (['[40]', 'N.uint16'], {}), '([40], N.uint16)\n', (13984, 14000), True, 'import immutable.np as IN\n'), ((14207, 14229), 'immutable.np.array', 'IN.array', (['[8]', 'N.uint8'], {}), '([8], N.uint8)\n', (14215, 14229), True, 'import immutable.np as IN\n'), ((2184, 2198), 'mosaic.immutable_model.element', 'M.element', (['"""H"""'], {}), "('H')\n", (2193, 2198), True, 'import mosaic.immutable_model as M\n'), ((2257, 2271), 'mosaic.immutable_model.element', 'M.element', (['"""H"""'], {}), "('H')\n", (2266, 2271), True, 'import mosaic.immutable_model as M\n'), ((2330, 2344), 'mosaic.immutable_model.element', 'M.element', (['"""O"""'], {}), "('O')\n", (2339, 2344), True, 'import mosaic.immutable_model as M\n'), ((2586, 2600), 'mosaic.immutable_model.element', 'M.element', (['"""O"""'], {}), "('O')\n", (2595, 2600), True, 'import mosaic.immutable_model as M\n'), ((2659, 2673), 'mosaic.immutable_model.element', 'M.element', (['"""H"""'], {}), "('H')\n", (2668, 2673), True, 'import mosaic.immutable_model as M\n'), ((2732, 2746), 'mosaic.immutable_model.element', 'M.element', (['"""H"""'], {}), "('H')\n", (2741, 2746), True, 'import mosaic.immutable_model as M\n'), ((10073, 10087), 'mosaic.immutable_model.element', 'M.element', (['"""H"""'], {}), "('H')\n", (10082, 10087), True, 'import mosaic.immutable_model as M\n'), ((10134, 10148), 'mosaic.immutable_model.element', 'M.element', (['"""H"""'], {}), "('H')\n", (10143, 10148), True, 'import mosaic.immutable_model as M\n'), ((10195, 10209), 'mosaic.immutable_model.element', 'M.element', (['"""O"""'], {}), "('O')\n", (10204, 10209), True, 'import mosaic.immutable_model as M\n'), ((12278, 12306), 'immutable.np.zeros', 'IN.zeros', (['(30, 3)', 'N.float32'], {}), '((30, 3), N.float32)\n', (12286, 12306), True, 'import immutable.np as IN\n'), ((19852, 19879), 'numpy.array', 'N.array', (['[0, 0, 1, 1, 2, 2]'], {}), '([0, 0, 1, 1, 2, 2])\n', (19859, 19879), True, 'import numpy as N\n'), ((22179, 22214), 'numpy.array', 'N.array', (['[1.0, 0.0, 0.0]', 'N.float32'], {}), '([1.0, 0.0, 0.0], N.float32)\n', (22186, 22214), True, 'import numpy as N\n'), ((22254, 22289), 'numpy.array', 'N.array', (['[0.0, 1.0, 0.0]', 'N.float32'], {}), '([0.0, 1.0, 0.0], N.float32)\n', (22261, 22289), True, 'import numpy as N\n'), ((22329, 22364), 'numpy.array', 'N.array', (['[0.0, 0.0, 1.0]', 'N.float32'], {}), '([0.0, 0.0, 1.0], N.float32)\n', (22336, 22364), True, 'import numpy as N\n'), ((22659, 22694), 'numpy.array', 'N.array', (['[1.0, 0.0, 0.0]', 'N.float32'], {}), '([1.0, 0.0, 0.0], N.float32)\n', (22666, 22694), True, 'import numpy as N\n'), ((22734, 22769), 'numpy.array', 'N.array', (['[0.0, 2.0, 0.0]', 'N.float32'], {}), '([0.0, 2.0, 0.0], N.float32)\n', (22741, 22769), True, 'import numpy as N\n'), ((22809, 22844), 'numpy.array', 'N.array', (['[0.0, 0.0, 4.0]', 'N.float32'], {}), '([0.0, 0.0, 4.0], N.float32)\n', (22816, 22844), True, 'import numpy as N\n'), ((23260, 23295), 'numpy.array', 'N.array', (['[1.0, 2.0, 4.0]', 
'N.float32'], {}), '([1.0, 2.0, 4.0], N.float32)\n', (23267, 23295), True, 'import numpy as N\n'), ((23335, 23370), 'numpy.array', 'N.array', (['[8.0, 4.0, 2.0]', 'N.float32'], {}), '([8.0, 4.0, 2.0], N.float32)\n', (23342, 23370), True, 'import numpy as N\n'), ((23410, 23446), 'numpy.array', 'N.array', (['[16.0, 4.0, 8.0]', 'N.float32'], {}), '([16.0, 4.0, 8.0], N.float32)\n', (23417, 23446), True, 'import numpy as N\n'), ((11211, 11238), 'immutable.np.zeros', 'IN.zeros', (['(3, 3)', 'N.float64'], {}), '((3, 3), N.float64)\n', (11219, 11238), True, 'import immutable.np as IN\n'), ((11286, 11311), 'immutable.np.zeros', 'IN.zeros', (['(3,)', 'N.float64'], {}), '((3,), N.float64)\n', (11294, 11311), True, 'import immutable.np as IN\n')]
|
# Code from Chapter 3 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by <NAME> (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2008, 2014
import pylab as pl
import numpy as np
import pcn
import cPickle, gzip
# Read the dataset in (code from sheet)
f = gzip.open('mnist.pkl.gz','rb')
tset, vset, teset = cPickle.load(f)
f.close()
nread = 200
# Just use the first few images
train_in = tset[0][:nread,:]
# This is a little bit of work -- 1 of N encoding
# Make sure you understand how it does it
train_tgt = np.zeros((nread,10))
for i in range(nread):
train_tgt[i,tset[1][i]] = 1
test_in = teset[0][:nread,:]
test_tgt = np.zeros((nread,10))
for i in range(nread):
test_tgt[i,teset[1][i]] = 1
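# Added illustration (not in the original): the 1-of-N target loops above can
# be written in a single vectorised step; the asserts check that both agree.
train_tgt_vec = np.eye(10)[tset[1][:nread]]
test_tgt_vec = np.eye(10)[teset[1][:nread]]
assert np.array_equal(train_tgt, train_tgt_vec)
assert np.array_equal(test_tgt, test_tgt_vec)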
# Train a Perceptron on training set
p = pcn.pcn(train_in, train_tgt)
p.pcntrain(train_in, train_tgt,0.25,100)
# This isn't really good practice since it's on the training data,
# but it does show that it is learning.
p.confmat(train_in,train_tgt)
# Now test it
p.confmat(test_in,test_tgt)
|
[
"numpy.zeros",
"pcn.pcn",
"cPickle.load",
"gzip.open"
] |
[((470, 501), 'gzip.open', 'gzip.open', (['"""mnist.pkl.gz"""', '"""rb"""'], {}), "('mnist.pkl.gz', 'rb')\n", (479, 501), False, 'import cPickle, gzip\n'), ((521, 536), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (533, 536), False, 'import cPickle, gzip\n'), ((726, 747), 'numpy.zeros', 'np.zeros', (['(nread, 10)'], {}), '((nread, 10))\n', (734, 747), True, 'import numpy as np\n'), ((843, 864), 'numpy.zeros', 'np.zeros', (['(nread, 10)'], {}), '((nread, 10))\n', (851, 864), True, 'import numpy as np\n'), ((961, 989), 'pcn.pcn', 'pcn.pcn', (['train_in', 'train_tgt'], {}), '(train_in, train_tgt)\n', (968, 989), False, 'import pcn\n')]
|
import sys
import numpy as np
from cv2 import BRISK_create
from cv2.xfeatures2d import FREAK_create
from numpy import histogramdd
from skimage.color import rgb2lab, rgb2hsv
from skimage.feature import local_binary_pattern, greycoprops, greycomatrix
from sklearn.base import TransformerMixin
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.preprocessing import MinMaxScaler
sys.path.append("../")
from helpers.img_utils import tif_to_grayscale, tif_to_rgb
class BaseFeatureExtractor(TransformerMixin):
def __init__(self):
pass
def fit(self, imgs, y=None):
raise NotImplementedError
def transform(self, imgs, y=None):
raise NotImplementedError
class ChannelsFeatureExtractor(BaseFeatureExtractor):
"""
extracts mean and standard deviation of every channel in the image (B, G, R, NIR)
    and the brightness, where brightness is defined as the mean over the channel means
Parameters
----------
imgs : numpy.ndarray (np.int | np.float)
set of images, each with 4 channels (B, G, R, NIR)
    bgr : if True only the first three colour channels (B, G, R) are used
"""
def __init__(self, bgr=False):
self.pixels_axis = (1, 2)
self.bgr = bgr
super().__init__()
def fit(self, imgs, y=None):
return self
def transform(self, imgs, y=None):
if self.bgr:
imgs = imgs[:, :, :, :3] # extract color channels
means = np.mean(imgs, axis=self.pixels_axis)
sds = np.std(imgs, axis=self.pixels_axis)
brightness = np.mean(means, axis=1)
brightness = np.reshape(brightness, (-1, 1))
return np.concatenate((means, sds, brightness), axis=1)
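# Note (added): for 4-channel input this yields 9 features per image
# (4 means, 4 standard deviations, 1 brightness); with bgr=True it is 7.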
class NDVIFeatureExtractor(BaseFeatureExtractor):
"""
    extracts the normalized difference vegetation index (NDVI) from a multispectral image
Parameters
----------
imgs : numpy.ndarray (np.int | np.float)
set of images, each with 4 channels (B, G, R, NIR)
"""
def __init__(self):
self.pixels_axis = (1, 2)
super().__init__()
def fit(self, imgs, y=None):
return self
def transform(self, imgs, y=None):
        # cast to float so NDVI can take negative values; unsigned integer arithmetic would wrap around
imgs = imgs.astype('float64', casting='safe')
red = imgs[:, :, :, 2]
nir = imgs[:, :, :, 3]
ndvi = np.divide(nir - red, nir + red)
ndvi_means = np.mean(ndvi, axis=self.pixels_axis)
ndvi_sds = np.std(ndvi, axis=self.pixels_axis)
ndvi_means = np.reshape(ndvi_means, (-1, 1))
ndvi_sds = np.reshape(ndvi_sds, (-1, 1))
return np.concatenate((ndvi_means, ndvi_sds), axis=1)
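# Usage sketch (added illustration): given a batch of 4-channel images shaped
# (n, height, width, 4),
#   ndvi_features = NDVIFeatureExtractor().fit_transform(imgs)   # shape (n, 2)
# returns the per-image NDVI mean and standard deviation.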
class LBPFeatureExtractor(BaseFeatureExtractor):
"""extracts LBP features of a set of images using the 'uniform' method"""
def __init__(self, r):
"""
Parameters
----------
r: int
radius of LBP
"""
self.r = r
self.p = 8 * r
self.n_bins = self.p + 2
super().__init__()
def extract_feature(self, img):
"""Obtain lbp feature from tiff image"""
img_grayscale = tif_to_grayscale(img)
lbp = local_binary_pattern(img_grayscale, self.p, self.r, method='uniform')
lbp_feature = np.histogram(lbp, bins=self.n_bins, range=(0, self.n_bins))[0]
return lbp_feature
def fit(self, imgs, y=None):
return self
def transform(self, imgs, y=None):
self.n_images = np.shape(imgs)[0]
features = np.zeros([self.n_images, self.n_bins])
for i in range(self.n_images):
features[i, :] = self.extract_feature(imgs[i, :, :, :])
return features
class GLCMFeatureExtractor(BaseFeatureExtractor):
"""extracts set of GLCM features from a set of images
Parameters
----------
nir: if False it will extract the GLCM from the grayscale image
if True it will extract the GLCM from the NIR channel
"""
def __init__(self, nir=False):
self.nir = nir
self.scaler = MinMaxScaler(feature_range=(0, 255))
self.distances = [1]
self.directions = [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4] # omnidirectional
self.glcm_features = ['contrast', 'ASM', 'correlation']
self.n_features = len(self.glcm_features)
super().__init__()
def obtain_property(self, glcm, feature):
return np.mean(greycoprops(glcm, feature))
def extract_feature(self, img):
"""Obtain glcm feature from tiff image"""
if self.nir:
img_2d = img[:, :, 3]
img_2d = self.scaler.fit_transform(img_2d.astype('float64'))
img_2d = img_2d.astype('uint8')
else:
img_2d = tif_to_grayscale(img, as_int=True)
glcm = greycomatrix(img_2d, self.distances, self.directions,
symmetric=True, normed=True)
im_features = np.zeros(self.n_features)
for i, feature in enumerate(self.glcm_features):
im_features[i] = self.obtain_property(glcm, feature)
return im_features
def fit(self, imgs, y=None):
return self
def transform(self, imgs, y=None):
self.n_images = np.shape(imgs)[0]
features = np.zeros([self.n_images, self.n_features])
for i in range(self.n_images):
features[i, :] = self.extract_feature(imgs[i, :, :, :])
return features
class GCHFeatureExtractor(BaseFeatureExtractor):
"""extracts global color histogram (GCH) from a set of images"""
def __init__(self, color_space='rgb'):
self.n_bins = 8 # number of bins per channel histogram
self.n_channels = 3
self.n_features = self.n_bins ** self.n_channels
self.color_space = color_space
self.ranges = {'rgb': ((0, 255), (0, 255), (0, 255)),
'hsv': ((0, 1), (0, 1), (0, 1)),
'lab': ((0, 100), (-128, 127), (-128, 127))}
self.range = self.ranges[self.color_space]
super().__init__()
def preprocess_image(self, img):
img = tif_to_rgb(img, as_int=True)
if self.color_space == 'hsv':
return rgb2hsv(img)
elif self.color_space == 'lab':
return rgb2lab(img)
elif self.color_space == 'rgb':
return img
def extract_feature(self, img):
"""Obtain GCH feature from tiff image"""
img = self.preprocess_image(img)
GCH, _ = histogramdd(img.reshape(-1, img.shape[-1]),
bins=(self.n_bins, self.n_bins, self.n_bins),
range=self.range)
GCH = GCH.flatten() / np.sum(GCH) # normalize to have L1 norm of 1
return GCH
def fit(self, imgs, y=None):
return self
def transform(self, imgs, y=None):
self.n_images = np.shape(imgs)[0]
features = np.zeros([self.n_images, self.n_features])
for i in range(self.n_images):
features[i, :] = self.extract_feature(imgs[i, :, :, :])
return features
class LocalFeatureExtractor(BaseFeatureExtractor):
def __init__(self, descriptor='brisk', n_octaves=4, threshold=25, pattern_scale=1.0):
self.detector = BRISK_create(thresh=threshold,
octaves=n_octaves,
patternScale=pattern_scale)
if descriptor == 'brisk':
self.extractor = self.detector
elif descriptor == 'freak':
self.extractor = FREAK_create(patternScale=pattern_scale, nOctaves=n_octaves)
self.feature_dimension = 64
super().__init__()
def extract_feature(self, img):
img_grayscale = tif_to_grayscale(img, as_int=True)
keypoints = self.detector.detect(img_grayscale)
_, descriptors = self.extractor.compute(img_grayscale, keypoints)
return descriptors
def fit(self, imgs, y=None):
return self
def transform(self, imgs, y=None):
self.n_images = np.shape(imgs)[0]
features = []
for i in range(self.n_images):
            # append to a list rather than an ndarray because the number of keypoints (and hence descriptors) varies per image
features.append(self.extract_feature(imgs[i, :, :, :]))
return features
class BoVW(TransformerMixin):
"""Bag of visual words"""
def __init__(self, n_clusters=25, batch_size=500):
self.n_clusters = n_clusters # number of words in the visual bag of words
self.kmeans = MiniBatchKMeans(n_clusters=self.n_clusters,
batch_size=batch_size,
random_state=0)
def create_histogram(self, img_descriptors):
if img_descriptors is None:
# return empty histogram if no keypoints are detected
return np.zeros(self.n_clusters)
try:
clusters = self.kmeans.predict(img_descriptors)
except ValueError:
# if there is only one descriptor we need to make it have one row
img_descriptors = img_descriptors.reshape(1, -1)
clusters = self.kmeans.predict(img_descriptors)
histogram = np.zeros(self.n_clusters)
counts = np.unique(clusters, return_counts=True)
for i, count in zip(counts[0], counts[1]):
histogram[i] = count
histogram /= np.sum(histogram) # normalize histogram
return histogram
def fit(self, descriptors_list, y=None):
"""creates dictionary for the bag of visual words"""
descriptors_list = [d for d in descriptors_list if d is not None]
self.kmeans.fit(np.concatenate(descriptors_list))
return self
def transform(self, descriptors_list, y=None):
"""create feature histograms for all descriptors of all images"""
self.n_images = len(descriptors_list)
histograms = np.zeros([self.n_images, self.n_clusters])
for i, descriptors in enumerate(descriptors_list):
histograms[i, :] = self.create_histogram(descriptors)
return histograms
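# Pipeline sketch (added illustration, assumes a batch of tiff images `imgs`
# shaped (n, height, width, 4)):
#   local = LocalFeatureExtractor(descriptor='brisk')
#   bovw = BoVW(n_clusters=25)
#   descriptors = local.fit_transform(imgs)       # list of per-image descriptor arrays
#   histograms = bovw.fit_transform(descriptors)  # (n, 25) visual-word histograms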
|
[
"skimage.feature.local_binary_pattern",
"sys.path.append",
"numpy.divide",
"numpy.mean",
"numpy.histogram",
"numpy.reshape",
"helpers.img_utils.tif_to_grayscale",
"skimage.color.rgb2lab",
"numpy.concatenate",
"sklearn.preprocessing.MinMaxScaler",
"cv2.xfeatures2d.FREAK_create",
"helpers.img_utils.tif_to_rgb",
"sklearn.cluster.MiniBatchKMeans",
"skimage.color.rgb2hsv",
"numpy.std",
"numpy.shape",
"numpy.unique",
"skimage.feature.greycomatrix",
"numpy.sum",
"numpy.zeros",
"cv2.BRISK_create",
"skimage.feature.greycoprops"
] |
[((392, 414), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (407, 414), False, 'import sys\n'), ((1451, 1487), 'numpy.mean', 'np.mean', (['imgs'], {'axis': 'self.pixels_axis'}), '(imgs, axis=self.pixels_axis)\n', (1458, 1487), True, 'import numpy as np\n'), ((1502, 1537), 'numpy.std', 'np.std', (['imgs'], {'axis': 'self.pixels_axis'}), '(imgs, axis=self.pixels_axis)\n', (1508, 1537), True, 'import numpy as np\n'), ((1559, 1581), 'numpy.mean', 'np.mean', (['means'], {'axis': '(1)'}), '(means, axis=1)\n', (1566, 1581), True, 'import numpy as np\n'), ((1603, 1634), 'numpy.reshape', 'np.reshape', (['brightness', '(-1, 1)'], {}), '(brightness, (-1, 1))\n', (1613, 1634), True, 'import numpy as np\n'), ((1651, 1699), 'numpy.concatenate', 'np.concatenate', (['(means, sds, brightness)'], {'axis': '(1)'}), '((means, sds, brightness), axis=1)\n', (1665, 1699), True, 'import numpy as np\n'), ((2404, 2435), 'numpy.divide', 'np.divide', (['(nir - red)', '(nir + red)'], {}), '(nir - red, nir + red)\n', (2413, 2435), True, 'import numpy as np\n'), ((2458, 2494), 'numpy.mean', 'np.mean', (['ndvi'], {'axis': 'self.pixels_axis'}), '(ndvi, axis=self.pixels_axis)\n', (2465, 2494), True, 'import numpy as np\n'), ((2514, 2549), 'numpy.std', 'np.std', (['ndvi'], {'axis': 'self.pixels_axis'}), '(ndvi, axis=self.pixels_axis)\n', (2520, 2549), True, 'import numpy as np\n'), ((2571, 2602), 'numpy.reshape', 'np.reshape', (['ndvi_means', '(-1, 1)'], {}), '(ndvi_means, (-1, 1))\n', (2581, 2602), True, 'import numpy as np\n'), ((2622, 2651), 'numpy.reshape', 'np.reshape', (['ndvi_sds', '(-1, 1)'], {}), '(ndvi_sds, (-1, 1))\n', (2632, 2651), True, 'import numpy as np\n'), ((2667, 2713), 'numpy.concatenate', 'np.concatenate', (['(ndvi_means, ndvi_sds)'], {'axis': '(1)'}), '((ndvi_means, ndvi_sds), axis=1)\n', (2681, 2713), True, 'import numpy as np\n'), ((3185, 3206), 'helpers.img_utils.tif_to_grayscale', 'tif_to_grayscale', (['img'], {}), '(img)\n', (3201, 3206), False, 'from helpers.img_utils import tif_to_grayscale, tif_to_rgb\n'), ((3221, 3290), 'skimage.feature.local_binary_pattern', 'local_binary_pattern', (['img_grayscale', 'self.p', 'self.r'], {'method': '"""uniform"""'}), "(img_grayscale, self.p, self.r, method='uniform')\n", (3241, 3290), False, 'from skimage.feature import local_binary_pattern, greycoprops, greycomatrix\n'), ((3558, 3596), 'numpy.zeros', 'np.zeros', (['[self.n_images, self.n_bins]'], {}), '([self.n_images, self.n_bins])\n', (3566, 3596), True, 'import numpy as np\n'), ((4090, 4126), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 255)'}), '(feature_range=(0, 255))\n', (4102, 4126), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4825, 4911), 'skimage.feature.greycomatrix', 'greycomatrix', (['img_2d', 'self.distances', 'self.directions'], {'symmetric': '(True)', 'normed': '(True)'}), '(img_2d, self.distances, self.directions, symmetric=True,\n normed=True)\n', (4837, 4911), False, 'from skimage.feature import local_binary_pattern, greycoprops, greycomatrix\n'), ((4958, 4983), 'numpy.zeros', 'np.zeros', (['self.n_features'], {}), '(self.n_features)\n', (4966, 4983), True, 'import numpy as np\n'), ((5288, 5330), 'numpy.zeros', 'np.zeros', (['[self.n_images, self.n_features]'], {}), '([self.n_images, self.n_features])\n', (5296, 5330), True, 'import numpy as np\n'), ((6133, 6161), 'helpers.img_utils.tif_to_rgb', 'tif_to_rgb', (['img'], {'as_int': '(True)'}), '(img, as_int=True)\n', (6143, 6161), False, 'from helpers.img_utils 
import tif_to_grayscale, tif_to_rgb\n'), ((6927, 6969), 'numpy.zeros', 'np.zeros', (['[self.n_images, self.n_features]'], {}), '([self.n_images, self.n_features])\n', (6935, 6969), True, 'import numpy as np\n'), ((7270, 7347), 'cv2.BRISK_create', 'BRISK_create', ([], {'thresh': 'threshold', 'octaves': 'n_octaves', 'patternScale': 'pattern_scale'}), '(thresh=threshold, octaves=n_octaves, patternScale=pattern_scale)\n', (7282, 7347), False, 'from cv2 import BRISK_create\n'), ((7751, 7785), 'helpers.img_utils.tif_to_grayscale', 'tif_to_grayscale', (['img'], {'as_int': '(True)'}), '(img, as_int=True)\n', (7767, 7785), False, 'from helpers.img_utils import tif_to_grayscale, tif_to_rgb\n'), ((8549, 8635), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'self.n_clusters', 'batch_size': 'batch_size', 'random_state': '(0)'}), '(n_clusters=self.n_clusters, batch_size=batch_size,\n random_state=0)\n', (8564, 8635), False, 'from sklearn.cluster import MiniBatchKMeans, KMeans\n'), ((9225, 9250), 'numpy.zeros', 'np.zeros', (['self.n_clusters'], {}), '(self.n_clusters)\n', (9233, 9250), True, 'import numpy as np\n'), ((9268, 9307), 'numpy.unique', 'np.unique', (['clusters'], {'return_counts': '(True)'}), '(clusters, return_counts=True)\n', (9277, 9307), True, 'import numpy as np\n'), ((9414, 9431), 'numpy.sum', 'np.sum', (['histogram'], {}), '(histogram)\n', (9420, 9431), True, 'import numpy as np\n'), ((9932, 9974), 'numpy.zeros', 'np.zeros', (['[self.n_images, self.n_clusters]'], {}), '([self.n_images, self.n_clusters])\n', (9940, 9974), True, 'import numpy as np\n'), ((3313, 3372), 'numpy.histogram', 'np.histogram', (['lbp'], {'bins': 'self.n_bins', 'range': '(0, self.n_bins)'}), '(lbp, bins=self.n_bins, range=(0, self.n_bins))\n', (3325, 3372), True, 'import numpy as np\n'), ((3521, 3535), 'numpy.shape', 'np.shape', (['imgs'], {}), '(imgs)\n', (3529, 3535), True, 'import numpy as np\n'), ((4453, 4479), 'skimage.feature.greycoprops', 'greycoprops', (['glcm', 'feature'], {}), '(glcm, feature)\n', (4464, 4479), False, 'from skimage.feature import local_binary_pattern, greycoprops, greycomatrix\n'), ((4775, 4809), 'helpers.img_utils.tif_to_grayscale', 'tif_to_grayscale', (['img'], {'as_int': '(True)'}), '(img, as_int=True)\n', (4791, 4809), False, 'from helpers.img_utils import tif_to_grayscale, tif_to_rgb\n'), ((5251, 5265), 'numpy.shape', 'np.shape', (['imgs'], {}), '(imgs)\n', (5259, 5265), True, 'import numpy as np\n'), ((6219, 6231), 'skimage.color.rgb2hsv', 'rgb2hsv', (['img'], {}), '(img)\n', (6226, 6231), False, 'from skimage.color import rgb2lab, rgb2hsv\n'), ((6707, 6718), 'numpy.sum', 'np.sum', (['GCH'], {}), '(GCH)\n', (6713, 6718), True, 'import numpy as np\n'), ((6890, 6904), 'numpy.shape', 'np.shape', (['imgs'], {}), '(imgs)\n', (6898, 6904), True, 'import numpy as np\n'), ((8061, 8075), 'numpy.shape', 'np.shape', (['imgs'], {}), '(imgs)\n', (8069, 8075), True, 'import numpy as np\n'), ((8879, 8904), 'numpy.zeros', 'np.zeros', (['self.n_clusters'], {}), '(self.n_clusters)\n', (8887, 8904), True, 'import numpy as np\n'), ((9685, 9717), 'numpy.concatenate', 'np.concatenate', (['descriptors_list'], {}), '(descriptors_list)\n', (9699, 9717), True, 'import numpy as np\n'), ((6291, 6303), 'skimage.color.rgb2lab', 'rgb2lab', (['img'], {}), '(img)\n', (6298, 6303), False, 'from skimage.color import rgb2lab, rgb2hsv\n'), ((7565, 7625), 'cv2.xfeatures2d.FREAK_create', 'FREAK_create', ([], {'patternScale': 'pattern_scale', 'nOctaves': 'n_octaves'}), 
'(patternScale=pattern_scale, nOctaves=n_octaves)\n', (7577, 7625), False, 'from cv2.xfeatures2d import FREAK_create\n')]
|
# -*- coding: utf-8 -*-
#from __future__ import print_function
#import pixy
#from ctypes import *
#from pixy import *
import math as ma
import numpy as np
test_data = 120,100 #test input
#hight = 495-114 #mm
#ball parameter
r_ball = 114.8 #mm
'PIXY Parameter'
#pixy-cam image size in pixy coordinates
delta_X_pixy = 207
delta_Y_pixy = 315
#angles
ang_cam_tilt = ma.radians(33)
ang_rev_cam_tilt = ma.radians(90)-ang_cam_tilt
ang_cam_flare_X = ma.radians(47) #aperture (opening) angle of the pixy cam
ang_cam_flare_Y = ma.radians(75)
delta_ang_X = ma.radians(47) #deg
ang_ball_ratio = delta_X_pixy / delta_ang_X
'''PIXY Parameter'''
#pixy-cam position parameter
'preliminary parameters!!!'
ang_offset = ma.radians(30) #1.0472 # 60 degree
h_cam_offset = 463 #floor to camera-mountings rotation axle
s_cam_offset = 77 #midpoint robot to camera mounting on x axle
r_cam_rot = 55 #radius of camera rotation circle on xy-level
#absolute position of camera on robot
X_cam_pos = 0
Z_cam_pos = 0
#pixy-cam image parameter
Xmin_image_cam = 0
Xmax_image_cam = 0
X_offset_image_cam = 0
Ymin_image_cam = 0
Ymax_image_cam = 0
delta_X_image_cam = 0
delta_Y_image_cam = 0
X_factor_cam = 1
Y_factor_cam = 1
X_ball_ego = 0 #mm
Y_ball_ego = 0 #mm
X_ball_filtered = 0
Y_ball_filtered = 0
'Kalman Parameter'
dt = 1.0/60.0 #pixys freq = 60 Hz
F = np.array([[1, dt, 0], [0, 1, dt],[0, 0, 1]]) #state transition model, A
H = np.array([1, 0, 0]).reshape(1, 3) #transponieren #observation model C
"""
This is my C matrix for the output, so the middle entry should be the velocity.
"""
q = 0.05
Q = np.array([[q, q, 0], [q, q, 0], [0, 0, 0]])
R = np.array([0.05]).reshape(1, 1) #observation noise
# Pixy2 Python SWIG get blocks example
'''
blocks = BlockArray(100)
frame = 0
class Blocks (Structure):
_fields_ = [ ("m_signature", c_uint),
("m_x", c_uint),
("m_y", c_uint),
("m_width", c_uint),
("m_height", c_uint),
("m_angle", c_uint),
("m_index", c_uint),
("m_age", c_uint) ]
'''
class KalmanFilter(object):
def __init__(self, F = None, B = None, H = None, Q = None, R = None, P = None, x0 = None):
if(F is None or H is None):
raise ValueError("Set proper system dynamics.")
self.n = F.shape[1]
self.m = H.shape[1]
self.F = F
self.H = H
self.B = 0 if B is None else B
self.Q = np.eye(self.n) if Q is None else Q
self.R = np.eye(self.n) if R is None else R
self.P = np.eye(self.n) if P is None else P
self.x = np.zeros((self.n, 1)) if x0 is None else x0
def predict(self, u = 0):
self.x = np.dot(self.F, self.x) + np.dot(self.B, u)
self.P = np.dot(np.dot(self.F, self.P), self.F.T) + self.Q
return self.x
def update(self, z):
y = z - np.dot(self.H, self.x) #das gemessne x aus der Matrix
S = self.R + np.dot(self.H, np.dot(self.P, self.H.T))
K = np.dot(np.dot(self.P, self.H.T), np.linalg.inv(S))
self.x = self.x + np.dot(K, y)
I = np.eye(self.n)
self.P = np.dot(np.dot(I - np.dot(K, self.H), self.P),
(I - np.dot(K, self.H)).T) + np.dot(np.dot(K, self.R), K.T)
def kalman_pixy(kf,measure):
kf.predict()
kf.update(measure)
P = kf.x[0] #Position
V = kf.x[1] #Velocity
a = kf.x[2] #acceleration
return P, V
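# Illustrative check (added; not in the original script): the filter above can be
# exercised on synthetic measurements before any Pixy hardware is attached.
def _kalman_demo(measurements=(10.0, 10.5, 11.1, 11.6)):
    kf_demo = KalmanFilter(F=F, H=H, Q=Q, R=R)
    pos = vel = 0.0
    for z in measurements:  # fake positions in mm
        pos, vel = kalman_pixy(kf_demo, z)
    return pos, vel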
'''
def pixy_start():
print("Pixy2 Python SWIG Example -- Get Blocks")
pixy.init ()
pixy.change_prog ("color_connected_components");
def get_pixy():
count = pixy.ccc_get_blocks (100, blocks)
if count > 0:
# print('frame %3d:' % (frame))
# frame = frame + 1
for index in range (0, count):
            #adapt the Pixy coordinate system to the robot coordinate system
print("X_raw|Y_raw) ",blocks[index].m_y,blocks[index].m_x)
X_ball = 103.5 - (blocks[index].m_y) # - 103.5) #*(-1) #Y_pixy_max / 2
Y_ball = (blocks[index].m_x - 157.5) #*(-1) #X_pixy_max / 2
print('(X|Y)= ',X_ball,'|',Y_ball)
return X_ball,Y_ball
'''
# pixy-cam position from midpoint robot in relation to tilt angle alpha
def cam_position(alpha, h, s, r):
return (ma.sin(alpha)*r) + h, (ma.cos(alpha)*r) + s
def image_position(Z_cam, X_cam, gamma, delta_X, delta_Y):
global Xmin_image_cam #X_offset_image_cam
global X_offset_image_cam
Xmax_image_cam = Z_cam / ma.tan(gamma - (delta_X / 2))
Xmin_image_cam = Z_cam / ma.tan(gamma + (delta_X / 2))
#cam_offset in image on X axle:
X_offset_image_cam = (Xmax_image_cam + Xmin_image_cam)/2
print("Xmax, Xmin: ", Xmax_image_cam, Xmin_image_cam)
Ymax_image_cam = Xmax_image_cam / ma.tan(delta_Y / 2)
Ymin_image_cam = Xmin_image_cam / ma.tan(delta_Y / 2)
return Xmax_image_cam - Xmin_image_cam, Ymax_image_cam - Ymin_image_cam
#configuration of pixy-cam results before working
def cam_config():
Z_cam_pos, X_cam_pos = cam_position(ang_cam_tilt, h_cam_offset, s_cam_offset, r_cam_rot)
x, y = image_position(Z_cam_pos, X_cam_pos, ang_rev_cam_tilt, ang_cam_flare_X, ang_cam_flare_Y)
print("Cam Pos ", Z_cam_pos, X_cam_pos)
print("Img Pos ", x, y)
return (x / delta_X_pixy), (y / delta_Y_pixy)
#process new data from pixy-cam
def cam_process(raw_data):
x,y = raw_data
#print("Xmin: ", Xmin_image_cam)
ang_ball = ang_ball_ratio * x
    #X_value = measured X + smallest possible measured X value + cam in front of the robot midpoint + offset of the ball image to the ball midpoint
return (x * X_factor_cam) + X_offset_image_cam + s_cam_offset + (r_ball * ma.cos(ang_ball)), (y * Y_factor_cam)
def offset_start(init_data):
init_data = []
print("offset calc start...")
for count in range (0,200):
init_data.append(cam_process(test_data))
offset = np.array(init_data)
return np.median(offset, axis=0)
'MAIN'
#Config Parameter
#pixy_start()
X_factor_cam, Y_factor_cam = cam_config()
offset_pixy = offset_start(test_data)
print("offset_pixy: ", offset_pixy)
kf_pixy = KalmanFilter(F = F, H = H, Q = Q, R = R)
#kf_pixy = KalmanFilter(F = F, B =offset_pixy, H = H, Q = Q, R = R)
#Execute:
'DEBUG OUTPUT'
print("Factors: ",X_factor_cam, Y_factor_cam)
#print("Filtered Data: ", X_ball_filtered, Y_ball_filtered)
#print(math.tan(math.radians(45)))
Position_Error_X = []
Position_Error_Y = []
for i in range (0,6): #range(start,end,step)
#X_ball_ego, Y_ball_ego = cam_process(test_data) #get_pixy())
P_ball_ego = cam_process(test_data) # get_pixy())
print("Länge in mm:", X_ball_ego, Y_ball_ego)
P_ball_filtered, V_ball_filtered = kalman_pixy(kf_pixy, cam_process(test_data))
print("Filtered: ", P_ball_filtered, V_ball_filtered)
'''
P_X, P_Y = P_ball_filtered
X_error = P_X - X_ball_ego
Y_error = P_Y - Y_ball_ego
print("Error: X: ", X_error,"Y: ", Y_error)
Filter_Error_X.append(X_error)
Filter_Error_Y.append(Y_error)
'''
#alternative
#absolute
#Filter_Error_Y.append(P_ball_filtered[1] - P_ball_ego[1])
#Filter_Error_X.append(P_ball_filtered[0] - P_ball_ego[0])
#relative in percent
Position_Error_X.append(100 -(100 / P_ball_filtered[0] * P_ball_ego[0]))
Position_Error_Y.append(100- (100 / P_ball_filtered[1] * P_ball_ego[1]))
print("Error: X: ", Position_Error_X[-1], "Y: ",Position_Error_Y[-1] )
import matplotlib.pyplot as plt
plt.plot(range(len(Position_Error_X)), np.array(Position_Error_X), label = 'Position Error X')
plt.plot(range(len(Position_Error_Y)), np.array(Position_Error_Y), label = 'Position Error Y')
plt.legend()
plt.show()
|
[
"numpy.eye",
"numpy.median",
"math.tan",
"math.radians",
"math.cos",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.linalg.inv",
"math.sin",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((368, 382), 'math.radians', 'ma.radians', (['(33)'], {}), '(33)\n', (378, 382), True, 'import math as ma\n'), ((449, 463), 'math.radians', 'ma.radians', (['(47)'], {}), '(47)\n', (459, 463), True, 'import math as ma\n'), ((507, 521), 'math.radians', 'ma.radians', (['(75)'], {}), '(75)\n', (517, 521), True, 'import math as ma\n'), ((536, 550), 'math.radians', 'ma.radians', (['(47)'], {}), '(47)\n', (546, 550), True, 'import math as ma\n'), ((690, 704), 'math.radians', 'ma.radians', (['(30)'], {}), '(30)\n', (700, 704), True, 'import math as ma\n'), ((1316, 1361), 'numpy.array', 'np.array', (['[[1, dt, 0], [0, 1, dt], [0, 0, 1]]'], {}), '([[1, dt, 0], [0, 1, dt], [0, 0, 1]])\n', (1324, 1361), True, 'import numpy as np\n'), ((1574, 1617), 'numpy.array', 'np.array', (['[[q, q, 0], [q, q, 0], [0, 0, 0]]'], {}), '([[q, q, 0], [q, q, 0], [0, 0, 0]])\n', (1582, 1617), True, 'import numpy as np\n'), ((7493, 7505), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7503, 7505), True, 'import matplotlib.pyplot as plt\n'), ((7506, 7516), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7514, 7516), True, 'import matplotlib.pyplot as plt\n'), ((403, 417), 'math.radians', 'ma.radians', (['(90)'], {}), '(90)\n', (413, 417), True, 'import math as ma\n'), ((5728, 5747), 'numpy.array', 'np.array', (['init_data'], {}), '(init_data)\n', (5736, 5747), True, 'import numpy as np\n'), ((5759, 5784), 'numpy.median', 'np.median', (['offset'], {'axis': '(0)'}), '(offset, axis=0)\n', (5768, 5784), True, 'import numpy as np\n'), ((7342, 7368), 'numpy.array', 'np.array', (['Position_Error_X'], {}), '(Position_Error_X)\n', (7350, 7368), True, 'import numpy as np\n'), ((7437, 7463), 'numpy.array', 'np.array', (['Position_Error_Y'], {}), '(Position_Error_Y)\n', (7445, 7463), True, 'import numpy as np\n'), ((1393, 1412), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (1401, 1412), True, 'import numpy as np\n'), ((1622, 1638), 'numpy.array', 'np.array', (['[0.05]'], {}), '([0.05])\n', (1630, 1638), True, 'import numpy as np\n'), ((3019, 3033), 'numpy.eye', 'np.eye', (['self.n'], {}), '(self.n)\n', (3025, 3033), True, 'import numpy as np\n'), ((4332, 4359), 'math.tan', 'ma.tan', (['(gamma - delta_X / 2)'], {}), '(gamma - delta_X / 2)\n', (4338, 4359), True, 'import math as ma\n'), ((4391, 4418), 'math.tan', 'ma.tan', (['(gamma + delta_X / 2)'], {}), '(gamma + delta_X / 2)\n', (4397, 4418), True, 'import math as ma\n'), ((4616, 4635), 'math.tan', 'ma.tan', (['(delta_Y / 2)'], {}), '(delta_Y / 2)\n', (4622, 4635), True, 'import math as ma\n'), ((4674, 4693), 'math.tan', 'ma.tan', (['(delta_Y / 2)'], {}), '(delta_Y / 2)\n', (4680, 4693), True, 'import math as ma\n'), ((2367, 2381), 'numpy.eye', 'np.eye', (['self.n'], {}), '(self.n)\n', (2373, 2381), True, 'import numpy as np\n'), ((2419, 2433), 'numpy.eye', 'np.eye', (['self.n'], {}), '(self.n)\n', (2425, 2433), True, 'import numpy as np\n'), ((2471, 2485), 'numpy.eye', 'np.eye', (['self.n'], {}), '(self.n)\n', (2477, 2485), True, 'import numpy as np\n'), ((2523, 2544), 'numpy.zeros', 'np.zeros', (['(self.n, 1)'], {}), '((self.n, 1))\n', (2531, 2544), True, 'import numpy as np\n'), ((2615, 2637), 'numpy.dot', 'np.dot', (['self.F', 'self.x'], {}), '(self.F, self.x)\n', (2621, 2637), True, 'import numpy as np\n'), ((2640, 2657), 'numpy.dot', 'np.dot', (['self.B', 'u'], {}), '(self.B, u)\n', (2646, 2657), True, 'import numpy as np\n'), ((2789, 2811), 'numpy.dot', 'np.dot', (['self.H', 'self.x'], {}), '(self.H, self.x)\n', (2795, 2811), True, 
'import numpy as np\n'), ((2924, 2948), 'numpy.dot', 'np.dot', (['self.P', 'self.H.T'], {}), '(self.P, self.H.T)\n', (2930, 2948), True, 'import numpy as np\n'), ((2950, 2966), 'numpy.linalg.inv', 'np.linalg.inv', (['S'], {}), '(S)\n', (2963, 2966), True, 'import numpy as np\n'), ((2994, 3006), 'numpy.dot', 'np.dot', (['K', 'y'], {}), '(K, y)\n', (3000, 3006), True, 'import numpy as np\n'), ((2682, 2704), 'numpy.dot', 'np.dot', (['self.F', 'self.P'], {}), '(self.F, self.P)\n', (2688, 2704), True, 'import numpy as np\n'), ((2879, 2903), 'numpy.dot', 'np.dot', (['self.P', 'self.H.T'], {}), '(self.P, self.H.T)\n', (2885, 2903), True, 'import numpy as np\n'), ((3143, 3160), 'numpy.dot', 'np.dot', (['K', 'self.R'], {}), '(K, self.R)\n', (3149, 3160), True, 'import numpy as np\n'), ((4123, 4136), 'math.sin', 'ma.sin', (['alpha'], {}), '(alpha)\n', (4129, 4136), True, 'import math as ma\n'), ((4146, 4159), 'math.cos', 'ma.cos', (['alpha'], {}), '(alpha)\n', (4152, 4159), True, 'import math as ma\n'), ((5512, 5528), 'math.cos', 'ma.cos', (['ang_ball'], {}), '(ang_ball)\n', (5518, 5528), True, 'import math as ma\n'), ((3069, 3086), 'numpy.dot', 'np.dot', (['K', 'self.H'], {}), '(K, self.H)\n', (3075, 3086), True, 'import numpy as np\n'), ((3112, 3129), 'numpy.dot', 'np.dot', (['K', 'self.H'], {}), '(K, self.H)\n', (3118, 3129), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Image conversion functionality for trivector"""
from enum import Enum
import numpy as np
import svgwrite
import cv2
import progressbar
def upper_tri_sum(d3array: np.ndarray) -> np.ndarray:
"""Get a 3D image array's upper diagonal's pixel color average
:param d3array: 3D image array derived from :func:`cv2.imread`
    Treat the 3D array as a 2D array whose innermost entries (pixel BGR
    values) are the base values to be averaged.
:return: BGR array of the average color of the upper diagonal of the
3D image array
"""
x, y, _ = d3array.shape
tri = []
for i in range(x):
if i > y:
break
for j in range(y - i):
tri.append(d3array[i][i + j])
return np.sum(tri, axis=0) // len(tri)
def lower_tri_sum(d3array: np.ndarray) -> np.ndarray:
"""Get a 3D image array's lower diagonal's pixel color average
:param d3array: 3D image array derived from :func:`cv2.imread`
    Treat the 3D array as a 2D array whose innermost entries (pixel BGR
    values) are the base values to be averaged.
.. note::
If the lower diagonal cannot be computed (eg: flat/malformed 3D array)
use the 3D image array's upper diagonal's pixel color average instead.
:return: BGR array of the average color of the lower diagonal of the
3D image array
"""
x, y, _ = d3array.shape
tri = []
for i in range(x):
if i > y:
break
for j in range(i):
tri.append(d3array[i][j])
# if bottom tri is empty use the upper tri's sum
if not tri:
return upper_tri_sum(d3array)
return np.sum(tri, axis=0) // len(tri)
def vectorize_sector_left(sub_img: np.ndarray, svg_drawing: svgwrite.Drawing,
x: int, y: int, cut_size: int):
"""Add two triangles to ``svg_drawing`` whose colors are derived from
the color averages from the top and bottom diagonals of the 3D BGR image
array of the sub image"""
b, g, r = upper_tri_sum(sub_img)
svg_drawing.add(
svg_drawing.polygon(
[(x, y), (x + cut_size, y), (x + cut_size, y + cut_size)],
fill=svgwrite.rgb(r, g, b, "RGB")
)
)
b, g, r = lower_tri_sum(sub_img)
svg_drawing.add(
svg_drawing.polygon(
[(x, y), (x, y + cut_size), (x + cut_size, y + cut_size)],
fill=svgwrite.rgb(r, g, b, "RGB")
)
)
def vectorize_sector_right(sub_img: np.ndarray, svg_drawing: svgwrite.Drawing,
x: int, y: int, cut_size: int):
"""Add two triangles to ``svg_drawing`` whose colors are derived from
the color averages from the top and bottom diagonals of the 3D BGR image
array of the sub image"""
b, g, r = upper_tri_sum(sub_img)
svg_drawing.add(
svg_drawing.polygon(
[(x, y + cut_size), (x + cut_size, y + cut_size), (x + cut_size, y)],
fill=svgwrite.rgb(r, g, b, "RGB")
)
)
b, g, r = lower_tri_sum(sub_img)
svg_drawing.add(
svg_drawing.polygon(
[(x, y + cut_size), (x, y), (x + cut_size, y)],
fill=svgwrite.rgb(r, g, b, "RGB")
)
)
class DiagonalStyle(Enum):
"""Styling options noting the diagonal arrangement of the
triangle sectors"""
right = "right"
left = "left"
alternating = "alternating"
def __str__(self):
return self.value
def trivector(image_path: str, cut_size: int, output_path: str,
diagonal_style: DiagonalStyle = DiagonalStyle.alternating):
"""Convert an image into a SVG vector image composed of triangular sectors
:param image_path: path to the image to trivector
:param cut_size: size in pixels for each triangle sector
:param diagonal_style: diagonal arrangement of the triangle sectors
:param output_path: path to write the trivectored image
"""
image = cv2.imread(image_path) # pylint:disable=no-member
height, width, _ = image.shape
width_slices = range(0, width, cut_size)
height_slices = range(0, height, cut_size)
svg_drawing = svgwrite.Drawing(
output_path,
profile="full",
size=(len(width_slices)*cut_size, len(height_slices)*cut_size)
)
# start up the progress bar
# each image sector is one tick one the progress bar
bar = progressbar.ProgressBar(max_value=len(width_slices)*len(height_slices))
counter_2 = 0
sector_num = 0
for y in height_slices:
counter_1 = counter_2
counter_2 += 1
for x in width_slices:
sector_image = image[y:y + cut_size, x:x + cut_size]
if (diagonal_style == DiagonalStyle.left) or \
(diagonal_style == DiagonalStyle.alternating and counter_1 % 2):
vectorize_sector_left(sector_image, svg_drawing, x, y, cut_size)
else:
sector_image = np.rot90(sector_image, axes=(0, 1))
vectorize_sector_right(sector_image, svg_drawing, x, y, cut_size)
sector_num += 1
counter_1 += 1
bar.update(sector_num)
svg_drawing.save()
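# Illustrative usage sketch (added; not part of the original module).
# "photo.jpg" / "photo.svg" are placeholder paths for any cv2-readable input:
#   trivector("photo.jpg", cut_size=32, output_path="photo.svg",
#             diagonal_style=DiagonalStyle.alternating)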
|
[
"numpy.sum",
"cv2.imread",
"svgwrite.rgb",
"numpy.rot90"
] |
[((3986, 4008), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (3996, 4008), False, 'import cv2\n'), ((797, 816), 'numpy.sum', 'np.sum', (['tri'], {'axis': '(0)'}), '(tri, axis=0)\n', (803, 816), True, 'import numpy as np\n'), ((1712, 1731), 'numpy.sum', 'np.sum', (['tri'], {'axis': '(0)'}), '(tri, axis=0)\n', (1718, 1731), True, 'import numpy as np\n'), ((2238, 2266), 'svgwrite.rgb', 'svgwrite.rgb', (['r', 'g', 'b', '"""RGB"""'], {}), "(r, g, b, 'RGB')\n", (2250, 2266), False, 'import svgwrite\n'), ((2458, 2486), 'svgwrite.rgb', 'svgwrite.rgb', (['r', 'g', 'b', '"""RGB"""'], {}), "(r, g, b, 'RGB')\n", (2470, 2486), False, 'import svgwrite\n'), ((3010, 3038), 'svgwrite.rgb', 'svgwrite.rgb', (['r', 'g', 'b', '"""RGB"""'], {}), "(r, g, b, 'RGB')\n", (3022, 3038), False, 'import svgwrite\n'), ((3219, 3247), 'svgwrite.rgb', 'svgwrite.rgb', (['r', 'g', 'b', '"""RGB"""'], {}), "(r, g, b, 'RGB')\n", (3231, 3247), False, 'import svgwrite\n'), ((4984, 5019), 'numpy.rot90', 'np.rot90', (['sector_image'], {'axes': '(0, 1)'}), '(sector_image, axes=(0, 1))\n', (4992, 5019), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, division, print_function
import math
import os
import sys
import h5py
import numpy as np
from cctbx import factor_ev_angstrom
from cctbx.eltbx import attenuation_coefficient
from scitbx import matrix
from scitbx.array_family import flex
from dxtbx.format.FormatHDF5 import FormatHDF5
from dxtbx.format.FormatStill import FormatStill
from dxtbx.model import ParallaxCorrectedPxMmStrategy
from dxtbx.model.detector import Detector
# 151028: deepcopying this class causes crash in h5py
# temporary fix by closing the file in every methods(!)
# 161003: updated to follow dxtbx changes
# removed iotbx support, which was incomplete anyway
# 161005: get wavelength from the file
# 170929: read metadata for phase III and compact MPCCDs
# 171003: fix mask
# 180724: update 'understand' to exclude Rayonix data
class FormatHDF5SaclaMPCCD(FormatHDF5, FormatStill):
"""
Class to handle multi-event HDF5 files from MPCCD
preprocessed by Cheetah SFX pipeline at SACLA.
To handle reassembled images from "DataConvert3 -reconst"
(old pipeline), use FormatHDF5Sacla.
To override metrology, use the following environmental variables.
MPCCD_GEOMETRY, MPCCD_DISTANCE
MPCCD_RECONST_MODE
You can also specify reference_geometry in dials.stills_process.
"""
@staticmethod
def understand(image_file):
with h5py.File(image_file, "r") as h5_handle:
if "metadata/detector" in h5_handle:
if "Rayonix" in h5_handle["metadata/detector"][()]:
return False
for elem in h5_handle:
if elem.startswith("tag-"):
return True
return False
def __init__(self, image_file, index=0, reconst_mode=False, **kwargs):
self._raw_data = None
self.index = index
self.image_filename = image_file
super(FormatHDF5SaclaMPCCD, self).__init__(image_file, **kwargs)
self.PIXEL_SIZE = 50 / 1000 # 50 um
self.RECONST_SIZE = 2398 # compatible with DataConvert3 -reconst mode
# These hard-coded values can be overwritten
# by MPCCD_GEOMETRY and MPCCD_DISTANCE
#
# These values can be retrieved from SACLA API.
# Alternatively, you can get it from a CrystFEL geometry file by
# awk '/corner_x/{x=50*$3} /corner_y/{y=50*$3; printf x","y","rot","}
# /\/ss/{rot=-atan2($3, $4)/3.141592*180}' input.geom
# Default value for Phase I MPCCD
self.distance = 50.0 # mm
self.panel_origins = [
(-1755.000000, 51711.000000, 0.000000),
(-1711.000000, 24944.000000, 0.000000),
(817.000000, -1808.000000, 0.000000),
(812.000000, -28466.000000, 0.000000),
(-792.000000, 28544.000000, 0.000000),
(-781.000000, 1840.000000, 0.000000),
(1650.000000, -24900.000000, 0.000000),
(1655.000000, -51626.000000, 0.000000),
] # um
self.panel_rotations = [
-89.906197,
-89.915802,
-89.980003,
-89.929298,
89.963097,
89.880798,
90.000000,
90.029503,
]
self.thickness = 0.050
self.mask = None
# Read metadata if possible
self.read_metadata()
# Override by environmental variables
if "MPCCD_RECONST_MODE" in os.environ:
reconst_mode = bool(os.environ["MPCCD_RECONST_MODE"])
self.RECONST_MODE = reconst_mode
self.RECONST_64 = (
True # Set False if you want to keep panels completely horizontal
)
# But this makes errors bigger.
if "MPCCD_GEOMETRY" in os.environ:
try:
tmp = [float(i) for i in os.environ["MPCCD_GEOMETRY"].split(",")]
if len(tmp) != 24:
raise EnvironmentError(
"Environment variable MPCCD_GEOMETRY must contain 24 comma-separated parts"
)
for i in range(8):
self.panel_origins[i] = (-tmp[i * 3], tmp[i * 3 + 1], 0)
self.panel_rotations[i] = tmp[i * 3 + 2]
except Exception as e:
raise EnvironmentError(
"Invalid MPCCD Geometry specified in environment variable MPCCD_GEOMETRY: {}".format(
e
)
)
if "MPCCD_DISTANCE" in os.environ:
self.distance = float(os.environ["MPCCD_DISTANCE"])
def _start(self):
h5_handle = h5py.File(self.image_filename, "r")
self._images = sorted([tag for tag in h5_handle if tag.startswith("tag-")])
self.tag = self._images[self.index]
h5_handle.close()
def read_metadata(self):
h5_handle = h5py.File(self.image_filename, "r")
if "metadata" not in h5_handle:
return
try:
distance = h5_handle["metadata/distance_in_mm"][()]
panel_rotations = h5_handle["metadata/angle_in_rad"][()]
posx = h5_handle["metadata/posx_in_um"][()]
posy = h5_handle["metadata/posy_in_um"][()]
posz = h5_handle["metadata/posz_in_um"][()]
panel_origins = list(zip(posx, posy, posz))
sensor = h5_handle["metadata/sensor_id"][0]
thickness = 0.050
if sensor.startswith(b"MPCCD-8B"):
thickness = 0.300 # Phase 3 sensor
orig_mask = np.logical_not(h5_handle["metadata/pixelmask"][()])
mask = self.split_panels(orig_mask, bool=True)
except Exception:
return
self.distance = distance
self.panel_rotations = panel_rotations
self.panel_origins = panel_origins
self.thickness = thickness
self.mask = mask
def get_image_file(self, index=None):
return self.image_filename
def set_index(self, index):
assert index < len(self._images)
self.index = index
self.tag = self._images[self.index]
self._raw_data = None
def _detector(self, index=None):
wavelength = self.get_beam(index).get_wavelength()
table = attenuation_coefficient.get_table("Si")
mu = table.mu_at_angstrom(wavelength) / 10.0
px_mm = ParallaxCorrectedPxMmStrategy(mu, self.thickness)
if self.RECONST_MODE:
return self._detector_factory.simple(
sensor="PAD",
distance=self.distance,
beam_centre=(
self.RECONST_SIZE / 2 * self.PIXEL_SIZE,
self.RECONST_SIZE / 2 * self.PIXEL_SIZE,
),
fast_direction="-x",
slow_direction="-y",
pixel_size=(self.PIXEL_SIZE, self.PIXEL_SIZE),
image_size=(self.RECONST_SIZE, self.RECONST_SIZE),
trusted_range=(-1, 65535),
mask=[],
) # TODO: add gaps
detector = Detector()
root = detector.hierarchy()
root.set_frame((-1, 0, 0), (0, 1, 0), (0, 0, -self.distance))
for i in range(8):
angle = math.pi * self.panel_rotations[i] / 180.0
fast = matrix.col((math.cos(angle), math.sin(angle), 0))
slow = matrix.col((-math.sin(angle), math.cos(angle), 0))
origin = (
matrix.col(
(
-self.panel_origins[i][0],
self.panel_origins[i][1],
self.panel_origins[i][2],
)
)
/ 1000.0
)
p = root.add_panel()
p.set_type("SENSOR_PAD")
p.set_name("Panel%d" % i)
p.set_image_size((512, 1024))
p.set_trusted_range((-1, 65535))
p.set_pixel_size((self.PIXEL_SIZE, self.PIXEL_SIZE))
p.set_thickness(self.thickness)
p.set_local_frame(fast.elems, slow.elems, origin.elems)
p.set_px_mm_strategy(px_mm)
p.set_gain(10)
return detector
def _beam(self):
h5_handle = h5py.File(self.image_filename, "r")
eV = h5_handle[self.tag]["photon_energy_ev"][()]
h5_handle.close()
return self._beam_factory.simple(factor_ev_angstrom / eV)
def get_num_images(self):
return len(self._images)
def split_panels(self, img, bool=False):
tmp = []
for i in range(8):
xmin, ymin, xmax, ymax = 0, i * 1024, 512, (i + 1) * 1024
# To avoid "numpy.ndarray instance is not contiguous"
if bool:
source = np.ascontiguousarray(img[ymin:ymax, xmin:xmax])
tmp.append(flex.bool(source))
else:
source = np.ascontiguousarray(img[ymin:ymax, xmin:xmax], dtype=np.int32)
tmp.append(flex.int(source))
return tuple(tmp)
def get_raw_data(self, index=None):
if index is not None and self.index != index:
self.set_index(index)
if self._raw_data is None:
if self.RECONST_MODE:
self._raw_data = flex.int(self.reconst_image())
else:
h5_handle = h5py.File(self.image_filename, "r")
data = h5_handle[self.tag]["data"][()] # .astype(np.int32)
# [()] forces conversion to ndarray
# this is 8192x512 (slow/fast) tiled image
h5_handle.close()
self._raw_data = self.split_panels(data)
return self._raw_data
def get_active_areas(self):
assert self.RECONST_MODE
return self.active_areas
def reconst_image(self):
det = np.empty((self.RECONST_SIZE, self.RECONST_SIZE), dtype="int32")
det.fill(-1)
h5_handle = h5py.File(self.image_filename, "r")
data = h5_handle[self.tag]["data"][()].astype(np.int32)
h5_handle.close()
self.active_areas = []
for i in range(8):
angle = math.pi * self.panel_rotations[i] / 180.0
fast = matrix.col((math.cos(angle), math.sin(angle)))
slow = matrix.col((-math.sin(angle), math.cos(angle)))
origin = matrix.col(
(
-self.panel_origins[i][0] / self.PIXEL_SIZE / 1000
+ self.RECONST_SIZE / 2,
-self.panel_origins[i][1] / self.PIXEL_SIZE / 1000
+ self.RECONST_SIZE / 2,
)
)
if self.RECONST_64:
size_fast = 256
size_slow = 256
for j in range(2):
for k in range(4):
xmin, ymin, xmax, ymax = (
j * size_slow,
(i * 4 + k) * size_fast,
(j + 1) * size_slow,
(i * 4 + k + 1) * size_fast,
)
source = data[ymin:ymax, xmin:xmax].transpose()
subpanel_origin = (
origin - j * size_fast * fast + k * size_slow * slow
)
if abs(round(self.panel_rotations[i]) + 90) < 1:
det[
int(round(subpanel_origin[1])) : int(
round(subpanel_origin[1] + size_slow)
),
int(round(subpanel_origin[0])) : int(
round(subpanel_origin[0] + size_fast)
),
] = source
# TODO: Is the border inclusive?
self.active_areas.extend(
[
round(subpanel_origin[1]),
round(subpanel_origin[0]),
round(subpanel_origin[1] + size_slow),
round(subpanel_origin[0] + size_fast),
]
)
elif abs(round(self.panel_rotations[i]) - 90) < 1:
det[
int(round(subpanel_origin[1])) : int(
round(subpanel_origin[1] - size_slow)
) : -1,
int(round(subpanel_origin[0])) : int(
round(subpanel_origin[0] - size_fast)
) : -1,
] = source
self.active_areas.extend(
[
round(subpanel_origin[1] - size_slow),
round(subpanel_origin[0] - size_fast),
round(subpanel_origin[1]),
round(subpanel_origin[0]),
]
)
else:
raise RuntimeError(
"Panel angle deviation is too large! Do not use reconst mode!"
)
else:
size_fast = 1024
size_slow = 512
xmin, ymin, xmax, ymax = (
0,
i * size_fast,
size_slow,
(i + 1) * size_fast,
)
source = data[ymin:ymax, xmin:xmax].transpose()
if abs(round(self.panel_rotations[i]) + 90) < 1:
det[
round(origin[1]) : round(origin[1] + size_slow),
round(origin[0]) : round(origin[0] + size_fast),
] = source
elif abs(round(self.panel_rotations[i]) - 90) < 1:
det[
round(origin[1]) : round(origin[1] - size_slow) : -1,
round(origin[0]) : round(origin[0] - size_fast) : -1,
] = source
else:
raise RuntimeError(
"Panel angle deviation is too large! Do not use reconst mode!"
)
self.active_areas = [int(aa) for aa in self.active_areas]
return det
def get_detector(self, index=None):
if self._detector_instance is None:
self._detector_instance = self._detector()
return self._detector_instance
def get_static_mask(self):
# This means when the pixel mask is present, trusted region is ignored.
# The used provided masks (if any) will be automatically merged.
# see https://github.com/dials/dials/issues/236
return self.mask
def get_beam(self, index=None):
if index is not None and self.index != index:
self.set_index(index)
self._beam_instance = None
if self._beam_instance is None:
self._beam_instance = self._beam()
return self._beam_instance
if __name__ == "__main__":
print(FormatHDF5SaclaMPCCD.understand(sys.argv[1]))
FormatHDF5SaclaMPCCD(sys.argv[1])
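# Illustrative sketch (added; not part of the original format class). "run123.h5"
# is a placeholder for a Cheetah-preprocessed MPCCD file; only accessors defined
# above are used:
#   fmt = FormatHDF5SaclaMPCCD("run123.h5")
#   n = fmt.get_num_images()
#   panels = fmt.get_raw_data(0)   # tuple of eight flex arrays, one per panel
#   detector = fmt.get_detector()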
|
[
"cctbx.eltbx.attenuation_coefficient.get_table",
"scitbx.array_family.flex.bool",
"numpy.logical_not",
"scitbx.array_family.flex.int",
"dxtbx.model.ParallaxCorrectedPxMmStrategy",
"h5py.File",
"numpy.ascontiguousarray",
"math.cos",
"scitbx.matrix.col",
"numpy.empty",
"dxtbx.model.detector.Detector",
"math.sin"
] |
[((4656, 4691), 'h5py.File', 'h5py.File', (['self.image_filename', '"""r"""'], {}), "(self.image_filename, 'r')\n", (4665, 4691), False, 'import h5py\n'), ((4897, 4932), 'h5py.File', 'h5py.File', (['self.image_filename', '"""r"""'], {}), "(self.image_filename, 'r')\n", (4906, 4932), False, 'import h5py\n'), ((6279, 6318), 'cctbx.eltbx.attenuation_coefficient.get_table', 'attenuation_coefficient.get_table', (['"""Si"""'], {}), "('Si')\n", (6312, 6318), False, 'from cctbx.eltbx import attenuation_coefficient\n'), ((6388, 6437), 'dxtbx.model.ParallaxCorrectedPxMmStrategy', 'ParallaxCorrectedPxMmStrategy', (['mu', 'self.thickness'], {}), '(mu, self.thickness)\n', (6417, 6437), False, 'from dxtbx.model import ParallaxCorrectedPxMmStrategy\n'), ((7084, 7094), 'dxtbx.model.detector.Detector', 'Detector', ([], {}), '()\n', (7092, 7094), False, 'from dxtbx.model.detector import Detector\n'), ((8240, 8275), 'h5py.File', 'h5py.File', (['self.image_filename', '"""r"""'], {}), "(self.image_filename, 'r')\n", (8249, 8275), False, 'import h5py\n'), ((9838, 9901), 'numpy.empty', 'np.empty', (['(self.RECONST_SIZE, self.RECONST_SIZE)'], {'dtype': '"""int32"""'}), "((self.RECONST_SIZE, self.RECONST_SIZE), dtype='int32')\n", (9846, 9901), True, 'import numpy as np\n'), ((9944, 9979), 'h5py.File', 'h5py.File', (['self.image_filename', '"""r"""'], {}), "(self.image_filename, 'r')\n", (9953, 9979), False, 'import h5py\n'), ((1411, 1437), 'h5py.File', 'h5py.File', (['image_file', '"""r"""'], {}), "(image_file, 'r')\n", (1420, 1437), False, 'import h5py\n'), ((5572, 5623), 'numpy.logical_not', 'np.logical_not', (["h5_handle['metadata/pixelmask'][()]"], {}), "(h5_handle['metadata/pixelmask'][()])\n", (5586, 5623), True, 'import numpy as np\n'), ((10346, 10520), 'scitbx.matrix.col', 'matrix.col', (['(-self.panel_origins[i][0] / self.PIXEL_SIZE / 1000 + self.RECONST_SIZE / 2,\n -self.panel_origins[i][1] / self.PIXEL_SIZE / 1000 + self.RECONST_SIZE / 2)'], {}), '((-self.panel_origins[i][0] / self.PIXEL_SIZE / 1000 + self.\n RECONST_SIZE / 2, -self.panel_origins[i][1] / self.PIXEL_SIZE / 1000 + \n self.RECONST_SIZE / 2))\n', (10356, 10520), False, 'from scitbx import matrix\n'), ((7470, 7566), 'scitbx.matrix.col', 'matrix.col', (['(-self.panel_origins[i][0], self.panel_origins[i][1], self.panel_origins[i][2])'], {}), '((-self.panel_origins[i][0], self.panel_origins[i][1], self.\n panel_origins[i][2]))\n', (7480, 7566), False, 'from scitbx import matrix\n'), ((8763, 8810), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img[ymin:ymax, xmin:xmax]'], {}), '(img[ymin:ymax, xmin:xmax])\n', (8783, 8810), True, 'import numpy as np\n'), ((8900, 8963), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img[ymin:ymax, xmin:xmax]'], {'dtype': 'np.int32'}), '(img[ymin:ymax, xmin:xmax], dtype=np.int32)\n', (8920, 8963), True, 'import numpy as np\n'), ((9347, 9382), 'h5py.File', 'h5py.File', (['self.image_filename', '"""r"""'], {}), "(self.image_filename, 'r')\n", (9356, 9382), False, 'import h5py\n'), ((7322, 7337), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (7330, 7337), False, 'import math\n'), ((7339, 7354), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (7347, 7354), False, 'import math\n'), ((7409, 7424), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (7417, 7424), False, 'import math\n'), ((8838, 8855), 'scitbx.array_family.flex.bool', 'flex.bool', (['source'], {}), '(source)\n', (8847, 8855), False, 'from scitbx.array_family import flex\n'), ((8991, 9007), 'scitbx.array_family.flex.int', 
'flex.int', (['source'], {}), '(source)\n', (8999, 9007), False, 'from scitbx.array_family import flex\n'), ((10223, 10238), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (10231, 10238), False, 'import math\n'), ((10240, 10255), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (10248, 10255), False, 'import math\n'), ((10307, 10322), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (10315, 10322), False, 'import math\n'), ((7392, 7407), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (7400, 7407), False, 'import math\n'), ((10290, 10305), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (10298, 10305), False, 'import math\n')]
|
import numpy as np
import numba as nb
import matplotlib.pyplot as plt
from scipy import interpolate
class Similarity(object):
"""
Class to compute the similarity between two diffraction patterns
"""
def __init__(self, f, g, N = None, x_range = None, l = 2.0, weight = 'cosine'):
"""
Args:
f: spectra1 (2D array)
g: spectra2 (2D array)
N: number of sampling points for the processed spectra
x_range: the range of x values used to compute similarity ([x_min, x_max])
l: cutoff value for shift (real)
weight: weight function 'triangle' or 'cosine' (str)
"""
self.fx, self.fy = f[0], f[1]
self.gx, self.gy = g[0], g[1]
self.x_range = x_range
self.l = abs(l)
res1 = (self.fx[-1] - self.fx[0])/len(self.fx)
res2 = (self.gx[-1] - self.gx[0])/len(self.gx)
self.resolution = min([res1, res2])/3 # improve the resolution
if N is None:
self.N = int(2*self.l/self.resolution)
else:
self.N = N
self.r = np.linspace(-self.l, self.l, self.N)
self.preprocess()
self.weight = weight
if self.weight == 'triangle':
self.triangleFunction()
elif self.weight == 'cosine':
self.cosineFunction()
else:
            msg = weight + ' is not supported'
raise NotImplementedError(msg)
def calculate(self):
self.S = self._calculate(self.r,self.w,self.d,self.Npts,self.fy,self.gy)
return self.S
@staticmethod
@nb.njit(nb.f8(nb.f8[:], nb.f8[:], nb.f8, nb.i8, nb.f8[:], nb.f8[:]), nopython=True)
def _calculate(r,w,d,Npts,fy,gy):
"""
Compute the similarity between the pair of spectra f, g
with an approximated Simpson rule
"""
xCorrfg_w = 0
aCorrff_w = 0
aCorrgg_w = 0
count0 = 0
count = 0
for r0, w0 in zip(r, w):
Corrfg, Corrff, Corrgg = 0, 0, 0
for i in range(Npts):
shift = int(round(r0/d))
if 0 <= i + shift <= Npts-1:
if count == 0:
coef = 1/3
elif count %2 == 1:
coef = 4/3
else:
coef = 2/3
count += 1
Corrfg += coef*fy[i]*gy[i+shift]
Corrff += coef*fy[i]*fy[i+shift]
Corrgg += coef*gy[i]*gy[i+shift]
if count0 == 0:
coef = 1/3
elif count0 %2 == 1:
coef = 4/3
else:
coef = 2/3
count0 += 1
xCorrfg_w += coef*w0*Corrfg
aCorrff_w += coef*w0*Corrff
aCorrgg_w += coef*w0*Corrgg
return np.abs(xCorrfg_w / np.sqrt(aCorrff_w * aCorrgg_w))
def preprocess(self):
"""
Preprocess the input spectra f and g
"""
        if self.x_range is None:
x_min = max(np.min(self.fx), np.min(self.gx))
x_max = min(np.max(self.fx), np.max(self.gx))
self.x_range = [x_min,x_max]
else:
x_min, x_max = self.x_range[0], self.x_range[1]
f_inter = interpolate.interp1d(self.fx, self.fy, 'cubic', fill_value = 'extrapolate')
g_inter = interpolate.interp1d(self.gx, self.gy, 'cubic', fill_value = 'extrapolate')
fgx_new = np.linspace(x_min, x_max, int((x_max-x_min)/self.resolution)+1)
fy_new = f_inter(fgx_new)
gy_new = g_inter(fgx_new)
self.fx, self.fy, self.gy = fgx_new, fy_new, gy_new
self.Npts = len(self.fx)
self.d = (self.fx[-1] - self.fx[0])/self.Npts
def triangleFunction(self):
"""
Triangle function to weight correlations
"""
w = np.zeros((self.N))
l = self.l
for i in range(self.r.shape[0]):
r = np.abs(self.r[i])
if r <= l:
tf = lambda r,l : 1 - r/l
w[i] = tf(r,l)
else:
w[i] = 0
self.w = w
def cosineFunction(self):
"""
cosine function to weight correlations
"""
w = np.zeros((self.N))
l = self.l
for i in range(self.r.shape[0]):
r = np.abs(self.r[i])
if r <= l:
tf = lambda r,l : 0.5 * (np.cos(np.pi * r/l) + 1)
w[i] = tf(r,l)
else:
w[i] = 0
self.w = w
def showPlot(self):
fig1 = plt.figure(1,figsize=(15,6))
frame1=fig1.add_axes((.1,.3,.8,.6))
plt.plot(self.fx,self.fy,label='pxrd')
plt.plot(self.gx,-self.gy,label='vesta')
plt.legend()
#Residual plot
residuals = self.gy-self.fy
frame2=fig1.add_axes((.1,.1,.8,.2))
plt.plot(self.gx,residuals,'.r', markersize = 0.5)
plt.title("{:6f}".format(self.S))
plt.show()
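# Illustrative usage sketch (added; not part of the original module). The spectra
# below are synthetic (2, N) arrays: row 0 is the x grid, row 1 the intensities.
if __name__ == "__main__":
    _x = np.linspace(5, 50, 2001)
    _f = np.vstack((_x, np.exp(-(_x - 20.0) ** 2)))
    _g = np.vstack((_x, np.exp(-(_x - 20.3) ** 2)))
    print(Similarity(_f, _g, l=2.0, weight='cosine').calculate())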
|
[
"numpy.abs",
"numpy.sqrt",
"numba.f8",
"matplotlib.pyplot.plot",
"scipy.interpolate.interp1d",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.cos",
"numpy.min",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((1095, 1131), 'numpy.linspace', 'np.linspace', (['(-self.l)', 'self.l', 'self.N'], {}), '(-self.l, self.l, self.N)\n', (1106, 1131), True, 'import numpy as np\n'), ((1606, 1665), 'numba.f8', 'nb.f8', (['nb.f8[:]', 'nb.f8[:]', 'nb.f8', 'nb.i8', 'nb.f8[:]', 'nb.f8[:]'], {}), '(nb.f8[:], nb.f8[:], nb.f8, nb.i8, nb.f8[:], nb.f8[:])\n', (1611, 1665), True, 'import numba as nb\n'), ((3317, 3390), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.fx', 'self.fy', '"""cubic"""'], {'fill_value': '"""extrapolate"""'}), "(self.fx, self.fy, 'cubic', fill_value='extrapolate')\n", (3337, 3390), False, 'from scipy import interpolate\n'), ((3411, 3484), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['self.gx', 'self.gy', '"""cubic"""'], {'fill_value': '"""extrapolate"""'}), "(self.gx, self.gy, 'cubic', fill_value='extrapolate')\n", (3431, 3484), False, 'from scipy import interpolate\n'), ((3922, 3938), 'numpy.zeros', 'np.zeros', (['self.N'], {}), '(self.N)\n', (3930, 3938), True, 'import numpy as np\n'), ((4317, 4333), 'numpy.zeros', 'np.zeros', (['self.N'], {}), '(self.N)\n', (4325, 4333), True, 'import numpy as np\n'), ((4652, 4682), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(15, 6)'}), '(1, figsize=(15, 6))\n', (4662, 4682), True, 'import matplotlib.pyplot as plt\n'), ((4738, 4778), 'matplotlib.pyplot.plot', 'plt.plot', (['self.fx', 'self.fy'], {'label': '"""pxrd"""'}), "(self.fx, self.fy, label='pxrd')\n", (4746, 4778), True, 'import matplotlib.pyplot as plt\n'), ((4785, 4827), 'matplotlib.pyplot.plot', 'plt.plot', (['self.gx', '(-self.gy)'], {'label': '"""vesta"""'}), "(self.gx, -self.gy, label='vesta')\n", (4793, 4827), True, 'import matplotlib.pyplot as plt\n'), ((4834, 4846), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4844, 4846), True, 'import matplotlib.pyplot as plt\n'), ((4966, 5016), 'matplotlib.pyplot.plot', 'plt.plot', (['self.gx', 'residuals', '""".r"""'], {'markersize': '(0.5)'}), "(self.gx, residuals, '.r', markersize=0.5)\n", (4974, 5016), True, 'import matplotlib.pyplot as plt\n'), ((5067, 5077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5075, 5077), True, 'import matplotlib.pyplot as plt\n'), ((4017, 4034), 'numpy.abs', 'np.abs', (['self.r[i]'], {}), '(self.r[i])\n', (4023, 4034), True, 'import numpy as np\n'), ((4412, 4429), 'numpy.abs', 'np.abs', (['self.r[i]'], {}), '(self.r[i])\n', (4418, 4429), True, 'import numpy as np\n'), ((2904, 2934), 'numpy.sqrt', 'np.sqrt', (['(aCorrff_w * aCorrgg_w)'], {}), '(aCorrff_w * aCorrgg_w)\n', (2911, 2934), True, 'import numpy as np\n'), ((3091, 3106), 'numpy.min', 'np.min', (['self.fx'], {}), '(self.fx)\n', (3097, 3106), True, 'import numpy as np\n'), ((3108, 3123), 'numpy.min', 'np.min', (['self.gx'], {}), '(self.gx)\n', (3114, 3123), True, 'import numpy as np\n'), ((3149, 3164), 'numpy.max', 'np.max', (['self.fx'], {}), '(self.fx)\n', (3155, 3164), True, 'import numpy as np\n'), ((3166, 3181), 'numpy.max', 'np.max', (['self.gx'], {}), '(self.gx)\n', (3172, 3181), True, 'import numpy as np\n'), ((4494, 4515), 'numpy.cos', 'np.cos', (['(np.pi * r / l)'], {}), '(np.pi * r / l)\n', (4500, 4515), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.utils import class_weight
import math
import time
import cv2
import itertools
import os
import shutil
import random
from tensorflow.keras import backend as K
#Load the CSV files
training_csv = pd.read_csv('Manually_CSV/training.csv',dtype=str)
validation_csv = pd.read_csv('Manually_CSV/validation.csv',dtype=str)
print("PREDATAGEN")
train_datagen = ImageDataGenerator(rescale=1./255, horizontal_flip=True, rotation_range=30,brightness_range=[0.6,1.4], featurewise_center=True )
test_datagen = ImageDataGenerator(rescale=1./255, featurewise_center=True)
train_datagen.mean = np.array([0.53990436 , 0.4405486 , 0.39328504], dtype=np.float32).reshape((1,1,3))
test_datagen.mean = np.array([0.53990436 , 0.4405486 , 0.39328504], dtype=np.float32).reshape((1,1,3))
print("PRETRAIN")
train_generator= train_datagen.flow_from_dataframe(
dataframe=training_csv,
directory="Manually_Annotated_Images//",
x_col="subDirectory_filePath",
y_col="expression",
weight_col=None,
target_size=(123, 123),
color_mode="rgb",
class_mode="categorical",
batch_size=64,
subset=None,
interpolation="nearest",
validate_filenames=True,
classes=["0","1","2","3","4","5","6","7","8","9","10"]
)
validation_generator = test_datagen.flow_from_dataframe(
dataframe=validation_csv,
directory="Manually_Annotated_Images//",
x_col="subDirectory_filePath",
y_col="expression",
weight_col=None,
target_size=(123, 123),
color_mode="rgb",
class_mode="categorical",
batch_size=64,
subset=None,
interpolation="nearest",
validate_filenames=True,
classes=["0","1","2","3","4","5","6","7","8","9","10"]
)
class_weights = class_weight.compute_class_weight('balanced',np.unique(train_generator.classes),train_generator.classes)
print(class_weights)
weight = {i: class_weights[i] for i in range(11)}
model = tf.keras.applications.MobileNetV2(
input_shape=(123,123,3),
include_top=True,
weights=None,
input_tensor=None,
pooling='max',
classes=11,
classifier_activation="softmax",
)
opt= keras.optimizers.Adam(learning_rate=0.001)
model.compile(
loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy']
)
model.summary()
checkpoint = ModelCheckpoint('model.h5', save_best_only=True)
STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
STEP_SIZE_VALID=validation_generator.n//validation_generator.batch_size
history= model.fit(train_generator,
epochs=100,
verbose=1,
callbacks=[checkpoint],
validation_data=validation_generator,
shuffle=True,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_steps=STEP_SIZE_VALID,
class_weight=weight
)
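# Illustrative follow-up (added; not part of the original script): the best
# checkpoint written by ModelCheckpoint can later be reloaded for inference with
# the standard Keras API, e.g.:
#   best = tf.keras.models.load_model('model.h5')
#   probs = best.predict(validation_generator, steps=STEP_SIZE_VALID)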
|
[
"numpy.unique",
"pandas.read_csv",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.optimizers.Adam",
"numpy.array",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.applications.MobileNetV2"
] |
[((530, 581), 'pandas.read_csv', 'pd.read_csv', (['"""Manually_CSV/training.csv"""'], {'dtype': 'str'}), "('Manually_CSV/training.csv', dtype=str)\n", (541, 581), True, 'import pandas as pd\n'), ((598, 651), 'pandas.read_csv', 'pd.read_csv', (['"""Manually_CSV/validation.csv"""'], {'dtype': 'str'}), "('Manually_CSV/validation.csv', dtype=str)\n", (609, 651), True, 'import pandas as pd\n'), ((687, 824), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'horizontal_flip': '(True)', 'rotation_range': '(30)', 'brightness_range': '[0.6, 1.4]', 'featurewise_center': '(True)'}), '(rescale=1.0 / 255, horizontal_flip=True, rotation_range=\n 30, brightness_range=[0.6, 1.4], featurewise_center=True)\n', (705, 824), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((831, 893), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'featurewise_center': '(True)'}), '(rescale=1.0 / 255, featurewise_center=True)\n', (849, 893), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2221, 2401), 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'input_shape': '(123, 123, 3)', 'include_top': '(True)', 'weights': 'None', 'input_tensor': 'None', 'pooling': '"""max"""', 'classes': '(11)', 'classifier_activation': '"""softmax"""'}), "(input_shape=(123, 123, 3), include_top=\n True, weights=None, input_tensor=None, pooling='max', classes=11,\n classifier_activation='softmax')\n", (2254, 2401), True, 'import tensorflow as tf\n'), ((2414, 2456), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (2435, 2456), True, 'import tensorflow.keras as keras\n'), ((2607, 2655), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""model.h5"""'], {'save_best_only': '(True)'}), "('model.h5', save_best_only=True)\n", (2622, 2655), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((2079, 2113), 'numpy.unique', 'np.unique', (['train_generator.classes'], {}), '(train_generator.classes)\n', (2088, 2113), True, 'import numpy as np\n'), ((913, 976), 'numpy.array', 'np.array', (['[0.53990436, 0.4405486, 0.39328504]'], {'dtype': 'np.float32'}), '([0.53990436, 0.4405486, 0.39328504], dtype=np.float32)\n', (921, 976), True, 'import numpy as np\n'), ((1017, 1080), 'numpy.array', 'np.array', (['[0.53990436, 0.4405486, 0.39328504]'], {'dtype': 'np.float32'}), '([0.53990436, 0.4405486, 0.39328504], dtype=np.float32)\n', (1025, 1080), True, 'import numpy as np\n')]
|
import datetime
import inspect
from io import BytesIO
import os
import pickle
import shutil
import tempfile
import unittest
from unittest.mock import patch
import asdf
import numpy
from numpy.testing import assert_array_equal
from scipy.sparse import csr_matrix
from modelforge import configuration, storage_backend
from modelforge.backends import create_backend
import modelforge.index as ind
from modelforge.meta import generate_new_meta
from modelforge.model import assemble_sparse_matrix, disassemble_sparse_matrix, \
merge_strings, Model, split_strings
from modelforge.models import GenericModel, register_model
import modelforge.tests.fake_dulwich as fake_git
from modelforge.tests.fake_requests import FakeRequests
@register_model
class FakeDocfreqModel(Model):
NAME = "docfreq"
VENDOR = "source{d}"
DESCRIPTION = "document frequencies"
tree = {}
def _load_tree(self, tree):
self.docs = tree["docs"]
self.tree = tree
def dump(self):
return str(self.docs)
def _generate_tree(self) -> dict:
return self.tree
class Model1(Model):
NAME = "model1"
VENDOR = "source{d}"
DESCRIPTION = "model1"
def _load_tree(self, tree):
pass
def dump(self):
return "model1"
class Model2(Model):
NAME = "model2"
VENDOR = "source{d}"
DESCRIPTION = "model2"
def _load_tree(self, tree):
pass
def dump(self):
return "model2"
class Model3(Model):
NAME = "model3"
VENDOR = "source{d}"
DESCRIPTION = "model3"
def _load_tree(self, tree):
pass
class Model4(Model):
NAME = "model4"
VENDOR = "source{d}"
DESCRIPTION = "model4"
def dump(self):
return str(self.xxx)
class Model5(Model):
NAME = "aux"
VENDOR = "source{d}"
DESCRIPTION = "aux"
def _load_tree(self, tree):
pass
class Model6(Model5):
NAME = "docfreq"
VENDOR = "source{d}"
DESCRIPTION = "docfreq"
def _load_tree(self, tree):
pass
class Model7(Model6):
NAME = "xxx"
VENDOR = "source{d}"
DESCRIPTION = "xxx"
def _load_tree(self, tree):
pass
class Model8(Model):
NAME = "model8"
VENDOR = "source{d}"
DESCRIPTION = "model8"
def _load_tree(self, tree):
self.tree = tree
def _generate_tree(self):
return {"abc": 777}
def dump(self):
return "model8"
class NumpyArray(Model):
NAME = "numpy_array"
VENDOR = "source{d}"
DESCRIPTION = "test numpy array pickling"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.array = numpy.random.normal(16)
def _generate_tree(self):
return {"array": self.array}
def _load_tree(self, tree):
self.array = tree["array"]
class FakeIndex:
def __init__(self, index):
self.index = index
def get_path(name):
return os.path.join(os.path.dirname(__file__), name)
def generate_meta(name, version):
meta = generate_new_meta(name, "test", "source{d}", "Proprietary")
meta["version"] = version
return meta
UUID = "625557b5-4f2e-4ebb-bd6d-0a7083b1cf06"
PARENT_UUID = "bf0e7b04-a3ea-4b42-8274-a97f192fa15a"
SIZE = 110712 # do *not* use os.stat
class ModelTests(unittest.TestCase):
MODEL_PATH = "test.asdf"
cached_path = "/tmp/modelforge-test-cache"
default_url = "https://github.com/src-d/models"
templates_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates")
default_index = {
"models": {
"docfreq": {
UUID: {
"url": "https://xxx",
"created_at": "13:00",
"code": "model_code %s",
"description": "model_description"},
"1e3da42a-28b6-4b33-94a2-a5671f4102f4": {
"source": "https://xxx",
"license": "Proprietary",
"created_at": "13:00",
"code": "%s",
"description": ""
}}},
"meta": {
"docfreq": {
"code": "readme_code %s",
"description": "readme_description",
"default": UUID}}}
def setUp(self):
ind.git = fake_git
ind.Repo = fake_git.FakeRepo
fake_git.FakeRepo.reset(self.default_index)
self.backend = create_backend(
git_index=ind.GitIndex(remote=self.default_url, cache=self.cached_path))
def clear(self):
if os.path.exists(self.cached_path):
shutil.rmtree(os.path.expanduser(self.cached_path))
def tearDown(self):
self.clear()
from dulwich.repo import Repo
ind.Repo = Repo
from dulwich import porcelain as git
ind.git = git
def test_file(self):
model = GenericModel(source=get_path(self.MODEL_PATH))
self._validate_meta(model)
vendor = configuration.VENDOR
configuration.VENDOR = None
try:
model = GenericModel(source=get_path(self.MODEL_PATH))
self._validate_meta(model)
finally:
configuration.VENDOR = vendor
def test_error(self):
with self.assertRaises(ValueError):
GenericModel(source=UUID)
def test_id(self):
def route(url):
self.assertEqual("https://xxx", url)
with open(get_path(self.MODEL_PATH), "rb") as fin:
return fin.read()
storage_backend.requests = FakeRequests(route)
cleaned = False
def fake_rmtree(path):
nonlocal cleaned
cleaned = True
with patch("shutil.rmtree", fake_rmtree):
model = GenericModel(source=UUID, backend=self.backend)
self._validate_meta(model)
self.assertTrue(cleaned)
def test_url(self):
def route(url):
self.assertEqual("https://xxx", url)
with open(get_path(self.MODEL_PATH), "rb") as fin:
return fin.read()
storage_backend.requests = FakeRequests(route)
model = GenericModel(source="https://xxx", backend=self.backend)
self.assertEqual(model.source, "https://xxx")
self._validate_meta(model)
def test_auto(self):
class FakeModel(GenericModel):
NAME = "docfreq"
def route(url):
self.assertEqual("https://xxx", url)
with open(get_path(self.MODEL_PATH), "rb") as fin:
return fin.read()
storage_backend.requests = FakeRequests(route)
model = FakeModel(backend=self.backend)
self.assertEqual(model.source, "https://xxx")
self._validate_meta(model)
def test_bad_code(self):
def route(url):
self.assertEqual("https://bad_code", url)
return 404
storage_backend.requests = FakeRequests(route)
with self.assertRaises(ValueError):
GenericModel(source="https://bad_code", backend=self.backend)
def test_init_with_model(self):
model1 = FakeDocfreqModel().load(source=get_path(self.MODEL_PATH))
# init with correct model
FakeDocfreqModel(source=model1)
# init with wrong model
with self.assertRaises(TypeError):
Model1().load(source=model1)
def test_repr_str_empty(self):
model = FakeDocfreqModel()
self.assertIsInstance(str(model), str)
self.assertIsInstance(repr(model), str)
def test_repr_str(self):
self.maxDiff = None
path = get_path(self.MODEL_PATH)
model = FakeDocfreqModel().load(source=path)
repr1 = repr(model)
try:
self.assertIn("test_model.py].FakeDocfreqModel().load(source=\"%s\")" % path, repr1)
except AssertionError:
self.assertEqual("modelforge.tests.test_model.FakeDocfreqModel().load(source=\"%s\")"
% path, repr1)
str1 = str(model)
self.assertEqual(len(str1.split("\n")), 14)
self.assertIn("'%s'" % FakeDocfreqModel.NAME, str1)
self.assertIn("'uuid': '%s'" % UUID, str1)
model = FakeDocfreqModel().load(source=path)
str2 = str(model)
self.assertEqual(len(str2.split("\n")), 14)
model = FakeDocfreqModel().load(source=path)
self.assertEqual(model.description, "test description")
self.assertNotEqual(model.description, FakeDocfreqModel.DESCRIPTION)
repr2 = repr(model)
self.assertEqual("[%s].FakeDocfreqModel().load(source=\"%s\")"
% (os.path.realpath(__file__), path), repr2)
def test_repr_main(self):
path = get_path(self.MODEL_PATH)
model = FakeDocfreqModel().load(source=path)
module = inspect.getmodule(model)
module.__name__ = "__main__"
module.__spec__ = None
module_file = module.__file__
del module.__file__
try:
repr2 = repr(model)
finally:
module.__file__ = module_file
self.assertEqual("[unknown].FakeDocfreqModel().load(source=\"%s\")" % path, repr2)
def test_get_dep(self):
model = FakeDocfreqModel().load(source=get_path(self.MODEL_PATH))
model.meta["dependencies"] = [{"model": "xxx", "uuid": "yyy"},
{"model": "zzz", "uuid": None}]
self.assertEqual(model.get_dep("xxx")["uuid"], "yyy")
def _validate_meta(self, model):
self.assertEqual(model.size, SIZE)
meta = model.meta
self.assertIsInstance(meta, dict)
valid_meta = {
"created_at": datetime.datetime(2017, 6, 19, 9, 59, 14, 766638),
"dependencies": [],
"model": "docfreq",
"parent": PARENT_UUID,
"license": "MIT",
"uuid": UUID,
"version": [1, 0, 1]
}
for key, val in valid_meta.items():
self.assertEqual(meta[key], val, key)
def test_uninitialized_dump(self):
text = str(Model4())
try:
self.assertIn("test_model.py].Model4().load(source=None)", text)
except AssertionError:
self.assertEqual("modelforge.tests.test_model.Model4().load(source=None)", text)
def test_name_check(self):
Model5().load(source=get_path(self.MODEL_PATH))
Model6().load(source=get_path(self.MODEL_PATH))
with self.assertRaises(ValueError):
Model7().load(source=get_path(self.MODEL_PATH))
def test_derive(self):
path = get_path(self.MODEL_PATH)
model = FakeDocfreqModel().load(source=path)
self.assertEqual(model._initial_version, [1, 0, 1])
mid = model.uuid
model.derive()
self.assertEqual(model._initial_version, [1, 0, 1])
self.assertEqual(model.version, [1, 0, 2])
self.assertEqual(model.parent, mid)
model.derive((2, 0, 0))
self.assertEqual(model.version, [2, 0, 0])
self.assertEqual(model.parent, mid)
with self.assertRaises(ValueError):
model.derive("1.2.3")
def test_derive_init(self):
model = Model8()
with BytesIO() as f:
model.save(f, "series")
self.assertEqual(model.version, [1, 0, 0])
def test_derive_save(self):
model = FakeDocfreqModel().load(source=get_path(self.MODEL_PATH))
mid = model.uuid
model.derive()
self.assertEqual(model.version, [1, 0, 2])
with BytesIO() as f:
model.save(f)
self.assertEqual(model.version, [1, 0, 2])
self.assertEqual(model.parent, mid)
mid = model.uuid
model.derive()
self.assertEqual(model.version, [1, 0, 3])
self.assertEqual(model.parent, mid)
def test_set_dep(self):
model1 = Model1()
model2 = Model2()
model1.set_dep(model2)
self.assertIs(model1.get_dep("model2"), model2.meta)
def test_props(self):
path = get_path(self.MODEL_PATH)
model = FakeDocfreqModel().load(source=path)
for n in ("references", "datasets", "code"):
with self.assertRaises(KeyError):
getattr(model, n)
self.assertEqual(model.version, [1, 0, 1])
self.assertEqual(model.created_at, datetime.datetime(2017, 6, 19, 9, 59, 14, 766638))
def test_init_version(self):
self.assertEqual(Model1().version, [1, 0, 0])
def test_save(self):
with tempfile.NamedTemporaryFile(prefix="modelforge-test-") as f:
m = Model8()
m.series = "series"
m.save(f.name)
self.assertIsInstance(m.created_at, datetime.datetime)
self.assertEqual(m.source, f.name)
self.assertGreater(m.size, 1000)
self.assertLess(m.size, 2000)
m = Model8().load(f.name)
self.assertEqual(m.tree["abc"], 777)
self.assertEqual(m.source, f.name)
def test_save_no_impl(self):
with self.assertRaises(NotImplementedError):
Model4().save("model.asdf", "series")
with self.assertRaises(ValueError):
Model4().save("model.asdf")
def test_save_create_missing_dirs(self):
with tempfile.TemporaryDirectory(prefix="modelforge-test-") as savedir:
savepath = os.path.join(savedir, "add/some/subdirs/", "model.asdf")
with self.assertRaises(FileNotFoundError):
m = Model8().save(savepath, "series", create_missing_dirs=False)
self.assertEqual(m.source, savepath)
Model8().save(savepath, "series")
self.assertEqual(Model8().load(savepath).tree["abc"], 777)
def test_load_no_args(self):
shutil.rmtree(configuration.vendor_cache_dir(), ignore_errors=True)
self.assertRaises(ValueError, FakeDocfreqModel().load)
class SerializationTests(unittest.TestCase):
DOCFREQ_PATH = "test.asdf"
def test_empty_split_save_load_merge(self):
strings = []
merged = merge_strings(strings)
assert_array_equal(merged["strings"], numpy.array([], dtype="S1"))
assert_array_equal(merged["lengths"], numpy.array([], dtype=int))
self.assertIsNone(merged["str"])
af = asdf.AsdfFile(merged)
buffer = BytesIO()
af.write_to(buffer)
buffer.seek(0)
af_loaded = asdf.open(buffer)
strings_restored = split_strings(af_loaded.tree)
self.assertEqual(strings, strings_restored)
def test_merge_strings(self):
strings = ["a", "bc", "def"]
merged = merge_strings(strings)
self.assertIsInstance(merged, dict)
self.assertIn("strings", merged)
self.assertIn("lengths", merged)
self.assertIsInstance(merged["strings"], numpy.ndarray)
self.assertEqual(merged["strings"].shape, (1,))
self.assertEqual(merged["strings"][0], b"abcdef")
self.assertIsInstance(merged["lengths"], numpy.ndarray)
self.assertEqual(merged["lengths"].shape, (3,))
self.assertEqual(merged["lengths"][0], 1)
self.assertEqual(merged["lengths"][1], 2)
self.assertEqual(merged["lengths"][2], 3)
def test_split_strings(self):
strings = split_strings({
"strings": numpy.array([b"abcdef"]),
"lengths": numpy.array([1, 2, 3])
})
self.assertEqual(strings, ["a", "bc", "def"])
def test_invalid_merge_strings(self):
with self.assertRaises(TypeError):
merge_strings("abcd")
with self.assertRaises(TypeError):
merge_strings([0, 1, 2, 3])
def test_merge_bytes(self):
strings = [b"a", b"bc", b"def"]
merged = merge_strings(strings)
self.assertIsInstance(merged, dict)
self.assertIn("strings", merged)
self.assertIn("lengths", merged)
self.assertEqual(merged["str"], False)
self.assertIsInstance(merged["strings"], numpy.ndarray)
self.assertEqual(merged["strings"].shape, (1,))
self.assertEqual(merged["strings"][0], b"abcdef")
self.assertIsInstance(merged["lengths"], numpy.ndarray)
self.assertEqual(merged["lengths"].shape, (3,))
self.assertEqual(merged["lengths"][0], 1)
self.assertEqual(merged["lengths"][1], 2)
self.assertEqual(merged["lengths"][2], 3)
def test_split_bytes(self):
strings = split_strings({
"strings": numpy.array([b"abcdef"]),
"lengths": numpy.array([1, 2, 3]),
"str": False
})
self.assertEqual(strings, [b"a", b"bc", b"def"])
def test_disassemble_sparse_matrix(self):
arr = numpy.zeros((10, 10), dtype=numpy.float32)
numpy.random.seed(0)
arr[numpy.random.randint(0, 10, (50, 2))] = 1
mat = csr_matrix(arr)
dis = disassemble_sparse_matrix(mat)
self.assertIsInstance(dis, dict)
self.assertIn("shape", dis)
self.assertIn("format", dis)
self.assertIn("data", dis)
self.assertEqual(dis["shape"], arr.shape)
self.assertEqual(dis["format"], "csr")
self.assertIsInstance(dis["data"], (tuple, list))
self.assertEqual(len(dis["data"]), 3)
self.assertTrue((dis["data"][0] == mat.data).all())
self.assertTrue((dis["data"][1] == mat.indices).all())
self.assertTrue((dis["data"][2] == [0] + list(numpy.diff(mat.indptr))).all())
self.assertEqual(dis["data"][2].dtype, numpy.uint8)
def test_assemble_sparse_matrix(self):
tree = {
"shape": (3, 10),
"format": "csr",
"data": [numpy.arange(1, 8),
numpy.array([0, 4, 1, 5, 2, 3, 8]),
numpy.array([0, 2, 4, 7])]
}
mat = assemble_sparse_matrix(tree)
self.assertIsInstance(mat, csr_matrix)
self.assertTrue((mat.data == tree["data"][0]).all())
self.assertTrue((mat.indices == tree["data"][1]).all())
self.assertTrue((mat.indptr == tree["data"][2]).all())
self.assertEqual(mat.shape, (3, 10))
self.assertEqual(mat.dtype, numpy.int)
tree = {
"shape": (3, 10),
"format": "csr",
"data": [numpy.arange(1, 8),
numpy.array([0, 4, 1, 5, 2, 3, 8]),
numpy.array([0, 2, 2, 3])]
}
mat = assemble_sparse_matrix(tree)
self.assertIsInstance(mat, csr_matrix)
self.assertTrue((mat.data == tree["data"][0]).all())
self.assertTrue((mat.indices == tree["data"][1]).all())
self.assertTrue((mat.indptr == [0, 2, 4, 7]).all())
self.assertEqual(mat.shape, (3, 10))
self.assertEqual(mat.dtype, numpy.int)
def test_pickle(self):
docfreq = GenericModel(source=get_path(self.DOCFREQ_PATH))
res = pickle.dumps(docfreq)
docfreq_rec = pickle.loads(res)
for k in docfreq.__dict__:
if k != "tree":
self.assertEqual(getattr(docfreq, k), getattr(docfreq_rec, k), k)
def test_pickle_numpy(self):
arr = NumpyArray()
fobj = BytesIO()
arr.save(fobj, series="test")
fobj.seek(0)
arr = NumpyArray().load(fobj)
pickle.dumps(arr)
with tempfile.NamedTemporaryFile(prefix="modelforge-test-") as f:
arr.save(f.name)
arr = NumpyArray().load(f.name)
pickle.dumps(arr)
def test_write(self):
model = Model1()
model._meta = generate_meta("test", (1, 0, 3))
with tempfile.NamedTemporaryFile() as tmp:
model._write_tree({"xxx": 100500}, tmp.name)
with asdf.open(tmp.name) as f:
self.assertEqual(f.tree["meta"]["model"], "test")
self.assertEqual(f.tree["xxx"], 100500)
self.assertEqual(oct(os.stat(tmp.name).st_mode)[-3:], "666")
def test_write_fileobj(self):
model = Model1()
model._meta = generate_meta("test", (1, 0, 3))
buffer = BytesIO()
model._write_tree({"xxx": 100500}, buffer)
buffer.seek(0)
with asdf.open(buffer) as f:
self.assertEqual(f.tree["meta"]["model"], "test")
self.assertEqual(f.tree["xxx"], 100500)
def test_load_fileobj(self):
path = get_path(self.DOCFREQ_PATH)
buffer = BytesIO()
with open(path, "rb") as fin:
buffer.write(fin.read())
buffer.seek(0)
model = FakeDocfreqModel().load(source=buffer)
self.assertEqual(model.source, "<file object>")
self.assertEqual(model.size, SIZE)
self.assertEqual(model.created_at, datetime.datetime(2017, 6, 19, 9, 59, 14, 766638))
if __name__ == "__main__":
unittest.main()
|
[
"modelforge.meta.generate_new_meta",
"pickle.dumps",
"io.BytesIO",
"numpy.array",
"asdf.open",
"unittest.main",
"pickle.loads",
"unittest.mock.patch",
"numpy.arange",
"datetime.datetime",
"os.path.exists",
"modelforge.model.disassemble_sparse_matrix",
"modelforge.models.GenericModel",
"numpy.diff",
"numpy.random.seed",
"tempfile.NamedTemporaryFile",
"scipy.sparse.csr_matrix",
"os.path.expanduser",
"numpy.random.normal",
"modelforge.configuration.vendor_cache_dir",
"inspect.getmodule",
"os.path.dirname",
"modelforge.index.GitIndex",
"tempfile.TemporaryDirectory",
"asdf.AsdfFile",
"modelforge.tests.fake_dulwich.FakeRepo.reset",
"os.path.join",
"os.path.realpath",
"numpy.zeros",
"numpy.random.randint",
"modelforge.model.merge_strings",
"modelforge.tests.fake_requests.FakeRequests",
"os.stat",
"modelforge.model.assemble_sparse_matrix",
"modelforge.model.split_strings"
] |
[((2991, 3050), 'modelforge.meta.generate_new_meta', 'generate_new_meta', (['name', '"""test"""', '"""source{d}"""', '"""Proprietary"""'], {}), "(name, 'test', 'source{d}', 'Proprietary')\n", (3008, 3050), False, 'from modelforge.meta import generate_new_meta\n'), ((20727, 20742), 'unittest.main', 'unittest.main', ([], {}), '()\n', (20740, 20742), False, 'import unittest\n'), ((2628, 2651), 'numpy.random.normal', 'numpy.random.normal', (['(16)'], {}), '(16)\n', (2647, 2651), False, 'import numpy\n'), ((2911, 2936), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2926, 2936), False, 'import os\n'), ((4323, 4366), 'modelforge.tests.fake_dulwich.FakeRepo.reset', 'fake_git.FakeRepo.reset', (['self.default_index'], {}), '(self.default_index)\n', (4346, 4366), True, 'import modelforge.tests.fake_dulwich as fake_git\n'), ((4524, 4556), 'os.path.exists', 'os.path.exists', (['self.cached_path'], {}), '(self.cached_path)\n', (4538, 4556), False, 'import os\n'), ((5512, 5531), 'modelforge.tests.fake_requests.FakeRequests', 'FakeRequests', (['route'], {}), '(route)\n', (5524, 5531), False, 'from modelforge.tests.fake_requests import FakeRequests\n'), ((6062, 6081), 'modelforge.tests.fake_requests.FakeRequests', 'FakeRequests', (['route'], {}), '(route)\n', (6074, 6081), False, 'from modelforge.tests.fake_requests import FakeRequests\n'), ((6098, 6154), 'modelforge.models.GenericModel', 'GenericModel', ([], {'source': '"""https://xxx"""', 'backend': 'self.backend'}), "(source='https://xxx', backend=self.backend)\n", (6110, 6154), False, 'from modelforge.models import GenericModel, register_model\n'), ((6545, 6564), 'modelforge.tests.fake_requests.FakeRequests', 'FakeRequests', (['route'], {}), '(route)\n', (6557, 6564), False, 'from modelforge.tests.fake_requests import FakeRequests\n'), ((6869, 6888), 'modelforge.tests.fake_requests.FakeRequests', 'FakeRequests', (['route'], {}), '(route)\n', (6881, 6888), False, 'from modelforge.tests.fake_requests import FakeRequests\n'), ((8763, 8787), 'inspect.getmodule', 'inspect.getmodule', (['model'], {}), '(model)\n', (8780, 8787), False, 'import inspect\n'), ((14009, 14031), 'modelforge.model.merge_strings', 'merge_strings', (['strings'], {}), '(strings)\n', (14022, 14031), False, 'from modelforge.model import assemble_sparse_matrix, disassemble_sparse_matrix, merge_strings, Model, split_strings\n'), ((14235, 14256), 'asdf.AsdfFile', 'asdf.AsdfFile', (['merged'], {}), '(merged)\n', (14248, 14256), False, 'import asdf\n'), ((14274, 14283), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (14281, 14283), False, 'from io import BytesIO\n'), ((14355, 14372), 'asdf.open', 'asdf.open', (['buffer'], {}), '(buffer)\n', (14364, 14372), False, 'import asdf\n'), ((14400, 14429), 'modelforge.model.split_strings', 'split_strings', (['af_loaded.tree'], {}), '(af_loaded.tree)\n', (14413, 14429), False, 'from modelforge.model import assemble_sparse_matrix, disassemble_sparse_matrix, merge_strings, Model, split_strings\n'), ((14571, 14593), 'modelforge.model.merge_strings', 'merge_strings', (['strings'], {}), '(strings)\n', (14584, 14593), False, 'from modelforge.model import assemble_sparse_matrix, disassemble_sparse_matrix, merge_strings, Model, split_strings\n'), ((15690, 15712), 'modelforge.model.merge_strings', 'merge_strings', (['strings'], {}), '(strings)\n', (15703, 15712), False, 'from modelforge.model import assemble_sparse_matrix, disassemble_sparse_matrix, merge_strings, Model, split_strings\n'), ((16651, 16693), 'numpy.zeros', 'numpy.zeros', 
(['(10, 10)'], {'dtype': 'numpy.float32'}), '((10, 10), dtype=numpy.float32)\n', (16662, 16693), False, 'import numpy\n'), ((16702, 16722), 'numpy.random.seed', 'numpy.random.seed', (['(0)'], {}), '(0)\n', (16719, 16722), False, 'import numpy\n'), ((16791, 16806), 'scipy.sparse.csr_matrix', 'csr_matrix', (['arr'], {}), '(arr)\n', (16801, 16806), False, 'from scipy.sparse import csr_matrix\n'), ((16821, 16851), 'modelforge.model.disassemble_sparse_matrix', 'disassemble_sparse_matrix', (['mat'], {}), '(mat)\n', (16846, 16851), False, 'from modelforge.model import assemble_sparse_matrix, disassemble_sparse_matrix, merge_strings, Model, split_strings\n'), ((17761, 17789), 'modelforge.model.assemble_sparse_matrix', 'assemble_sparse_matrix', (['tree'], {}), '(tree)\n', (17783, 17789), False, 'from modelforge.model import assemble_sparse_matrix, disassemble_sparse_matrix, merge_strings, Model, split_strings\n'), ((18364, 18392), 'modelforge.model.assemble_sparse_matrix', 'assemble_sparse_matrix', (['tree'], {}), '(tree)\n', (18386, 18392), False, 'from modelforge.model import assemble_sparse_matrix, disassemble_sparse_matrix, merge_strings, Model, split_strings\n'), ((18826, 18847), 'pickle.dumps', 'pickle.dumps', (['docfreq'], {}), '(docfreq)\n', (18838, 18847), False, 'import pickle\n'), ((18870, 18887), 'pickle.loads', 'pickle.loads', (['res'], {}), '(res)\n', (18882, 18887), False, 'import pickle\n'), ((19110, 19119), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (19117, 19119), False, 'from io import BytesIO\n'), ((19225, 19242), 'pickle.dumps', 'pickle.dumps', (['arr'], {}), '(arr)\n', (19237, 19242), False, 'import pickle\n'), ((20009, 20018), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (20016, 20018), False, 'from io import BytesIO\n'), ((20338, 20347), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (20345, 20347), False, 'from io import BytesIO\n'), ((3452, 3477), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3467, 3477), False, 'import os\n'), ((5256, 5281), 'modelforge.models.GenericModel', 'GenericModel', ([], {'source': 'UUID'}), '(source=UUID)\n', (5268, 5281), False, 'from modelforge.models import GenericModel, register_model\n'), ((5658, 5693), 'unittest.mock.patch', 'patch', (['"""shutil.rmtree"""', 'fake_rmtree'], {}), "('shutil.rmtree', fake_rmtree)\n", (5663, 5693), False, 'from unittest.mock import patch\n'), ((5715, 5762), 'modelforge.models.GenericModel', 'GenericModel', ([], {'source': 'UUID', 'backend': 'self.backend'}), '(source=UUID, backend=self.backend)\n', (5727, 5762), False, 'from modelforge.models import GenericModel, register_model\n'), ((6945, 7006), 'modelforge.models.GenericModel', 'GenericModel', ([], {'source': '"""https://bad_code"""', 'backend': 'self.backend'}), "(source='https://bad_code', backend=self.backend)\n", (6957, 7006), False, 'from modelforge.models import GenericModel, register_model\n'), ((9621, 9670), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(6)', '(19)', '(9)', '(59)', '(14)', '(766638)'], {}), '(2017, 6, 19, 9, 59, 14, 766638)\n', (9638, 9670), False, 'import datetime\n'), ((11156, 11165), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (11163, 11165), False, 'from io import BytesIO\n'), ((11478, 11487), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (11485, 11487), False, 'from io import BytesIO\n'), ((12279, 12328), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(6)', '(19)', '(9)', '(59)', '(14)', '(766638)'], {}), '(2017, 6, 19, 9, 59, 14, 766638)\n', (12296, 12328), False, 'import datetime\n'), 
((12457, 12511), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'prefix': '"""modelforge-test-"""'}), "(prefix='modelforge-test-')\n", (12484, 12511), False, 'import tempfile\n'), ((13218, 13272), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {'prefix': '"""modelforge-test-"""'}), "(prefix='modelforge-test-')\n", (13245, 13272), False, 'import tempfile\n'), ((13308, 13364), 'os.path.join', 'os.path.join', (['savedir', '"""add/some/subdirs/"""', '"""model.asdf"""'], {}), "(savedir, 'add/some/subdirs/', 'model.asdf')\n", (13320, 13364), False, 'import os\n'), ((13727, 13759), 'modelforge.configuration.vendor_cache_dir', 'configuration.vendor_cache_dir', ([], {}), '()\n', (13757, 13759), False, 'from modelforge import configuration, storage_backend\n'), ((14078, 14105), 'numpy.array', 'numpy.array', (['[]'], {'dtype': '"""S1"""'}), "([], dtype='S1')\n", (14089, 14105), False, 'import numpy\n'), ((14153, 14179), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (14164, 14179), False, 'import numpy\n'), ((15495, 15516), 'modelforge.model.merge_strings', 'merge_strings', (['"""abcd"""'], {}), "('abcd')\n", (15508, 15516), False, 'from modelforge.model import assemble_sparse_matrix, disassemble_sparse_matrix, merge_strings, Model, split_strings\n'), ((15572, 15599), 'modelforge.model.merge_strings', 'merge_strings', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (15585, 15599), False, 'from modelforge.model import assemble_sparse_matrix, disassemble_sparse_matrix, merge_strings, Model, split_strings\n'), ((16735, 16771), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(10)', '(50, 2)'], {}), '(0, 10, (50, 2))\n', (16755, 16771), False, 'import numpy\n'), ((19256, 19310), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'prefix': '"""modelforge-test-"""'}), "(prefix='modelforge-test-')\n", (19283, 19310), False, 'import tempfile\n'), ((19402, 19419), 'pickle.dumps', 'pickle.dumps', (['arr'], {}), '(arr)\n', (19414, 19419), False, 'import pickle\n'), ((19540, 19569), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (19567, 19569), False, 'import tempfile\n'), ((20106, 20123), 'asdf.open', 'asdf.open', (['buffer'], {}), '(buffer)\n', (20115, 20123), False, 'import asdf\n'), ((20643, 20692), 'datetime.datetime', 'datetime.datetime', (['(2017)', '(6)', '(19)', '(9)', '(59)', '(14)', '(766638)'], {}), '(2017, 6, 19, 9, 59, 14, 766638)\n', (20660, 20692), False, 'import datetime\n'), ((4428, 4489), 'modelforge.index.GitIndex', 'ind.GitIndex', ([], {'remote': 'self.default_url', 'cache': 'self.cached_path'}), '(remote=self.default_url, cache=self.cached_path)\n', (4440, 4489), True, 'import modelforge.index as ind\n'), ((4584, 4620), 'os.path.expanduser', 'os.path.expanduser', (['self.cached_path'], {}), '(self.cached_path)\n', (4602, 4620), False, 'import os\n'), ((15260, 15284), 'numpy.array', 'numpy.array', (["[b'abcdef']"], {}), "([b'abcdef'])\n", (15271, 15284), False, 'import numpy\n'), ((15309, 15331), 'numpy.array', 'numpy.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (15320, 15331), False, 'import numpy\n'), ((16424, 16448), 'numpy.array', 'numpy.array', (["[b'abcdef']"], {}), "([b'abcdef'])\n", (16435, 16448), False, 'import numpy\n'), ((16473, 16495), 'numpy.array', 'numpy.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (16484, 16495), False, 'import numpy\n'), ((17612, 17630), 'numpy.arange', 'numpy.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (17624, 17630), False, 
'import numpy\n'), ((17653, 17687), 'numpy.array', 'numpy.array', (['[0, 4, 1, 5, 2, 3, 8]'], {}), '([0, 4, 1, 5, 2, 3, 8])\n', (17664, 17687), False, 'import numpy\n'), ((17710, 17735), 'numpy.array', 'numpy.array', (['[0, 2, 4, 7]'], {}), '([0, 2, 4, 7])\n', (17721, 17735), False, 'import numpy\n'), ((18215, 18233), 'numpy.arange', 'numpy.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (18227, 18233), False, 'import numpy\n'), ((18256, 18290), 'numpy.array', 'numpy.array', (['[0, 4, 1, 5, 2, 3, 8]'], {}), '([0, 4, 1, 5, 2, 3, 8])\n', (18267, 18290), False, 'import numpy\n'), ((18313, 18338), 'numpy.array', 'numpy.array', (['[0, 2, 2, 3]'], {}), '([0, 2, 2, 3])\n', (18324, 18338), False, 'import numpy\n'), ((19652, 19671), 'asdf.open', 'asdf.open', (['tmp.name'], {}), '(tmp.name)\n', (19661, 19671), False, 'import asdf\n'), ((8579, 8605), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (8595, 8605), False, 'import os\n'), ((17379, 17401), 'numpy.diff', 'numpy.diff', (['mat.indptr'], {}), '(mat.indptr)\n', (17389, 17401), False, 'import numpy\n'), ((19837, 19854), 'os.stat', 'os.stat', (['tmp.name'], {}), '(tmp.name)\n', (19844, 19854), False, 'import os\n')]
|
import typing
import cv2
import numpy as np
from numpy.lib.polynomial import poly
import streamlit as st
import plotly.express as px
import plotly.graph_objects as go
from utils.configs import IMAGES, DetectionConfig, RunningModes
from utils.configs import default_config
from utils.configs import birds_config
from utils.configs import birds2_config
from utils.configs import keyboard_config
from utils.configs import dots_config
@st.experimental_singleton
def load_image(path: str):
'''Loads input image from path using opencv.'''
return cv2.imread(path)
def convert_color(img: np.array):
'''Sets the correct colors for matplotlib.'''
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def convert_gray(img: np.array):
    '''Preprocess the image and return a grayscale version of it.'''
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
return img
def find_contours(img: np.array,
thresh_val: int=150,
thresh_maxval: int=255,
thresh_type: int=1,
contour_mode: int=1,
contour_method: int=2) -> np.array:
'''Detect contours and return results.'''
img = convert_gray(img)
_, thresh = cv2.threshold(img,
thresh_val,
thresh_maxval,
thresh_type)
contours, _ = cv2.findContours(thresh, contour_mode, contour_method)
return contours
def filter_contours(contours: typing.Tuple,
min_contour_length: int=-9999,
max_contour_length: int=9999) -> np.array:
'''Filter contours by given criteria.'''
return np.array([
contour for contour in contours
if len(contour) >= min_contour_length and len(contour) <= max_contour_length
], dtype=object)
def draw_contours(img: np.array,
contours: np.array,
contour_index: int=-1,
contour_color: typing.Tuple=(255, 0, 0, 1),
contour_thickness: int=0) -> typing.Tuple[int, typing.Tuple]:
    '''Draw the given contours onto the image and return their count together with the image.'''
n_contours = 0
for contour in contours:
cv2.drawContours(img,
[contour],
contour_index,
contour_color,
contour_thickness)
n_contours += 1
return n_contours, img
def format_uploaded_image(uploaded_file: typing.Any) -> np.array:
'''Tries to format uploaded file into an image.'''
if uploaded_file is not None:
file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
img = cv2.imdecode(file_bytes, 1)
return img
def polygon_sizes(contours: list) -> list[float]:
sizes = []
for contour in contours:
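        # OpenCV contours have shape (N, 1, 2); split out the x and y coordinate columns.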
xs = np.array(contour, dtype=object)[:,0,0]
ys = np.array(contour, dtype=object)[:,0,1]
        sizes.append(calculate_polygon_size(xs, ys))
return np.array(sizes)
def calculate_polygon_size(xs: np.array, ys: np.array):
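    # Shoelace formula: area = 0.5 * |dot(x, roll(y, 1)) - dot(y, roll(x, 1))|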
return 0.5*np.abs(np.dot(xs,np.roll(ys,1))-np.dot(ys,np.roll(xs,1)))
def create_sidebar() -> typing.Tuple[np.array, DetectionConfig]:
'''Create sidebar widgets, apply pre-defined configs and return adjusted ones.'''
st.sidebar.header('Select Options')
available_modes = [f'{mode.name} ({mode.value})' for mode in RunningModes]
mode = st.sidebar.radio('Running Mode', available_modes)
mode = mode.split(' ')[0]
st.sidebar.markdown('---')
initial_config = default_config
new_config = DetectionConfig()
if mode == RunningModes.UPLOAD.name:
uploaded_file = st.sidebar.file_uploader('Upload Image', ['png', 'jpg', 'jpeg'], )
img = format_uploaded_image(uploaded_file)
# No image uploaded yet
if img is None:
return None, None
elif mode == RunningModes.EXAMPLES.name:
img_name = st.sidebar.selectbox('Input Image',
list(choice.name for choice in IMAGES))
if img_name == IMAGES.BIRDS.name:
initial_config = birds_config
elif img_name == IMAGES.DOTS.name:
initial_config = dots_config
elif img_name == IMAGES.BIRDS2.name:
initial_config = birds2_config
elif img_name == IMAGES.KEYBOARD.name:
initial_config = keyboard_config
img = load_image(getattr(IMAGES, img_name).value).copy()
img = convert_color(img)
new_config.thresh_val = st.sidebar.slider('Threshold Value',
0,
255,
initial_config.thresh_val,
help='Threshold Value, see [here](https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gae8a4a146d1ca78c626a53577199e9c57).')
new_config.thresh_type = st.sidebar.slider('Threshold Type',
0,
5,
initial_config.thresh_type,
help='Threshold Operation Type, see [here](https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gaa9e58d2860d4afa658ef70a9b1115576).')
new_config.thresh_maxval = st.sidebar.slider('Thresh Max Val',
0,
255,
initial_config.thresh_maxval,
help='Maximum Value for Threshold Types 1 and 2, see [here](https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gae8a4a146d1ca78c626a53577199e9c57).')
new_config.contour_mode = st.sidebar.slider('Contour Mode',
0,
3,
initial_config.contour_mode,
help='Contour Retrieval Mode, see [here](https://docs.opencv.org/3.4/d3/dc0/group__imgproc__shape.html#ga819779b9857cc2f8601e6526a3a5bc71).')
new_config.contour_method = st.sidebar.slider('Contour Method',
1,
4,
initial_config.contour_method,
help='Contour Approximation Method, see [here](https://docs.opencv.org/3.4/d3/dc0/group__imgproc__shape.html#ga4303f45752694956374734a03c54d5ff).')
min_contour_length, max_contour_length = st.sidebar.slider(
'Min/Max Contour Length',
min_value=0,
max_value=100,
value=(initial_config.min_contour_length, initial_config.max_contour_length),
help='Number of Vertices for Contour Graph.')
new_config.min_contour_length = min_contour_length
new_config.max_contour_length = max_contour_length
return img, new_config
def plot_histogram(data: np.array) -> go.Figure:
'''Plot histogram of given data.'''
fig = px.histogram(x=data)
fig.update_layout(
xaxis_title='Polygon Areas',
yaxis_title='Polygon Sizes'
)
return fig
def main():
st.set_page_config(page_title='Object Counter',
page_icon='🔢')
st.markdown('''
# Object Counter App
This app helps you to count similar shaped objects on a given image. To improve
the object detection performance, you can adjust the parameters on the left hand side.
Look at the example to get a feeling for that. Afterwards, upload an image and try it
yourself. Good luck 🍀!
''')
img, config = create_sidebar()
if img is None:
return
contours = find_contours(img,
thresh_val=config.thresh_val,
thresh_maxval=config.thresh_maxval,
thresh_type=config.thresh_type,
contour_mode=config.contour_mode,
contour_method=config.contour_method)
contours_filtered = filter_contours(contours,
min_contour_length=config.min_contour_length,
max_contour_length=config.max_contour_length)
if len(contours_filtered) < 1:
st.warning('No contours found for current selection. Try to loosen it a bit!')
return
# refilter contours by polygon size
polygons = polygon_sizes(contours_filtered)
min_area, max_area = st.sidebar.slider('Polygon Size',
value=(int(min(polygons)), int(max(polygons))+1),
min_value=int(min(polygons)),
max_value=int(max(polygons))+1)
contour_indices = np.argwhere((polygons >= min_area) & (polygons <= max_area))
contours_refiltered = []
polygons_filtered = []
for i, (polygon, contour) in enumerate(zip(polygons, contours_filtered)):
if i in contour_indices:
contours_refiltered.append(contour)
polygons_filtered.append(polygon)
n_objects, img = draw_contours(img,
contours_refiltered)
n_objects, img_contours = draw_contours(np.zeros(shape=img.shape) + 255,
contours_refiltered)
st.subheader(f'Objects found: {n_objects}')
fig = plot_histogram(polygons_filtered)
st.image(img,
caption='Original image with contours',
use_column_width=True)
st.sidebar.markdown('---')
st.sidebar.subheader('Advanced Options')
if st.sidebar.checkbox('Show Contours Only Image'):
st.image(img_contours,
caption='Contours',
clamp=True,
use_column_width=True)
if st.sidebar.checkbox('Show Polygon Size Distribution'):
st.markdown('If the objects are equal in size, the polygon areas '
'should follow a normal distribution. Modify the "Polygon Size" '
'attribute to filter out outliers.')
st.plotly_chart(fig, use_container_width=True)
if __name__=='__main__':
try:
main()
except Exception as e:
st.error(f"Something went wrong, sorry 😐. Please reload the page and try again.\n\n{str(e)}")
|
[
"streamlit.image",
"numpy.array",
"cv2.imdecode",
"utils.configs.DetectionConfig",
"cv2.threshold",
"streamlit.warning",
"streamlit.sidebar.header",
"streamlit.sidebar.checkbox",
"streamlit.sidebar.markdown",
"streamlit.sidebar.slider",
"streamlit.set_page_config",
"streamlit.markdown",
"cv2.drawContours",
"plotly.express.histogram",
"streamlit.sidebar.subheader",
"streamlit.subheader",
"cv2.cvtColor",
"streamlit.plotly_chart",
"cv2.imread",
"numpy.roll",
"numpy.zeros",
"numpy.argwhere",
"streamlit.sidebar.file_uploader",
"cv2.findContours",
"streamlit.sidebar.radio"
] |
[((551, 567), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (561, 567), False, 'import cv2\n'), ((663, 699), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (675, 699), False, 'import cv2\n'), ((819, 856), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (831, 856), False, 'import cv2\n'), ((1209, 1267), 'cv2.threshold', 'cv2.threshold', (['img', 'thresh_val', 'thresh_maxval', 'thresh_type'], {}), '(img, thresh_val, thresh_maxval, thresh_type)\n', (1222, 1267), False, 'import cv2\n'), ((1376, 1430), 'cv2.findContours', 'cv2.findContours', (['thresh', 'contour_mode', 'contour_method'], {}), '(thresh, contour_mode, contour_method)\n', (1392, 1430), False, 'import cv2\n'), ((2992, 3007), 'numpy.array', 'np.array', (['sizes'], {}), '(sizes)\n', (3000, 3007), True, 'import numpy as np\n'), ((3294, 3329), 'streamlit.sidebar.header', 'st.sidebar.header', (['"""Select Options"""'], {}), "('Select Options')\n", (3311, 3329), True, 'import streamlit as st\n'), ((3421, 3470), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Running Mode"""', 'available_modes'], {}), "('Running Mode', available_modes)\n", (3437, 3470), True, 'import streamlit as st\n'), ((3506, 3532), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""---"""'], {}), "('---')\n", (3525, 3532), True, 'import streamlit as st\n'), ((3587, 3604), 'utils.configs.DetectionConfig', 'DetectionConfig', ([], {}), '()\n', (3602, 3604), False, 'from utils.configs import IMAGES, DetectionConfig, RunningModes\n'), ((4533, 4752), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Threshold Value"""', '(0)', '(255)', 'initial_config.thresh_val'], {'help': '"""Threshold Value, see [here](https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gae8a4a146d1ca78c626a53577199e9c57)."""'}), "('Threshold Value', 0, 255, initial_config.thresh_val,\n help=\n 'Threshold Value, see [here](https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gae8a4a146d1ca78c626a53577199e9c57).'\n )\n", (4550, 4752), True, 'import streamlit as st\n'), ((4952, 5174), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Threshold Type"""', '(0)', '(5)', 'initial_config.thresh_type'], {'help': '"""Threshold Operation Type, see [here](https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gaa9e58d2860d4afa658ef70a9b1115576)."""'}), "('Threshold Type', 0, 5, initial_config.thresh_type, help=\n 'Threshold Operation Type, see [here](https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gaa9e58d2860d4afa658ef70a9b1115576).'\n )\n", (4969, 5174), True, 'import streamlit as st\n'), ((5384, 5631), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Thresh Max Val"""', '(0)', '(255)', 'initial_config.thresh_maxval'], {'help': '"""Maximum Value for Threshold Types 1 and 2, see [here](https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gae8a4a146d1ca78c626a53577199e9c57)."""'}), "('Thresh Max Val', 0, 255, initial_config.thresh_maxval,\n help=\n 'Maximum Value for Threshold Types 1 and 2, see [here](https://docs.opencv.org/3.4/d7/d1b/group__imgproc__misc.html#gae8a4a146d1ca78c626a53577199e9c57).'\n )\n", (5401, 5631), True, 'import streamlit as st\n'), ((5844, 6064), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Contour Mode"""', '(0)', '(3)', 'initial_config.contour_mode'], {'help': '"""Contour Retrieval Mode, see 
[here](https://docs.opencv.org/3.4/d3/dc0/group__imgproc__shape.html#ga819779b9857cc2f8601e6526a3a5bc71)."""'}), "('Contour Mode', 0, 3, initial_config.contour_mode, help=\n 'Contour Retrieval Mode, see [here](https://docs.opencv.org/3.4/d3/dc0/group__imgproc__shape.html#ga819779b9857cc2f8601e6526a3a5bc71).'\n )\n", (5861, 6064), True, 'import streamlit as st\n'), ((6279, 6513), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Contour Method"""', '(1)', '(4)', 'initial_config.contour_method'], {'help': '"""Contour Approximation Method, see [here](https://docs.opencv.org/3.4/d3/dc0/group__imgproc__shape.html#ga4303f45752694956374734a03c54d5ff)."""'}), "('Contour Method', 1, 4, initial_config.contour_method,\n help=\n 'Contour Approximation Method, see [here](https://docs.opencv.org/3.4/d3/dc0/group__imgproc__shape.html#ga4303f45752694956374734a03c54d5ff).'\n )\n", (6296, 6513), True, 'import streamlit as st\n'), ((6746, 6950), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Min/Max Contour Length"""'], {'min_value': '(0)', 'max_value': '(100)', 'value': '(initial_config.min_contour_length, initial_config.max_contour_length)', 'help': '"""Number of Vertices for Contour Graph."""'}), "('Min/Max Contour Length', min_value=0, max_value=100,\n value=(initial_config.min_contour_length, initial_config.\n max_contour_length), help='Number of Vertices for Contour Graph.')\n", (6763, 6950), True, 'import streamlit as st\n'), ((7222, 7242), 'plotly.express.histogram', 'px.histogram', ([], {'x': 'data'}), '(x=data)\n', (7234, 7242), True, 'import plotly.express as px\n'), ((7377, 7439), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Object Counter"""', 'page_icon': '"""🔢"""'}), "(page_title='Object Counter', page_icon='🔢')\n", (7395, 7439), True, 'import streamlit as st\n'), ((7467, 7833), 'streamlit.markdown', 'st.markdown', (['"""\n # Object Counter App\n This app helps you to count similar shaped objects on a given image. To improve\n the object detection performance, you can adjust the parameters on the left hand side.\n Look at the example to get a feeling for that. Afterwards, upload an image and try it\n yourself. Good luck 🍀!\n """'], {}), '(\n """\n # Object Counter App\n This app helps you to count similar shaped objects on a given image. To improve\n the object detection performance, you can adjust the parameters on the left hand side.\n Look at the example to get a feeling for that. Afterwards, upload an image and try it\n yourself. 
Good luck 🍀!\n """\n )\n', (7478, 7833), True, 'import streamlit as st\n'), ((9017, 9077), 'numpy.argwhere', 'np.argwhere', (['((polygons >= min_area) & (polygons <= max_area))'], {}), '((polygons >= min_area) & (polygons <= max_area))\n', (9028, 9077), True, 'import numpy as np\n'), ((9584, 9627), 'streamlit.subheader', 'st.subheader', (['f"""Objects found: {n_objects}"""'], {}), "(f'Objects found: {n_objects}')\n", (9596, 9627), True, 'import streamlit as st\n'), ((9678, 9754), 'streamlit.image', 'st.image', (['img'], {'caption': '"""Original image with contours"""', 'use_column_width': '(True)'}), "(img, caption='Original image with contours', use_column_width=True)\n", (9686, 9754), True, 'import streamlit as st\n'), ((9786, 9812), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""---"""'], {}), "('---')\n", (9805, 9812), True, 'import streamlit as st\n'), ((9817, 9857), 'streamlit.sidebar.subheader', 'st.sidebar.subheader', (['"""Advanced Options"""'], {}), "('Advanced Options')\n", (9837, 9857), True, 'import streamlit as st\n'), ((9865, 9912), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""Show Contours Only Image"""'], {}), "('Show Contours Only Image')\n", (9884, 9912), True, 'import streamlit as st\n'), ((10067, 10120), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""Show Polygon Size Distribution"""'], {}), "('Show Polygon Size Distribution')\n", (10086, 10120), True, 'import streamlit as st\n'), ((2196, 2281), 'cv2.drawContours', 'cv2.drawContours', (['img', '[contour]', 'contour_index', 'contour_color', 'contour_thickness'], {}), '(img, [contour], contour_index, contour_color,\n contour_thickness)\n', (2212, 2281), False, 'import cv2\n'), ((2681, 2708), 'cv2.imdecode', 'cv2.imdecode', (['file_bytes', '(1)'], {}), '(file_bytes, 1)\n', (2693, 2708), False, 'import cv2\n'), ((3671, 3735), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload Image"""', "['png', 'jpg', 'jpeg']"], {}), "('Upload Image', ['png', 'jpg', 'jpeg'])\n", (3695, 3735), True, 'import streamlit as st\n'), ((8512, 8590), 'streamlit.warning', 'st.warning', (['"""No contours found for current selection. Try to loosen it a bit!"""'], {}), "('No contours found for current selection. Try to loosen it a bit!')\n", (8522, 8590), True, 'import streamlit as st\n'), ((9922, 9999), 'streamlit.image', 'st.image', (['img_contours'], {'caption': '"""Contours"""', 'clamp': '(True)', 'use_column_width': '(True)'}), "(img_contours, caption='Contours', clamp=True, use_column_width=True)\n", (9930, 9999), True, 'import streamlit as st\n'), ((10130, 10303), 'streamlit.markdown', 'st.markdown', (['"""If the objects are equal in size, the polygon areas should follow a normal distribution. Modify the "Polygon Size" attribute to filter out outliers."""'], {}), '(\n \'If the objects are equal in size, the polygon areas should follow a normal distribution. 
Modify the "Polygon Size" attribute to filter out outliers.\'\n )\n', (10141, 10303), True, 'import streamlit as st\n'), ((10348, 10394), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {'use_container_width': '(True)'}), '(fig, use_container_width=True)\n', (10363, 10394), True, 'import streamlit as st\n'), ((2837, 2868), 'numpy.array', 'np.array', (['contour'], {'dtype': 'object'}), '(contour, dtype=object)\n', (2845, 2868), True, 'import numpy as np\n'), ((2889, 2920), 'numpy.array', 'np.array', (['contour'], {'dtype': 'object'}), '(contour, dtype=object)\n', (2897, 2920), True, 'import numpy as np\n'), ((9481, 9506), 'numpy.zeros', 'np.zeros', ([], {'shape': 'img.shape'}), '(shape=img.shape)\n', (9489, 9506), True, 'import numpy as np\n'), ((3097, 3111), 'numpy.roll', 'np.roll', (['ys', '(1)'], {}), '(ys, 1)\n', (3104, 3111), True, 'import numpy as np\n'), ((3122, 3136), 'numpy.roll', 'np.roll', (['xs', '(1)'], {}), '(xs, 1)\n', (3129, 3136), True, 'import numpy as np\n')]
|
"""
Copyright (c) 2021, WSO2 Inc. (http://www.wso2.com). All Rights Reserved.
This software is the property of WSO2 Inc. and its suppliers, if any.
Dissemination of any information or reproduction of any material contained
herein is strictly forbidden, unless permitted by WSO2 in accordance with
the WSO2 Commercial License available at http://wso2.com/licenses.
For specific language governing the permissions and limitations under
this license, please see the license as well as any agreement you’ve
entered into with WSO2 governing the purchase of this software and any
"""
import math
import numpy as np
import pandas as pd
import csv
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import shuffle
from sklearn.model_selection import KFold
import pymc3 as pm
from sklearn.metrics import mean_squared_error
def root_mean_squared_percentage_error(y_true, prediction):
"""
Calculate root mean squared percentage error of
predictions compared to y_values from dataset.
:param y_true: y_values (actual TPS) from Dataset
:param prediction: predicted TPS
    :return rmspe: Root mean squared percentage error
"""
y_true, y_pred = np.array(y_true), np.array(prediction)
EPSILON = 1e-10
rmspe = (np.sqrt(np.mean(np.square((y_true - y_pred) /
(y_true + EPSILON))))) * 100
return rmspe
# Define MAPE function
def mean_absolute_percentage_error(y_true, prediction):
"""
Calculate mean absolute percentage error of
predictions compared to y_values from dataset.
:param y_true: y_values (actual TPS) from Dataset
:param prediction: predicted TPS
    :return mape: Mean absolute percentage error
"""
y_true, y_pred = np.array(y_true), np.array(prediction)
mape = np.mean(np.abs((y_true - y_pred) / y_true)) * 100
return mape
class BayesianPolyRegression:
def fit(self, X, Y):
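        # Build a marginal GP with a degree-2 polynomial covariance and fit its hyperparameters via MAP.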
with pm.Model() as self.model:
lm = pm.Gamma("l", alpha=2, beta=1)
offset = 0.1
nu = pm.HalfCauchy("nu", beta=1)
d = 2
cov = nu ** 2 * pm.gp.cov.Polynomial(X.shape[1], lm, d, offset)
self.gp = pm.gp.Marginal(cov_func=cov)
sigma = pm.HalfCauchy("sigma", beta=1)
self.gp.marginal_likelihood("y", X=X, y=Y, noise=sigma)
self.map_trace = [pm.find_MAP()]
def predict(self, X, with_error=False):
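        # Sample the GP posterior predictive at X from the MAP point; return the mean and, on request, the std (scaled by 1/1000).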
with self.model:
f_pred = self.gp.conditional('f_pred', X)
pred_samples = pm.sample_posterior_predictive(
self.map_trace, vars=[f_pred],
samples=2000,
random_seed=42
)
y_pred, uncer = pred_samples['f_pred'].mean(axis=0), \
pred_samples['f_pred'].std(axis=0)
if with_error:
return y_pred, uncer / 1000
return y_pred
def get_fold_predictions(X, y, eval_X):
lr = BayesianPolyRegression()
lr.fit(X, y)
pred_y, error = lr.predict(eval_X, True)
return pred_y, error
def run_baysian_poly():
predict_label = 9 # 9 for TPS
# Read Data
dataset = pd.read_csv('dataset/dataset.csv')
# Ignore Errors
dataset = dataset.loc[dataset["Error %"] < 5]
# Define X and Y columns
X = dataset.iloc[:, [0, 2, 3]].values
Y = dataset.iloc[:, predict_label].values
# Encode 'Scenario Name'
le_X_0 = LabelEncoder()
X[:, 0] = le_X_0.fit_transform(X[:, 0])
# Create Scaler
scaler = MinMaxScaler(feature_range=(0, 1))
# Apply Scaler on X
scaler.fit(X)
X = scaler.transform(X)
# Convert Y to 1D Array - Not necessary
Y = Y.flatten()
# Shuffle Data
X, Y = shuffle(X, Y, random_state=42)
predictions = []
errorlist = []
y_actual = []
kf = KFold(n_splits=10)
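    # 10-fold cross-validation: fit on each training split and collect held-out predictions and uncertainties.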
for train_index, test_index in kf.split(X):
pred_bayes, error = get_fold_predictions(np.copy(X[train_index]),
np.copy(Y[train_index]),
np.copy(X[test_index]))
for item in pred_bayes:
predictions.append(item)
for item in error:
errorlist.append(item)
for item in Y[test_index]:
y_actual.append(item)
RMSPE = root_mean_squared_percentage_error(y_actual, predictions)
MAPE = mean_absolute_percentage_error(y_actual, predictions)
RMSE = math.sqrt(mean_squared_error(y_actual, predictions))
print(
"Scores for Baysian_Polynomial: \n",
"RMSE :", RMSE, "\n",
"MAPE: ", MAPE, "\n",
"RMSPE: ", RMSPE, "\n",
)
file_name = "results/" + "baysian_poly.csv"
with open(file_name, "a") as f:
writer = csv.writer(f)
writer.writerows(zip(y_actual, predictions))
# Run Evaluation
run_baysian_poly()
|
[
"numpy.abs",
"sklearn.preprocessing.LabelEncoder",
"numpy.copy",
"pymc3.find_MAP",
"pandas.read_csv",
"pymc3.gp.Marginal",
"pymc3.sample_posterior_predictive",
"sklearn.utils.shuffle",
"pymc3.gp.cov.Polynomial",
"csv.writer",
"sklearn.metrics.mean_squared_error",
"numpy.square",
"numpy.array",
"pymc3.Model",
"sklearn.model_selection.KFold",
"pymc3.HalfCauchy",
"sklearn.preprocessing.MinMaxScaler",
"pymc3.Gamma"
] |
[((3210, 3244), 'pandas.read_csv', 'pd.read_csv', (['"""dataset/dataset.csv"""'], {}), "('dataset/dataset.csv')\n", (3221, 3244), True, 'import pandas as pd\n'), ((3477, 3491), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (3489, 3491), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3570, 3604), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (3582, 3604), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3772, 3802), 'sklearn.utils.shuffle', 'shuffle', (['X', 'Y'], {'random_state': '(42)'}), '(X, Y, random_state=42)\n', (3779, 3802), False, 'from sklearn.utils import shuffle\n'), ((3872, 3890), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (3877, 3890), False, 'from sklearn.model_selection import KFold\n'), ((1240, 1256), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (1248, 1256), True, 'import numpy as np\n'), ((1258, 1278), 'numpy.array', 'np.array', (['prediction'], {}), '(prediction)\n', (1266, 1278), True, 'import numpy as np\n'), ((1802, 1818), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (1810, 1818), True, 'import numpy as np\n'), ((1820, 1840), 'numpy.array', 'np.array', (['prediction'], {}), '(prediction)\n', (1828, 1840), True, 'import numpy as np\n'), ((1860, 1894), 'numpy.abs', 'np.abs', (['((y_true - y_pred) / y_true)'], {}), '((y_true - y_pred) / y_true)\n', (1866, 1894), True, 'import numpy as np\n'), ((1988, 1998), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (1996, 1998), True, 'import pymc3 as pm\n'), ((2031, 2061), 'pymc3.Gamma', 'pm.Gamma', (['"""l"""'], {'alpha': '(2)', 'beta': '(1)'}), "('l', alpha=2, beta=1)\n", (2039, 2061), True, 'import pymc3 as pm\n'), ((2104, 2131), 'pymc3.HalfCauchy', 'pm.HalfCauchy', (['"""nu"""'], {'beta': '(1)'}), "('nu', beta=1)\n", (2117, 2131), True, 'import pymc3 as pm\n'), ((2250, 2278), 'pymc3.gp.Marginal', 'pm.gp.Marginal', ([], {'cov_func': 'cov'}), '(cov_func=cov)\n', (2264, 2278), True, 'import pymc3 as pm\n'), ((2300, 2330), 'pymc3.HalfCauchy', 'pm.HalfCauchy', (['"""sigma"""'], {'beta': '(1)'}), "('sigma', beta=1)\n", (2313, 2330), True, 'import pymc3 as pm\n'), ((2596, 2691), 'pymc3.sample_posterior_predictive', 'pm.sample_posterior_predictive', (['self.map_trace'], {'vars': '[f_pred]', 'samples': '(2000)', 'random_seed': '(42)'}), '(self.map_trace, vars=[f_pred], samples=2000,\n random_seed=42)\n', (2626, 2691), True, 'import pymc3 as pm\n'), ((3989, 4012), 'numpy.copy', 'np.copy', (['X[train_index]'], {}), '(X[train_index])\n', (3996, 4012), True, 'import numpy as np\n'), ((4063, 4086), 'numpy.copy', 'np.copy', (['Y[train_index]'], {}), '(Y[train_index])\n', (4070, 4086), True, 'import numpy as np\n'), ((4137, 4159), 'numpy.copy', 'np.copy', (['X[test_index]'], {}), '(X[test_index])\n', (4144, 4159), True, 'import numpy as np\n'), ((4533, 4574), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_actual', 'predictions'], {}), '(y_actual, predictions)\n', (4551, 4574), False, 'from sklearn.metrics import mean_squared_error\n'), ((4869, 4882), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4879, 4882), False, 'import csv\n'), ((1328, 1377), 'numpy.square', 'np.square', (['((y_true - y_pred) / (y_true + EPSILON))'], {}), '((y_true - y_pred) / (y_true + EPSILON))\n', (1337, 1377), True, 'import numpy as np\n'), ((2179, 2226), 'pymc3.gp.cov.Polynomial', 'pm.gp.cov.Polynomial', (['X.shape[1]', 'lm', 'd', 'offset'], {}), 
'(X.shape[1], lm, d, offset)\n', (2199, 2226), True, 'import pymc3 as pm\n'), ((2430, 2443), 'pymc3.find_MAP', 'pm.find_MAP', ([], {}), '()\n', (2441, 2443), True, 'import pymc3 as pm\n')]
|