code (stringlengths 31–1.05M) | apis (list) | extract_api (stringlengths 97–1.91M) |
---|---|---|
import json
import numpy as np
import os
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, Flatten, Activation, BatchNormalization
from sklearn.model_selection import train_test_split
class SuggestionModeler(object):
"""
A collection of functions to generate a model of subreddit suggestions from the data retrieved in
data_retrieval.py
"""
def __init__(self, force_retrain=False):
self.session = tf.Session()
self.graph = tf.get_default_graph()
with open("model_generation/config.json", "r") as infile:
self.config = json.loads(infile.read())
if os.path.exists("config_override.json"):
with open("model_generation/config_override.json", "r") as infile:
self.config.update(json.loads(infile.read()))
self.subreddit_to_rank = dict()
with open(self.config["rank_to_subreddit_path"], 'r') as infile:
self.rank_to_subreddit = json.loads(infile.read())
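# JSON object keys are always strings, so cast them back to integer ranks below.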
self.rank_to_subreddit = {int(k): v for k, v in self.rank_to_subreddit.items()}
for rank, subreddit in self.rank_to_subreddit.items():
self.subreddit_to_rank[subreddit] = rank
with open(self.config['rank_to_sfw_status'], 'r') as infile:
self.rank_to_sfw_status = json.loads(infile.read())
self.rank_to_sfw_status = {int(k): v for k, v in self.rank_to_sfw_status.items()}
self.method = self.config["method"]
self.model_path = self.config['model_path'].format(method=self.method)
if self.method == "hot":
model = Sequential()
model.add(Dense(512, activation='relu',
input_shape=(self.config['max_subreddits_in_model'], )))
model.add(Dropout(0.5))
model.add(Dense(self.config['max_subreddits_in_model'], activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['acc'])
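# Sigmoid outputs with binary cross-entropy treat each of the max_subreddits_in_model
# outputs as an independent yes/no label (multi-label), rather than a softmax over classes.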
else:
raise ValueError("'method' in config not well defined")
self.model = model
if force_retrain or not os.path.exists(self.model_path):
model.summary()
print("Preparing train/test data...")
X, y = self.arrange_training_data(method=self.method)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.config['test_pct'])
train_data, test_data = (X_train, y_train), (X_test, y_test)
print("Starting training process...")
self.train_model(train_data, test_data)
with self.graph.as_default():
with self.session.as_default():
self.model.load_weights(self.model_path)
def arrange_training_data(self, method):
import random
with open(self.config["combined_user_to_subreddit_score_path"], 'r') as infile:
user_subreddit_scores = json.loads(infile.read())
for k, scores in user_subreddit_scores.items():
user_subreddit_scores[k] = sorted(scores, key=lambda x: x[1], reverse=True)
data_length, data_width = len(user_subreddit_scores), self.config['max_subreddits_in_model']
user_subreddit_scores = list(user_subreddit_scores.values())
random.shuffle(user_subreddit_scores)
if method == 'hot': # Input vector is one-hot encoding.
X = np.zeros((data_length, data_width), dtype=np.bool)
for i, scores in enumerate(user_subreddit_scores):
for subreddit_key, score in scores:
if subreddit_key <= data_width:
X[i][subreddit_key - 1] = True
else:
raise ValueError(f"Unhandled training data preparation method {method}")
y = np.zeros((data_length, data_width), dtype=np.bool)
for i, scores in enumerate(user_subreddit_scores):
for subreddit_key, score in scores:
if subreddit_key <= data_width:
y[i][subreddit_key-1] = score > 0
return X, y
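# Illustrative example of the arrangement above (assuming max_subreddits_in_model == 3):
# a user with scores [(1, +5), (3, -2)] yields X row [True, False, True] (subreddits the
# user interacted with) and y row [True, False, False] (only positively-scored subreddits).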
def arrange_user_data(self, user_data):
user_data = {k: v for k, v in sorted(user_data.items(), key=lambda x: x[1], reverse=True)
if 0 < self.subreddit_to_rank.get(k, -1) < self.config['max_subreddits_in_model']}
if self.method == 'hot':
data = np.zeros((1, self.config['max_subreddits_in_model']), dtype=np.bool)
for subreddit_name, subreddit_score in user_data.items():
if subreddit_name in self.subreddit_to_rank:
data[0][self.subreddit_to_rank[subreddit_name]-1] = subreddit_score > 0
return data
def train_model(self, train_data, test_data):
X, y = train_data
self.model.fit(X, y, epochs=5, batch_size=256, verbose=1)
self.model.save(self.model_path)
X, y = test_data
scores = self.model.evaluate(X, y, verbose=1)
print(self.model.metrics_names)
print(scores)
def get_user_predictions(self, user_data):
arranged_data = self.arrange_user_data(user_data)
user_known_subreddits = set(list(user_data.keys()))
with self.graph.as_default():
with self.session.as_default():
predictions = self.model.predict(arranged_data)[0]
predictions = [(self.rank_to_subreddit[i+1], round(float(score), 5), i) for i, score
in enumerate(predictions) if self.rank_to_subreddit[i+1] not in user_known_subreddits \
and self.rank_to_sfw_status[i+1] and i > 200]
predictions.sort(key=lambda x: x[1], reverse=True)
return predictions
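# Illustrative usage (hypothetical subreddit scores, as produced by data_retrieval.py):
# modeler = SuggestionModeler()
# suggestions = modeler.get_user_predictions({"python": 5.0, "learnpython": 2.0})
# suggestions[:10]  # [(subreddit_name, confidence, rank_index), ...] sorted by confidence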
if __name__ == '__main__':
import os
os.chdir('..')
modeler = SuggestionModeler(True)
|
[
"os.path.exists",
"random.shuffle",
"sklearn.model_selection.train_test_split",
"tensorflow.Session",
"keras.models.Sequential",
"os.chdir",
"numpy.zeros",
"keras.layers.Dense",
"keras.layers.Dropout",
"tensorflow.get_default_graph"
] |
[((5754, 5768), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (5762, 5768), False, 'import os\n'), ((490, 502), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (500, 502), True, 'import tensorflow as tf\n'), ((524, 546), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (544, 546), True, 'import tensorflow as tf\n'), ((677, 715), 'os.path.exists', 'os.path.exists', (['"""config_override.json"""'], {}), "('config_override.json')\n", (691, 715), False, 'import os\n'), ((3308, 3345), 'random.shuffle', 'random.shuffle', (['user_subreddit_scores'], {}), '(user_subreddit_scores)\n', (3322, 3345), False, 'import random\n'), ((3814, 3864), 'numpy.zeros', 'np.zeros', (['(data_length, data_width)'], {'dtype': 'np.bool'}), '((data_length, data_width), dtype=np.bool)\n', (3822, 3864), True, 'import numpy as np\n'), ((1656, 1668), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1666, 1668), False, 'from keras.models import Sequential\n'), ((2391, 2448), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': "self.config['test_pct']"}), "(X, y, test_size=self.config['test_pct'])\n", (2407, 2448), False, 'from sklearn.model_selection import train_test_split\n'), ((3428, 3478), 'numpy.zeros', 'np.zeros', (['(data_length, data_width)'], {'dtype': 'np.bool'}), '((data_length, data_width), dtype=np.bool)\n', (3436, 3478), True, 'import numpy as np\n'), ((4393, 4461), 'numpy.zeros', 'np.zeros', (["(1, self.config['max_subreddits_in_model'])"], {'dtype': 'np.bool'}), "((1, self.config['max_subreddits_in_model']), dtype=np.bool)\n", (4401, 4461), True, 'import numpy as np\n'), ((1691, 1780), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""', 'input_shape': "(self.config['max_subreddits_in_model'],)"}), "(512, activation='relu', input_shape=(self.config[\n 'max_subreddits_in_model'],))\n", (1696, 1780), False, 'from keras.layers import Dense, Dropout, Embedding, Flatten, Activation, BatchNormalization\n'), ((1828, 1840), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1835, 1840), False, 'from keras.layers import Dense, Dropout, Embedding, Flatten, Activation, BatchNormalization\n'), ((1864, 1931), 'keras.layers.Dense', 'Dense', (["self.config['max_subreddits_in_model']"], {'activation': '"""sigmoid"""'}), "(self.config['max_subreddits_in_model'], activation='sigmoid')\n", (1869, 1931), False, 'from keras.layers import Dense, Dropout, Embedding, Flatten, Activation, BatchNormalization\n'), ((2167, 2198), 'os.path.exists', 'os.path.exists', (['self.model_path'], {}), '(self.model_path)\n', (2181, 2198), False, 'import os\n')]
|
import math
import numpy as np
import torch
from mock import patch
from core.train_utils import compute_angular_error, compute_angular_error_xyz_arr, spherical2cartesial
def test_spherical2cartesial():
spherical = torch.Tensor([
[0, 0],
[math.pi / 2, 0],
[-math.pi / 2, 0],
[0, math.pi / 2],
[math.pi / 2, math.pi / 2],
])
target_xyz = np.array([
[0, 0, -1],
[1, 0, 0],
[-1, 0, 0],
[0, 1, 0],
[0, 1, 0],
])
xyz = spherical2cartesial(spherical)
assert xyz.shape[0] == spherical.shape[0]
assert xyz.shape[1] == 3
assert isinstance(xyz, torch.Tensor)
assert np.linalg.norm(target_xyz - xyz.numpy(), axis=1).max() < 1e-5
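# The expected values are consistent with the convention x = cos(col1)*sin(col0),
# y = sin(col1), z = -cos(col1)*cos(col0), i.e. column 0 acts as azimuth and column 1 as elevation.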
@patch('core.train_utils.epsilon', 0)
def test_compute_angular_error_xyz_arr():
input1 = torch.Tensor([
[0.8001 / math.sqrt(2), 0.6, 0.8 / math.sqrt(2)],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[-1, 0, 0],
[1 / math.sqrt(2), 0, 1 / math.sqrt(2)],
])
input2 = torch.Tensor([
[0.8 / math.sqrt(2), 0.6, 0.8 / math.sqrt(2)],
[-1, 0, 0],
[0, 0, 1],
[0, 0, -1],
[-1, 0, 0],
[-1 / math.sqrt(2), 0, 1 / math.sqrt(2)],
], )
target = torch.Tensor([
0,
180,
180 / 2,
180,
0,
180 / 2,
])
output = compute_angular_error_xyz_arr(input1, input2)
assert np.max(np.abs(output.numpy() - target.numpy())) < 1e-5
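# Each target is the angle in degrees between the paired direction vectors (arccos of the
# normalised dot product); epsilon is patched to 0, presumably so the expected values are exact.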
@patch('core.train_utils.epsilon', 0)
def test_compute_angular_error():
input1 = torch.Tensor([
[math.pi / 2, 0],
[0, math.pi / 2],
[math.pi, 0],
[-math.pi / 2, 0],
[math.pi / 4, 0],
])
input2 = torch.Tensor([
[-math.pi / 2, 0],
[math.pi, 0],
[0, 0],
[-math.pi / 2, 0],
[-math.pi / 4, 0],
], )
target = torch.Tensor([
180,
180 / 2,
180,
0,
180 / 2,
])
output = compute_angular_error(input1, input2)
assert torch.mean(target) == output
|
[
"core.train_utils.compute_angular_error",
"mock.patch",
"torch.mean",
"core.train_utils.compute_angular_error_xyz_arr",
"torch.Tensor",
"math.sqrt",
"numpy.array",
"core.train_utils.spherical2cartesial"
] |
[((740, 776), 'mock.patch', 'patch', (['"""core.train_utils.epsilon"""', '(0)'], {}), "('core.train_utils.epsilon', 0)\n", (745, 776), False, 'from mock import patch\n'), ((1504, 1540), 'mock.patch', 'patch', (['"""core.train_utils.epsilon"""', '(0)'], {}), "('core.train_utils.epsilon', 0)\n", (1509, 1540), False, 'from mock import patch\n'), ((222, 331), 'torch.Tensor', 'torch.Tensor', (['[[0, 0], [math.pi / 2, 0], [-math.pi / 2, 0], [0, math.pi / 2], [math.pi / \n 2, math.pi / 2]]'], {}), '([[0, 0], [math.pi / 2, 0], [-math.pi / 2, 0], [0, math.pi / 2],\n [math.pi / 2, math.pi / 2]])\n', (234, 331), False, 'import torch\n'), ((392, 459), 'numpy.array', 'np.array', (['[[0, 0, -1], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, 1, 0]]'], {}), '([[0, 0, -1], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, 1, 0]])\n', (400, 459), True, 'import numpy as np\n'), ((517, 547), 'core.train_utils.spherical2cartesial', 'spherical2cartesial', (['spherical'], {}), '(spherical)\n', (536, 547), False, 'from core.train_utils import compute_angular_error, compute_angular_error_xyz_arr, spherical2cartesial\n'), ((1272, 1320), 'torch.Tensor', 'torch.Tensor', (['[0, 180, 180 / 2, 180, 0, 180 / 2]'], {}), '([0, 180, 180 / 2, 180, 0, 180 / 2])\n', (1284, 1320), False, 'import torch\n'), ((1389, 1434), 'core.train_utils.compute_angular_error_xyz_arr', 'compute_angular_error_xyz_arr', (['input1', 'input2'], {}), '(input1, input2)\n', (1418, 1434), False, 'from core.train_utils import compute_angular_error, compute_angular_error_xyz_arr, spherical2cartesial\n'), ((1588, 1693), 'torch.Tensor', 'torch.Tensor', (['[[math.pi / 2, 0], [0, math.pi / 2], [math.pi, 0], [-math.pi / 2, 0], [math\n .pi / 4, 0]]'], {}), '([[math.pi / 2, 0], [0, math.pi / 2], [math.pi, 0], [-math.pi /\n 2, 0], [math.pi / 4, 0]])\n', (1600, 1693), False, 'import torch\n'), ((1750, 1848), 'torch.Tensor', 'torch.Tensor', (['[[-math.pi / 2, 0], [math.pi, 0], [0, 0], [-math.pi / 2, 0], [-math.pi / 4, 0]]'], {}), '([[-math.pi / 2, 0], [math.pi, 0], [0, 0], [-math.pi / 2, 0], [\n -math.pi / 4, 0]])\n', (1762, 1848), False, 'import torch\n'), ((1906, 1951), 'torch.Tensor', 'torch.Tensor', (['[180, 180 / 2, 180, 0, 180 / 2]'], {}), '([180, 180 / 2, 180, 0, 180 / 2])\n', (1918, 1951), False, 'import torch\n'), ((2012, 2049), 'core.train_utils.compute_angular_error', 'compute_angular_error', (['input1', 'input2'], {}), '(input1, input2)\n', (2033, 2049), False, 'from core.train_utils import compute_angular_error, compute_angular_error_xyz_arr, spherical2cartesial\n'), ((2061, 2079), 'torch.mean', 'torch.mean', (['target'], {}), '(target)\n', (2071, 2079), False, 'import torch\n'), ((865, 877), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (874, 877), False, 'import math\n'), ((890, 902), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (899, 902), False, 'import math\n'), ((995, 1007), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1004, 1007), False, 'import math\n'), ((1016, 1028), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1025, 1028), False, 'import math\n'), ((1081, 1093), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1090, 1093), False, 'import math\n'), ((1106, 1118), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1115, 1118), False, 'import math\n'), ((1214, 1226), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1223, 1226), False, 'import math\n'), ((1235, 1247), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1244, 1247), False, 'import math\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import glob,os,csv,re,math
import shutil, time
from astropy.io import ascii
import matplotlib.pyplot as plt
# Load all data files:
psdir ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/'
hrdir ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/'
# Original directories:
pande_dir='/Users/maryumsayeed/Desktop/pande/pande_lcs/'
ast_dir ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/data/large_train_sample/'
# Directories when testing sections of lightcurves:
# pande_dir='/Users/maryumsayeed/Desktop/pande/pande_lcs_third/'
# ast_dir ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/data/large_train_sample_third/'
pande_lcs =glob.glob(pande_dir+'*.fits')
ast_lcs =glob.glob(ast_dir+'*.fits')
print('# of Pande .ps files:',len(glob.glob(pande_dir+'*.ps')))
print('# of Pande .fits files:',len(glob.glob(pande_dir+'*.fits')))
print('# of Astero. .ps files:',len(glob.glob(ast_dir+'*.ps')))
print('# of Astero. .fits files:',len(glob.glob(ast_dir+'*.fits')))
# Load Berger+ stellar properties catalogues:
gaia =ascii.read('/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/DR2PapTable1.txt',delimiter='&')
gaia =gaia[gaia['binaryFlag']==0] #remove any binaries
kepler_catalogue=pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/GKSPC_InOut_V4.csv')#,skiprows=1,delimiter=',',usecols=[0,1])
# Get Kps for all stars:
kpfile ='/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/KIC_Kepmag_Berger2018.csv'
kp_all =pd.read_csv(kpfile,usecols=['KIC','kic_kepmag'])
# # Load Asteroseismic Samples:
# From the Mathur sample, keep only the asteroseismic logg measurements:
mathur_header=['KIC','loggi','e_loggi','r_loggi','n_loggi','logg','E_logg','e_logg','Mass','E_Mass','e_Mass']
mathur_2017 =pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/mathur_2017.txt',delimiter=';',skiprows=54,names=mathur_header)
mathur_2017 =mathur_2017[mathur_2017['n_loggi']=='AST'] #include only asteroseismic measurements
yu_header=['KICID','Teff','err','logg','logg_err','Fe/H','err','M_noCorrection','M_nocorr_err','R_noCorrection','err','M_RGB','M_RGB_err','R_RGB','err','M_Clump','M_Clump_err','R_Clump','err','EvoPhase']
yu_2018 =pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/rg_yu.txt',delimiter='|',names=yu_header,skiprows=1,index_col=False)#,names=yu_header)
#chaplin_2014=pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/Chaplin_2014.txt',skiprows=47,delimiter='\t',names=chaplin_header)
#huber_2013 =pd.read_csv('/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/Huber_2013.txt',delimiter='\t',skiprows=37,names=['KIC','Mass','Mass_err'])
mathur_kics=np.array(mathur_2017['KIC'])
yu_kics=np.array(yu_2018['KICID'])
#chaplin_kics=np.array(chaplin_2014['KIC'])
#huber_kics=np.array(huber_2013['KIC'])
print('# of stars in Yu+2018:',len(yu_kics))
print('# of stars in Mathur+17:',len(mathur_kics))
astero_kics=np.concatenate([mathur_kics,yu_kics])
astero_kics=np.array(list(set(astero_kics)))
print('Total seismic stars:',len(astero_kics))
# # Load Pande sample:
pande =pd.read_csv('/Users/maryumsayeed/Desktop/pande/pande_granulation.txt')#,skiprows=1,usecols=[0],dtype=int,delimiter=',')
pande_kics=list(pande['#KIC'])
print('# of stars in Pande+2018:',len(pande))
# If star in both sample, treat it as asteroseismic star to increase ast. sample.
# If star only in Pande sample, keep it there.
# If star only in ast. sample, keep it there.
pande_stars0=(set(pande_kics) - set(astero_kics))
print('# stars only in Pande+ and not astero',len(pande_stars0))
print('# total astero. stars:',len(astero_kics))
print('# stars in both Pande+ and astero catalogues:',len(list(set(pande_kics) & set(astero_kics))))
# # Get catalogues of non-granulation stars:
not_dir='/Users/maryumsayeed/Desktop/HuberNess/mlearning/ACFcannon-master/not_granulation_star/'
dscutis =np.loadtxt(not_dir+'murphy_dscuti.txt',usecols=[0,-1],delimiter=',',skiprows=1,dtype=int)
idx=np.where(dscutis[:,1]==1)[0] #stars that have dSct flag
dscutis =dscutis[idx][:,0]
binaries =np.loadtxt(not_dir+'ebinary.txt',usecols=[0],dtype=int,delimiter=',')
exoplanets =pd.read_csv(not_dir+'koi_planethosts.csv',skiprows=53,usecols=['kepid','koi_disposition','koi_pdisposition'])
#exoplanets=exoplanets[exoplanets['koi_pdisposition']!='FALSE POSITIVE'] # Remove false positive exoplanets:
exoplanets =[int(i) for i in list(exoplanets['kepid'])]
superflares=np.loadtxt(hrdir+'superflares_shibayama2013.txt',skiprows=33,usecols=[0],dtype=int)
superflares=[int(i) for i in list(superflares)]
flares =list(np.loadtxt(not_dir+'flares_davenport2016.txt',usecols=[0],skiprows=1,delimiter=',',dtype=int))
rotating =list(np.loadtxt(not_dir+'mcquillan_rotation.txt',usecols=[0],skiprows=1,delimiter=',',dtype=int))
clas =ascii.read(not_dir+'debosscher2011.dat')
gdor =clas[(clas['V1'] == 'GDOR') | (clas['V1'] == 'SPB')]
gdor =[int(i) for i in list(gdor['KIC'])]
dscutis2 =clas[clas['V1'] == 'DSCUT']
dscutis2 =[int(i) for i in list(dscutis2['KIC'])]
rrlyr =pd.read_csv(not_dir+'rrlyr.txt')
rrlyr =[int(i) for i in list(rrlyr['kic'])]
# # Remove non-granulation stars:
pande_stars=list(set(pande_stars0)-set(binaries)-set(exoplanets)-set(flares)-set(rotating) -set(superflares)-set(dscutis)-set(dscutis2)-set(gdor)-set(rrlyr))
astero_stars=list(set(astero_kics)-set(binaries)-set(exoplanets)-set(flares)-set(rotating) -set(superflares)-set(dscutis)-set(dscutis2)-set(gdor)-set(rrlyr))
print('# of non-granulation stars removed from astero sample:',len(astero_kics)-len(astero_stars))
print('# of non-granulation stars removed from pande sample:',len(pande_stars0)-len(pande_stars))
# Only get stars in Gaia catalogue (Berger+2018):
print('(before cross-referenced with Gaia) # of Pande stars:',len(pande_stars))
print('(before cross-referenced with Gaia) # of Astero. stars:',len(astero_stars))
pande_stars = list((set(pande_stars) & set(gaia['KIC'])))
astero_stars = list((set(astero_stars) & set(gaia['KIC'])))
print('final # of Pande stars:',len(pande_stars))
print('final # of asteroseismic stars:',len(astero_stars))
# Check if all Pande stars have a light curve downloaded:
print('\n','===== PANDE =====')
pande_kics_downloaded=[]
for file in pande_lcs:
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
pande_kics_downloaded.append(kic)
print('These should be the same:')
print('---Stars downloaded:',len(pande_kics_downloaded))
print('---Stars needed:',len(pande_stars))
if len(pande_kics_downloaded) > len(pande_stars):
print('We have more stars downloaded than we need from Pande+18.')
else:
print("Don't have all the stars that we need. Download more!")
# Only use Pande stars we have downloaded:
#pande_stars = list(set(set(pande_stars)-set(pande_not_downloaded)))
pande_below_dc=ascii.read(psdir+'LLR_gaia/pande_kics_below_duty_cycle.txt',names=['KICID'])
pande_below_89=ascii.read(psdir+'LLR_gaia/pande_kics_below_89_days.txt',names=['KICID'])
pande_below_dc,pande_below_89=pande_below_dc['KICID'],pande_below_89['KICID']
pande_not_downloaded =[]
pande_stars_downloaded=[]
for kic in pande_stars:
if kic in pande_kics_downloaded:
pande_stars_downloaded.append(kic)
else:
pande_not_downloaded.append(kic)
print('Need from Pande+18',len(pande_stars))
print('Downloaded',len(pande_stars_downloaded))
print('Have but removed aka:')
print('---# of Pande stars below 89 days',len(pande_below_89))
print('---# of Pande stars below duty cycle',len(pande_below_dc))
print('Pande not downloaded',len(pande_not_downloaded))
print('Good pande stars',len(pande_stars))
# Check if all astero. stars have a light curve downloaded:
print('\n','===== ASTERO. =====')
ast_kics_downloaded=[]
for file in ast_lcs:
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
ast_kics_downloaded.append(kic)
print('These should be the same:')
print('---Stars downloaded:',len(ast_kics_downloaded))
print('---Stars needed:',len(astero_stars))
if len(ast_kics_downloaded) > len(astero_stars):
print('We have more stars downloaded than we need from astero catalogues.')
else:
print("Don't have all the stars that we need. Download more!")
astero_below_dc=ascii.read(psdir+'LLR_seismic/astero_kics_below_duty_cycle.txt',names=['KICID'])
astero_below_89=ascii.read(psdir+'LLR_seismic/astero_kics_below_89_days.txt',names=['KICID'])
astero_below_dc,astero_below_89=astero_below_dc['KICID'],astero_below_89['KICID']
astero_not_downloaded =[]
astero_stars_downloaded=[]
for kic in astero_stars:
if kic in ast_kics_downloaded:
astero_stars_downloaded.append(kic)
else:
astero_not_downloaded.append(kic)
print('Need from catalogues',len(astero_stars))
print('Downloaded',len(ast_kics_downloaded))
print('Have but removed aka:')
print('---# of astero stars below 89 days',len(astero_below_89))
print('---# of astero stars below duty cycle',len(astero_below_dc))
print('Astero not downloaded',len(astero_not_downloaded))
print('Good astero stars',len(astero_stars))
# In[13]:
# ascii.write([astero_stars],psdir+'astero_stars_we_need.txt',overwrite=True)
# ascii.write([ast_kics_downloaded],psdir+'astero_stars_downloaded.txt',overwrite=True)
# ascii.write([good_astero_stars],psdir+'good_stars_downloaded.txt',overwrite=True)
fn='/Users/maryumsayeed/Downloads/'
# np.savetxt(fn+'pande_not_downloaded.txt',pande_not_downloaded,fmt='%s')
# np.savetxt(fn+'astero_not_downloaded.txt',astero_not_downloaded,fmt='%s')
# # Find logg for Pande:
print('\n','Getting logg for Pande. stars...')
pande_ps=glob.glob(pande_dir+'*.ps')
pande_no_logg=0
pande_final_sample=[]
pande_loggs=[]
check_dups=[]
for file in pande_ps:
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
if kic in pande_stars:
row=kepler_catalogue.loc[kepler_catalogue['KIC']==kic]
logg=row['iso_logg'].item()
if not math.isnan(logg): # check to see there are no nan loggs
logg_pos_err=row['iso_logg_err1']
logg_neg_err=row['iso_logg_err2']
pande_final_sample.append([file,logg])
pande_loggs.append(logg)
else:
pande_no_logg+=1
else:
continue
print('Pande w/ no logg:',pande_no_logg)
# Double check all these stars are in Pande:
kic_not_in_pande=[]
for i in pande_final_sample:
file=i[0]
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
if kic not in pande_kics:
kic_not_in_pande.append(kic)
print('# stars not in Pande.',len(kic_not_in_pande))
print('# Pande stars to save:',len(pande_final_sample))
diff=2000
# np.savetxt(psdir+'pande_final_sample_full.txt',pande_final_sample,fmt='%s')
# np.savetxt(psdir+'pande_pickle_1.txt',pande_final_sample[0:2000],fmt='%s')
# np.savetxt(psdir+'pande_pickle_2.txt',pande_final_sample[2000:4000],fmt='%s')
# np.savetxt(psdir+'pande_pickle_3.txt',pande_final_sample[4000:],fmt='%s')
# # Find logg for Astero. stars:
print('\n','Getting logg for Astero. stars...')
astero_ps=glob.glob(ast_dir+'*.ps')
files,loggs=[],np.zeros(len(astero_ps))
c1,c2,c3,none=0,0,0,0
for i in range(0,len(astero_ps)):
file=astero_ps[i]
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
if kic in astero_stars:
if kic in yu_kics:
row=yu_2018.loc[yu_2018['KICID']==kic]
logg =row['logg'].item()
c1+=1
elif kic in mathur_kics:
row =mathur_2017.loc[mathur_2017['KIC']==kic]
logg =row['loggi'].item()
c2+=1
else:
none+=1
logg=0 # no catalogue logg for this star; removed below by the loggs>0 cut
loggs[i]=logg
files.append(file)
# astero_final_sample.append([file,logg])
# astero_loggs.append(logg)
else:
continue
files,loggs=np.array(files),np.array(loggs).astype(float)
print('Yu+:',c1,'Mathur+',c2,'None',none)
idx=np.where(loggs>0)[0] #aka select valid stars
astero_files,astero_loggs=files[idx],loggs[idx]
astero_final_sample=np.array([astero_files,astero_loggs]).T
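# Note: np.array([astero_files, astero_loggs]) mixes strings and floats, so NumPy casts the
# whole array to strings; each row of astero_final_sample is [ps_file_path, logg].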
# Double check all these stars are in the astero. sample:
kic_not_in_astero=[]
for i in astero_final_sample:
file=i[0]
kic=re.search('kplr(.*)-', file).group(1)
kic=int(kic.lstrip('0'))
if kic not in astero_stars:
kic_not_in_astero.append(kic)
print('# stars not in Astero.',len(kic_not_in_astero))
print('# Astero. stars to save:',len(astero_final_sample))
diff=4000
# np.savetxt(psdir+'astero_final_sample_full.txt',astero_final_sample,fmt='%s')
# np.savetxt(psdir+'astero_final_sample_1.txt',astero_final_sample[0:4000],fmt='%s')
# np.savetxt(psdir+'astero_final_sample_2.txt',astero_final_sample[4000:4000+diff],fmt='%s')
# np.savetxt(psdir+'astero_final_sample_3.txt',astero_final_sample[8000:8000+diff],fmt='%s')
# np.savetxt(psdir+'astero_final_sample_4.txt',astero_final_sample[12000:12000+diff],fmt='%s')
|
[
"pandas.read_csv",
"numpy.where",
"math.isnan",
"numpy.array",
"numpy.concatenate",
"numpy.loadtxt",
"astropy.io.ascii.read",
"glob.glob",
"re.search"
] |
[((790, 821), 'glob.glob', 'glob.glob', (["(pande_dir + '*.fits')"], {}), "(pande_dir + '*.fits')\n", (799, 821), False, 'import glob, os, csv, re, math\n'), ((835, 864), 'glob.glob', 'glob.glob', (["(ast_dir + '*.fits')"], {}), "(ast_dir + '*.fits')\n", (844, 864), False, 'import glob, os, csv, re, math\n'), ((1184, 1301), 'astropy.io.ascii.read', 'ascii.read', (['"""/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/DR2PapTable1.txt"""'], {'delimiter': '"""&"""'}), "(\n '/Users/maryumsayeed/Desktop/HuberNess/mlearning/powerspectrum/DR2PapTable1.txt'\n , delimiter='&')\n", (1194, 1301), False, 'from astropy.io import ascii\n'), ((1368, 1470), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/GKSPC_InOut_V4.csv"""'], {}), "(\n '/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/GKSPC_InOut_V4.csv'\n )\n", (1379, 1470), True, 'import pandas as pd\n'), ((1635, 1685), 'pandas.read_csv', 'pd.read_csv', (['kpfile'], {'usecols': "['KIC', 'kic_kepmag']"}), "(kpfile, usecols=['KIC', 'kic_kepmag'])\n", (1646, 1685), True, 'import pandas as pd\n'), ((1885, 2033), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/mathur_2017.txt"""'], {'delimiter': '""";"""', 'skiprows': '(54)', 'names': 'mathur_header'}), "(\n '/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/mathur_2017.txt'\n , delimiter=';', skiprows=54, names=mathur_header)\n", (1896, 2033), True, 'import pandas as pd\n'), ((2336, 2489), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/rg_yu.txt"""'], {'delimiter': '"""|"""', 'names': 'yu_header', 'skiprows': '(1)', 'index_col': '(False)'}), "(\n '/Users/maryumsayeed/Desktop/HuberNess/mlearning/hrdmachine/rg_yu.txt',\n delimiter='|', names=yu_header, skiprows=1, index_col=False)\n", (2347, 2489), True, 'import pandas as pd\n'), ((2823, 2851), 'numpy.array', 'np.array', (["mathur_2017['KIC']"], {}), "(mathur_2017['KIC'])\n", (2831, 2851), True, 'import numpy as np\n'), ((2860, 2886), 'numpy.array', 'np.array', (["yu_2018['KICID']"], {}), "(yu_2018['KICID'])\n", (2868, 2886), True, 'import numpy as np\n'), ((3080, 3118), 'numpy.concatenate', 'np.concatenate', (['[mathur_kics, yu_kics]'], {}), '([mathur_kics, yu_kics])\n', (3094, 3118), True, 'import numpy as np\n'), ((3246, 3316), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/maryumsayeed/Desktop/pande/pande_granulation.txt"""'], {}), "('/Users/maryumsayeed/Desktop/pande/pande_granulation.txt')\n", (3257, 3316), True, 'import pandas as pd\n'), ((4044, 4144), 'numpy.loadtxt', 'np.loadtxt', (["(not_dir + 'murphy_dscuti.txt')"], {'usecols': '[0, -1]', 'delimiter': '""","""', 'skiprows': '(1)', 'dtype': 'int'}), "(not_dir + 'murphy_dscuti.txt', usecols=[0, -1], delimiter=',',\n skiprows=1, dtype=int)\n", (4054, 4144), True, 'import numpy as np\n'), ((4236, 4310), 'numpy.loadtxt', 'np.loadtxt', (["(not_dir + 'ebinary.txt')"], {'usecols': '[0]', 'dtype': 'int', 'delimiter': '""","""'}), "(not_dir + 'ebinary.txt', usecols=[0], dtype=int, delimiter=',')\n", (4246, 4310), True, 'import numpy as np\n'), ((4318, 4437), 'pandas.read_csv', 'pd.read_csv', (["(not_dir + 'koi_planethosts.csv')"], {'skiprows': '(53)', 'usecols': "['kepid', 'koi_disposition', 'koi_pdisposition']"}), "(not_dir + 'koi_planethosts.csv', skiprows=53, usecols=['kepid',\n 'koi_disposition', 'koi_pdisposition'])\n", (4329, 4437), True, 'import pandas as pd\n'), ((4606, 4699), 'numpy.loadtxt', 'np.loadtxt', 
(["(hrdir + 'superflares_shibayama2013.txt')"], {'skiprows': '(33)', 'usecols': '[0]', 'dtype': 'int'}), "(hrdir + 'superflares_shibayama2013.txt', skiprows=33, usecols=[0\n ], dtype=int)\n", (4616, 4699), True, 'import numpy as np\n'), ((4972, 5014), 'astropy.io.ascii.read', 'ascii.read', (["(not_dir + 'debosscher2011.dat')"], {}), "(not_dir + 'debosscher2011.dat')\n", (4982, 5014), False, 'from astropy.io import ascii\n'), ((5230, 5264), 'pandas.read_csv', 'pd.read_csv', (["(not_dir + 'rrlyr.txt')"], {}), "(not_dir + 'rrlyr.txt')\n", (5241, 5264), True, 'import pandas as pd\n'), ((7017, 7096), 'astropy.io.ascii.read', 'ascii.read', (["(psdir + 'LLR_gaia/pande_kics_below_duty_cycle.txt')"], {'names': "['KICID']"}), "(psdir + 'LLR_gaia/pande_kics_below_duty_cycle.txt', names=['KICID'])\n", (7027, 7096), False, 'from astropy.io import ascii\n'), ((7109, 7185), 'astropy.io.ascii.read', 'ascii.read', (["(psdir + 'LLR_gaia/pande_kics_below_89_days.txt')"], {'names': "['KICID']"}), "(psdir + 'LLR_gaia/pande_kics_below_89_days.txt', names=['KICID'])\n", (7119, 7185), False, 'from astropy.io import ascii\n'), ((8424, 8512), 'astropy.io.ascii.read', 'ascii.read', (["(psdir + 'LLR_seismic/astero_kics_below_duty_cycle.txt')"], {'names': "['KICID']"}), "(psdir + 'LLR_seismic/astero_kics_below_duty_cycle.txt', names=[\n 'KICID'])\n", (8434, 8512), False, 'from astropy.io import ascii\n'), ((8521, 8606), 'astropy.io.ascii.read', 'ascii.read', (["(psdir + 'LLR_seismic/astero_kics_below_89_days.txt')"], {'names': "['KICID']"}), "(psdir + 'LLR_seismic/astero_kics_below_89_days.txt', names=['KICID']\n )\n", (8531, 8606), False, 'from astropy.io import ascii\n'), ((9784, 9813), 'glob.glob', 'glob.glob', (["(pande_dir + '*.ps')"], {}), "(pande_dir + '*.ps')\n", (9793, 9813), False, 'import glob, os, csv, re, math\n'), ((11142, 11169), 'glob.glob', 'glob.glob', (["(ast_dir + '*.ps')"], {}), "(ast_dir + '*.ps')\n", (11151, 11169), False, 'import glob, os, csv, re, math\n'), ((4138, 4166), 'numpy.where', 'np.where', (['(dscutis[:, 1] == 1)'], {}), '(dscutis[:, 1] == 1)\n', (4146, 4166), True, 'import numpy as np\n'), ((4755, 4858), 'numpy.loadtxt', 'np.loadtxt', (["(not_dir + 'flares_davenport2016.txt')"], {'usecols': '[0]', 'skiprows': '(1)', 'delimiter': '""","""', 'dtype': 'int'}), "(not_dir + 'flares_davenport2016.txt', usecols=[0], skiprows=1,\n delimiter=',', dtype=int)\n", (4765, 4858), True, 'import numpy as np\n'), ((4867, 4968), 'numpy.loadtxt', 'np.loadtxt', (["(not_dir + 'mcquillan_rotation.txt')"], {'usecols': '[0]', 'skiprows': '(1)', 'delimiter': '""","""', 'dtype': 'int'}), "(not_dir + 'mcquillan_rotation.txt', usecols=[0], skiprows=1,\n delimiter=',', dtype=int)\n", (4877, 4968), True, 'import numpy as np\n'), ((11880, 11895), 'numpy.array', 'np.array', (['files'], {}), '(files)\n', (11888, 11895), True, 'import numpy as np\n'), ((11973, 11992), 'numpy.where', 'np.where', (['(loggs > 0)'], {}), '(loggs > 0)\n', (11981, 11992), True, 'import numpy as np\n'), ((12086, 12124), 'numpy.array', 'np.array', (['[astero_files, astero_loggs]'], {}), '([astero_files, astero_loggs])\n', (12094, 12124), True, 'import numpy as np\n'), ((897, 926), 'glob.glob', 'glob.glob', (["(pande_dir + '*.ps')"], {}), "(pande_dir + '*.ps')\n", (906, 926), False, 'import glob, os, csv, re, math\n'), ((963, 994), 'glob.glob', 'glob.glob', (["(pande_dir + '*.fits')"], {}), "(pande_dir + '*.fits')\n", (972, 994), False, 'import glob, os, csv, re, math\n'), ((1031, 1058), 'glob.glob', 'glob.glob', (["(ast_dir + '*.ps')"], {}), 
"(ast_dir + '*.ps')\n", (1040, 1058), False, 'import glob, os, csv, re, math\n'), ((1097, 1126), 'glob.glob', 'glob.glob', (["(ast_dir + '*.fits')"], {}), "(ast_dir + '*.fits')\n", (1106, 1126), False, 'import glob, os, csv, re, math\n'), ((6466, 6494), 're.search', 're.search', (['"""kplr(.*)-"""', 'file'], {}), "('kplr(.*)-', file)\n", (6475, 6494), False, 'import glob, os, csv, re, math\n'), ((7979, 8007), 're.search', 're.search', (['"""kplr(.*)-"""', 'file'], {}), "('kplr(.*)-', file)\n", (7988, 8007), False, 'import glob, os, csv, re, math\n'), ((9906, 9934), 're.search', 're.search', (['"""kplr(.*)-"""', 'file'], {}), "('kplr(.*)-', file)\n", (9915, 9934), False, 'import glob, os, csv, re, math\n'), ((10086, 10102), 'math.isnan', 'math.isnan', (['logg'], {}), '(logg)\n', (10096, 10102), False, 'import glob, os, csv, re, math\n'), ((10496, 10524), 're.search', 're.search', (['"""kplr(.*)-"""', 'file'], {}), "('kplr(.*)-', file)\n", (10505, 10524), False, 'import glob, os, csv, re, math\n'), ((11294, 11322), 're.search', 're.search', (['"""kplr(.*)-"""', 'file'], {}), "('kplr(.*)-', file)\n", (11303, 11322), False, 'import glob, os, csv, re, math\n'), ((11896, 11911), 'numpy.array', 'np.array', (['loggs'], {}), '(loggs)\n', (11904, 11911), True, 'import numpy as np\n'), ((12246, 12274), 're.search', 're.search', (['"""kplr(.*)-"""', 'file'], {}), "('kplr(.*)-', file)\n", (12255, 12274), False, 'import glob, os, csv, re, math\n')]
|
import numpy as np
from tabulate import tabulate
import matplotlib.pyplot as plt
import Page_Rank_Utils as pru
def detectedConverged(y,x,epsilon):
C = set()
N = set()
for i in range(len(y)):
if abs(y[i] - x[i])/abs(x[i]) < epsilon:
C.add(i)
else:
N.add(i)
return N, C
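# Note: the relative-change test divides by abs(x[i]); entries where x[i] == 0 would raise a
# ZeroDivisionError (or yield inf for NumPy floats) and may need special handling.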
def filter(A_prime, x_prime, N, C):
n = x_prime.shape[0] # N is a set of indices; size the loop by the state vector instead
for i in range(n):
if i in C:
x_prime[i] = 0
for j in range(n):
A_prime[i][j] = 0
return A_prime, x_prime
def Filter_APR(G, weight, period):
P = pru.stochastic_transition_matrix(G, weight, True)
n = P.shape[0]
# initialize eigenvectors
v_list = []
idx = 0
v_init = np.zeros(n)
v_init[-1] = 1
v_list.append(v_init)
converged = True
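# NOTE: converged is initialised to True, so the loop below never executes; the
# power-iteration body of Filter_APR appears to be an unfinished stub.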
while not converged:
return
|
[
"Page_Rank_Utils.stochastic_transition_matrix",
"numpy.zeros"
] |
[((591, 640), 'Page_Rank_Utils.stochastic_transition_matrix', 'pru.stochastic_transition_matrix', (['G', 'weight', '(True)'], {}), '(G, weight, True)\n', (623, 640), True, 'import Page_Rank_Utils as pru\n'), ((733, 744), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (741, 744), True, 'import numpy as np\n')]
|
#tiersweekly.py
from fantasyfootball import tiers
from fantasyfootball import fantasypros as fp
from fantasyfootball import config
from fantasyfootball import ffcalculator
from fantasyfootball.config import FIGURE_DIR
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from matplotlib import pyplot as plt
from matplotlib import patches as mpatches
from matplotlib.lines import Line2D
import numpy as np
import matplotlib.style as style
from datetime import date
from os import path
from collections import OrderedDict
flex_list = [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>']
work_list = [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>'
]
sean_list = [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>'
]
justin_list = [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME> II',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>'
]
different_spelling = [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
]
def make_clustering_viz_flex(tiers=15, kmeans=False, league=config.sean, player_cutoff=150, player_per_chart=50, x_size=20, y_size=15, covariance_type='diag', save=True, export=False, player_list=None):
"""
Generates a chart with colored tiers; you can either use kmeans of GMM
Optional: Pass in a custom tier dict to show varying numbers of tiers; default will be uniform across position
Optional: Pass in a custom pos_n dict to show different numbers of players by position
"""
pos = 'FLEX'
palette = ['red', 'blue', 'green', 'orange', '#900C3F', 'maroon', 'cornflowerblue', 'greenyellow', 'coral', 'orchid', 'firebrick', 'lightsteelblue', 'palegreen', 'darkorange', 'crimson', 'darkred', 'aqua', 'forestgreen', 'navajowhite', 'mediumpurple']
pos_shape = {
'RB': 'o',
'WR': 's',
'TE': '^'
}
df = fp.create_fantasy_pros_ecr_df(league)
#derive pos for flex players
pos_df = df.loc[df['pos'] != pos]
pos_map = dict(zip(pos_df['player_name'].to_list(), pos_df['pos'].to_list()))
df['pos_map'] = df['player_name'].map(pos_map)
df = (df.loc[df['pos'] == pos]
.sort_values('rank')
.reset_index(drop=True)
.head(player_cutoff)
)
df['rank'] = df['rank'].astype('int')
today = date.today()
date_str = today.strftime('%m.%d.%Y')
x = df.loc[:, ['best', 'worst', 'avg']].copy()
if kmeans:
kmm = KMeans(n_clusters=tiers).fit(x)
labels = kmm.predict(x)
else: # GaussianMixture
gmm = GaussianMixture(n_components=tiers, covariance_type=covariance_type, random_state=0).fit(x)
labels = gmm.predict(x)
unique_labels = list(OrderedDict.fromkeys(labels))
rank_dict = dict(zip(unique_labels, range(1,len(unique_labels)+1)))
df['tiers'] = labels
df['tiers'] = df['tiers'].map(rank_dict)
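# df is sorted by ECR rank, so cluster labels first appear in rank order; rank_dict
# therefore renumbers the raw labels as tiers 1..n from best to worst.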
style.use('ggplot')
colors = dict(zip(range(1, tiers+1), palette[:tiers]))
tier_lookup = dict(zip(palette[:tiers], range(1, tiers+1)))
chart_n = (player_cutoff // player_per_chart) + (player_cutoff % player_per_chart > 0)
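# Equivalent to ceil(player_cutoff / player_per_chart) without importing math.ceil.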
#filter current team players
if isinstance(player_list, list):
df = df.loc[df['player_name'].isin(player_list)].copy()
for ix, chunk_df in enumerate(np.array_split(df, chart_n)):
fig, ax = plt.subplots();
min_tier = min(chunk_df['tiers'])
max_tier = max(chunk_df['tiers'])
patches = []
color_chunk = [colors[i] for i in range(min_tier, max_tier + 1)]
patches = [mpatches.Patch(color=color, alpha=0.5, label=f'Tier {tier_lookup[color]}') for color in color_chunk]
pos_patches = [Line2D([0], [0], color='gray', label=pos, marker=shape, lw=0, markersize=12) for pos, shape in pos_shape.items()]
for _, row in chunk_df.iterrows():
xmin = row['best']
xmax = row['worst']
ymin, ymax = row['rank'], row['rank']
center = row['avg']
player = row['player_name'] + ', ' +row['tm'] + ' (' + row['pos_map'] + ')'
tier = row['tiers']
plt.scatter(center, ymax, color='gray', zorder=2, s=100, marker=pos_shape[row['pos_map']])
plt.scatter(xmin, ymax, marker= "|", color=colors.get(tier, 'moccasin'), alpha=0.5, zorder=1)
plt.scatter(xmax, ymax, marker= "|", color=colors.get(tier, 'moccasin'), alpha=0.5, zorder=1)
plt.plot((xmin, xmax), (ymin, ymax), color=colors.get(tier, 'moccasin'), alpha=0.5, zorder=1, linewidth=5.0)
plt.annotate(player, xy=(xmax+1, ymax))
#first legend
first_legend = plt.legend(handles=pos_patches, loc='lower left', borderpad=1, fontsize=12)
ax = plt.gca().add_artist(first_legend)
#second legend
plt.legend(handles=patches, borderpad=1, fontsize=12)
if player_list is not None:
league_name = league['name']
plt.title(f'{date_str} Fantasy Football Weekly - {pos} - {league_name} - {ix+1}')
else:
plt.title(f'{date_str} Fantasy Football Weekly - {pos} {ix+1}')
plt.xlabel('Average Expert Overall Rank')
plt.ylabel('Expert Consensus Position Rank')
fig.set_size_inches(x_size, y_size)
plt.gca().invert_yaxis()
#plt.tight_layout()
if save:
if kmeans:
if player_list is not None:
plt.savefig(path.join(FIGURE_DIR,fr'{date_str}_rangeofrankings_kmeans__FLEX_{league_name}_{ix+1}.png'))
else:
plt.savefig(path.join(FIGURE_DIR,fr'{date_str}_rangeofrankings_kmeans__{pos}_{ix+1}.png'))
else:
if player_list is not None:
plt.savefig(path.join(FIGURE_DIR,fr'{date_str}_rangeofrankings_gmm__FLEX_list{league_name}_{ix+1}.png'))
else:
plt.savefig(path.join(FIGURE_DIR,fr'{date_str}_rangeofrankings_gmm_{pos}_{ix+1}.png'))
if export:
df.to_csv(path.join(FIGURE_DIR,fr'{date_str}_ecr_tiers.csv'), index=False)
#return plt.show()
if __name__ == "__main__":
#run elbow chart or AIC/BIC chart to estimate optimal number of k for each pos
#revisit week 1 to see if URL changes for each week - if so, refactor viz func and fp df func
sean = config.sean
work = config.work
justin = config.justin
pos_tier_dict_viz = {
'RB' : 8,
'QB' : 6,
'WR' : 5,
'TE' : 5,
'DST' : 6,
'K' : 7
}
tiers.make_clustering_viz(tier_dict=pos_tier_dict_viz, league=sean, pos_n=35, covariance_type='diag', draft=False, save=True)
make_clustering_viz_flex(export=True)
make_clustering_viz_flex(league=sean, player_list=sean_list)
make_clustering_viz_flex(league=work, player_list=work_list)
make_clustering_viz_flex(league=justin, player_list=justin_list)
|
[
"matplotlib.pyplot.ylabel",
"fantasyfootball.tiers.make_clustering_viz",
"numpy.array_split",
"matplotlib.pyplot.annotate",
"matplotlib.style.use",
"matplotlib.lines.Line2D",
"collections.OrderedDict.fromkeys",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter",
"sklearn.mixture.GaussianMixture",
"matplotlib.pyplot.gca",
"matplotlib.patches.Patch",
"matplotlib.pyplot.title",
"datetime.date.today",
"matplotlib.pyplot.legend",
"sklearn.cluster.KMeans",
"os.path.join",
"fantasyfootball.fantasypros.create_fantasy_pros_ecr_df",
"matplotlib.pyplot.subplots"
] |
[((2721, 2758), 'fantasyfootball.fantasypros.create_fantasy_pros_ecr_df', 'fp.create_fantasy_pros_ecr_df', (['league'], {}), '(league)\n', (2750, 2758), True, 'from fantasyfootball import fantasypros as fp\n'), ((3148, 3160), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3158, 3160), False, 'from datetime import date\n'), ((3746, 3765), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (3755, 3765), True, 'import matplotlib.style as style\n'), ((7407, 7537), 'fantasyfootball.tiers.make_clustering_viz', 'tiers.make_clustering_viz', ([], {'tier_dict': 'pos_tier_dict_viz', 'league': 'sean', 'pos_n': '(35)', 'covariance_type': '"""diag"""', 'draft': '(False)', 'save': '(True)'}), "(tier_dict=pos_tier_dict_viz, league=sean, pos_n=\n 35, covariance_type='diag', draft=False, save=True)\n", (7432, 7537), False, 'from fantasyfootball import tiers\n'), ((3536, 3564), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['labels'], {}), '(labels)\n', (3556, 3564), False, 'from collections import OrderedDict\n'), ((4155, 4182), 'numpy.array_split', 'np.array_split', (['df', 'chart_n'], {}), '(df, chart_n)\n', (4169, 4182), True, 'import numpy as np\n'), ((4203, 4217), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4215, 4217), True, 'from matplotlib import pyplot as plt\n'), ((5518, 5593), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'pos_patches', 'loc': '"""lower left"""', 'borderpad': '(1)', 'fontsize': '(12)'}), "(handles=pos_patches, loc='lower left', borderpad=1, fontsize=12)\n", (5528, 5593), True, 'from matplotlib import pyplot as plt\n'), ((5673, 5726), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'patches', 'borderpad': '(1)', 'fontsize': '(12)'}), '(handles=patches, borderpad=1, fontsize=12)\n', (5683, 5726), True, 'from matplotlib import pyplot as plt\n'), ((5996, 6037), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Average Expert Overall Rank"""'], {}), "('Average Expert Overall Rank')\n", (6006, 6037), True, 'from matplotlib import pyplot as plt\n'), ((6046, 6090), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expert Consensus Position Rank"""'], {}), "('Expert Consensus Position Rank')\n", (6056, 6090), True, 'from matplotlib import pyplot as plt\n'), ((4416, 4490), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'color', 'alpha': '(0.5)', 'label': 'f"""Tier {tier_lookup[color]}"""'}), "(color=color, alpha=0.5, label=f'Tier {tier_lookup[color]}')\n", (4430, 4490), True, 'from matplotlib import patches as mpatches\n'), ((4540, 4616), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'color': '"""gray"""', 'label': 'pos', 'marker': 'shape', 'lw': '(0)', 'markersize': '(12)'}), "([0], [0], color='gray', label=pos, marker=shape, lw=0, markersize=12)\n", (4546, 4616), False, 'from matplotlib.lines import Line2D\n'), ((4996, 5091), 'matplotlib.pyplot.scatter', 'plt.scatter', (['center', 'ymax'], {'color': '"""gray"""', 'zorder': '(2)', 's': '(100)', 'marker': "pos_shape[row['pos_map']]"}), "(center, ymax, color='gray', zorder=2, s=100, marker=pos_shape[\n row['pos_map']])\n", (5007, 5091), True, 'from matplotlib import pyplot as plt\n'), ((5432, 5473), 'matplotlib.pyplot.annotate', 'plt.annotate', (['player'], {'xy': '(xmax + 1, ymax)'}), '(player, xy=(xmax + 1, ymax))\n', (5444, 5473), True, 'from matplotlib import pyplot as plt\n'), ((5816, 5904), 'matplotlib.pyplot.title', 'plt.title', (['f"""{date_str} Fantasy Football Weekly - {pos} - {league_name} - {ix + 1}"""'], {}), "(\n 
f'{date_str} Fantasy Football Weekly - {pos} - {league_name} - {ix + 1}')\n", (5825, 5904), True, 'from matplotlib import pyplot as plt\n'), ((5924, 5989), 'matplotlib.pyplot.title', 'plt.title', (['f"""{date_str} Fantasy Football Weekly - {pos} {ix + 1}"""'], {}), "(f'{date_str} Fantasy Football Weekly - {pos} {ix + 1}')\n", (5933, 5989), True, 'from matplotlib import pyplot as plt\n'), ((3283, 3307), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'tiers'}), '(n_clusters=tiers)\n', (3289, 3307), False, 'from sklearn.cluster import KMeans\n'), ((3387, 3475), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'tiers', 'covariance_type': 'covariance_type', 'random_state': '(0)'}), '(n_components=tiers, covariance_type=covariance_type,\n random_state=0)\n', (3402, 3475), False, 'from sklearn.mixture import GaussianMixture\n'), ((5607, 5616), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5614, 5616), True, 'from matplotlib import pyplot as plt\n'), ((6144, 6153), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6151, 6153), True, 'from matplotlib import pyplot as plt\n'), ((6896, 6946), 'os.path.join', 'path.join', (['FIGURE_DIR', 'f"""{date_str}_ecr_tiers.csv"""'], {}), "(FIGURE_DIR, f'{date_str}_ecr_tiers.csv')\n", (6905, 6946), False, 'from os import path\n'), ((6313, 6409), 'os.path.join', 'path.join', (['FIGURE_DIR', 'f"""{date_str}_rangeofrankings_kmeans__FLEX_{league_name}_{ix + 1}.png"""'], {}), "(FIGURE_DIR,\n f'{date_str}_rangeofrankings_kmeans__FLEX_{league_name}_{ix + 1}.png')\n", (6322, 6409), False, 'from os import path\n'), ((6460, 6539), 'os.path.join', 'path.join', (['FIGURE_DIR', 'f"""{date_str}_rangeofrankings_kmeans__{pos}_{ix + 1}.png"""'], {}), "(FIGURE_DIR, f'{date_str}_rangeofrankings_kmeans__{pos}_{ix + 1}.png')\n", (6469, 6539), False, 'from os import path\n'), ((6633, 6730), 'os.path.join', 'path.join', (['FIGURE_DIR', 'f"""{date_str}_rangeofrankings_gmm__FLEX_list{league_name}_{ix + 1}.png"""'], {}), "(FIGURE_DIR,\n f'{date_str}_rangeofrankings_gmm__FLEX_list{league_name}_{ix + 1}.png')\n", (6642, 6730), False, 'from os import path\n'), ((6780, 6855), 'os.path.join', 'path.join', (['FIGURE_DIR', 'f"""{date_str}_rangeofrankings_gmm_{pos}_{ix + 1}.png"""'], {}), "(FIGURE_DIR, f'{date_str}_rangeofrankings_gmm_{pos}_{ix + 1}.png')\n", (6789, 6855), False, 'from os import path\n')]
|
import itertools
import os
import shutil
import numpy as np
import gym
from gym import spaces
import robosuite
from robosuite.controllers import load_controller_config
import robosuite.utils.macros as macros
import imageio, tqdm
from her import HERReplayBuffer
from tianshou.data import Batch
macros.SIMULATION_TIMESTEP = 0.02
np.set_printoptions(suppress=True)
class PushingEnvironment(gym.Env):
def __init__(self, horizon, control_freq, num_obstacles=0, renderable=False):
self.num_obstacles = num_obstacles
self.renderable = renderable
self.env = robosuite.make(
"Push",
robots=["Panda"],
controller_configs=load_controller_config(default_controller="OSC_POSE"),
has_renderer=False,
has_offscreen_renderer=renderable,
render_visual_mesh=renderable,
render_collision_mesh=False,
camera_names=["agentview"] if renderable else None,
control_freq=control_freq,
horizon=horizon,
use_object_obs=True,
use_camera_obs=renderable,
hard_reset=False,
num_obstacles=num_obstacles,
)
low, high = self.env.action_spec
self.action_space = spaces.Box(low=low[:3], high=high[:3])
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=[12 + 6 * num_obstacles])
self.curr_obs = None
self.step_num = None
def seed(self, seed=None):
if seed is not None:
np.random.seed(seed)
self.action_space.seed(seed)
def _get_flat_obs(self, obs):
return np.concatenate([
obs["robot0_eef_pos"],
obs["gripper_to_cube_pos"],
obs["gripper_to_goal_pos"],
obs["cube_to_goal_pos"],
] + list(itertools.chain.from_iterable(zip(
[obs[f"gripper_to_obstacle{i}_pos"] for i in range(self.num_obstacles)],
[obs[f"cube_to_obstacle{i}_pos"] for i in range(self.num_obstacles)]
))))
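# Flat observation layout: [0:3] end-effector position, [3:6] gripper-to-cube,
# [6:9] gripper-to-goal, [9:12] cube-to-goal, then 6 values per obstacle
# (gripper-to-obstacle and cube-to-obstacle interleaved) -> 12 + 6 * num_obstacles total.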
def reset(self):
self.curr_obs = self.env.reset()
self.step_num = 0
return self._get_flat_obs(self.curr_obs)
def step(self, action):
next_obs, reward, done, info = self.env.step(np.concatenate([action, [0, 0, 0]]))
info["TimeLimit.truncated"] = done
return_obs = self._get_flat_obs(next_obs)
if self.renderable:
info["image"] = self.curr_obs["agentview_image"][::-1]
info["step"] = self.step_num
if done:
info["final_image"] = next_obs["agentview_image"][::-1]
self.curr_obs = next_obs
self.step_num += 1
return return_obs, reward, done, info
def her(self, obs, obs_next):
"""
Takes a list of observations (and next observations) from an entire episode and returns
the HER-modified version of the episode as (obs, obs_next, reward, done, info).
"""
obs = np.array(obs)
obs_next = np.array(obs_next)
# final cube position
fake_goal = obs_next[-1, :3] - obs_next[-1, 3:6]
# gripper to goal pos
obs[:, 6:9] = obs[:, :3] - fake_goal
obs_next[:, 6:9] = obs_next[:, :3] - fake_goal
# cube to goal pos
obs[:, 9:12] = (obs[:, :3] - obs[:, 3:6]) - fake_goal  # only the cube-to-goal block, leaving any obstacle features untouched
obs_next[:, 9:12] = (obs_next[:, :3] - obs_next[:, 3:6]) - fake_goal
rewards = [self.env.compute_reward(fake_goal, on[:3] - on[3:6], {}) for on in obs_next]
# rewards = []
# for on in obs_next:
# reward = self.compute_reward(fake_goal, on[:3] - on[3:6], {})
# rewards.append(reward)
# if reward == 0:
# break
dones = np.full_like(rewards, False, dtype=bool)
dones[-1] = True
infos = {
"TimeLimit.truncated": dones.copy()
}
return obs[:len(rewards)], obs_next[:len(rewards)], np.array(rewards), dones, infos
def render(self, mode="human"):
assert self.renderable
return self.curr_obs["agentview_image"][::-1]
if __name__ == "__main__":
shutil.rmtree("render", ignore_errors=True)  # don't fail if the directory doesn't exist yet
os.makedirs("render")
env = PushingEnvironment(1, 2, 10, renderable=True)
env.seed(0)
# buf = HERReplayBuffer(env, total_size=20, buffer_num=1)
obs = env.reset()
# for i in range(3):
# buf.add(Batch(
# obs=[obs],
# obs_next=[obs],
# act=[[0, 0, 0]],
# rew=[-100],
# done=[False if i < 2 else True]
# ))
# actions = [[0, 0, 1]] * 2 + [[0, -1, 0]] * 2 + [[1, 0, -1]] * 2 + [[0, 1, 0]] * 3\
# + [[0, 0, 0]] * 2 + [[1, 0, 0]] * 2 + [[0, 1, -1]] + [[-1, 0, 0]] * 4
for i in tqdm.tqdm(range(300)):
# print(env.env.robots[0]._joint_positions)
img = env.render()
imageio.imwrite(f"render/{i:03}.png", img)
obs_next, rew, done, _ = env.step(env.action_space.sample())
# if i == 17:
# done = True
# buf.add(Batch(
# obs=[obs],
# obs_next=[obs_next],
# act=[actions[i]],
# rew=[rew],
# done=[done]
# ))
obs = obs_next
if done:
# env.seed(i // 30 + 10)
env.reset()
|
[
"robosuite.controllers.load_controller_config",
"numpy.full_like",
"os.makedirs",
"imageio.imwrite",
"gym.spaces.Box",
"numpy.array",
"numpy.random.seed",
"numpy.concatenate",
"shutil.rmtree",
"numpy.set_printoptions"
] |
[((331, 365), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (350, 365), True, 'import numpy as np\n'), ((4148, 4171), 'shutil.rmtree', 'shutil.rmtree', (['"""render"""'], {}), "('render')\n", (4161, 4171), False, 'import shutil\n'), ((4176, 4197), 'os.makedirs', 'os.makedirs', (['"""render"""'], {}), "('render')\n", (4187, 4197), False, 'import os\n'), ((1254, 1292), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'low[:3]', 'high': 'high[:3]'}), '(low=low[:3], high=high[:3])\n', (1264, 1292), False, 'from gym import spaces\n'), ((1327, 1395), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '[12 + 6 * num_obstacles]'}), '(low=-np.inf, high=np.inf, shape=[12 + 6 * num_obstacles])\n', (1337, 1395), False, 'from gym import spaces\n'), ((2996, 3009), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (3004, 3009), True, 'import numpy as np\n'), ((3029, 3047), 'numpy.array', 'np.array', (['obs_next'], {}), '(obs_next)\n', (3037, 3047), True, 'import numpy as np\n'), ((3759, 3799), 'numpy.full_like', 'np.full_like', (['rewards', '(False)'], {'dtype': 'bool'}), '(rewards, False, dtype=bool)\n', (3771, 3799), True, 'import numpy as np\n'), ((4867, 4909), 'imageio.imwrite', 'imageio.imwrite', (['f"""render/{i:03}.png"""', 'img'], {}), "(f'render/{i:03}.png', img)\n", (4882, 4909), False, 'import imageio, tqdm\n'), ((1527, 1547), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1541, 1547), True, 'import numpy as np\n'), ((2259, 2294), 'numpy.concatenate', 'np.concatenate', (['[action, [0, 0, 0]]'], {}), '([action, [0, 0, 0]])\n', (2273, 2294), True, 'import numpy as np\n'), ((3961, 3978), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (3969, 3978), True, 'import numpy as np\n'), ((681, 734), 'robosuite.controllers.load_controller_config', 'load_controller_config', ([], {'default_controller': '"""OSC_POSE"""'}), "(default_controller='OSC_POSE')\n", (703, 734), False, 'from robosuite.controllers import load_controller_config\n')]
|
from __future__ import annotations
import math
from collections import deque
from typing import Optional, Callable
import numpy as np
import pygame
from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, \
PIECE_INDICES, init_zobrist, MoveFlags, GameState
from chess.utils import load_image, load_font
class Chessboard:
"""Chessboard interface (8x8 field)"""
def __init__(self, light_colour="#F0D9B5", dark_colour="#B58863") -> None:
# Board itself
self._board = np.array([Piece.empty()] * 64)
# Active colour
self._active_colour = PieceColour.White
# Castling rights
self._castling_rights = {
PieceColour.White: {
CastlingType.KingSide: False,
CastlingType.QueenSide: False
},
PieceColour.Black: {
CastlingType.KingSide: False,
CastlingType.QueenSide: False
}
}
# Store piece types as strings
self._get_piece_str = {PieceType.Pawn: "pawn",
PieceType.Knight: "knight",
PieceType.Bishop: "bishop",
PieceType.Rook: "rook",
PieceType.Queen: "queen",
PieceType.King: "king"}
# Store piece move validators
self._get_validator: dict[
PieceType, Callable[[int, int, int, int], bool]] \
= {PieceType.Pawn: self._can_pawn_make,
PieceType.Knight: self._can_knight_make,
PieceType.Bishop: self._can_bishop_make,
PieceType.Rook: self._can_rook_make,
PieceType.Queen: self._can_queen_make,
PieceType.King: self._can_king_make}
# En Passant target
self._en_passant_target: Optional[int] = None
# Half-move clock
self._halfmoves = 0
# Init zobrist hash
self._z_table = init_zobrist()
# Board appearance
self._light_colour = pygame.Color(light_colour)
self._dark_colour = pygame.Color(dark_colour)
self._light_complementary = pygame.Color("#DBAB84")
self._dark_complementary = pygame.Color("#DBC095")
self._move_colour = pygame.Color("#8D80AD")
self._bg_colour = pygame.Color("#443742")
self._side = 100 # px
self._font_size = 45
self._font_gap = 15
self._font = load_font("ubuntumono/UbuntuMono-R.ttf", self._font_size)
self._font_colour = pygame.Color("white")
@property
def board(self) -> np.ndarray:
return self._board
@property
def halfmoves(self) -> int:
return self._halfmoves
@property
def active_colour(self) -> PieceColour:
return self._active_colour
@property
def passive_colour(self) -> PieceColour:
return PieceColour.White if self._active_colour == PieceColour.Black else PieceColour.Black
def hash(self) -> int:
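        """Compute a Zobrist hash of the position (XOR of per-square, per-piece keys)."""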
h = 0
for i in range(64):
piece = self._board[i]
if piece.Type != PieceType.Empty:
j = PIECE_INDICES[piece.Type.value | piece.Colour.value]
h ^= self._z_table[i][j]
return h
def set_colours(self, light_colour: str, dark_colour: str,
light_complementary: str, dark_complementary: str) -> None:
self._light_colour = pygame.Color(light_colour)
self._dark_colour = pygame.Color(dark_colour)
self._light_complementary = pygame.Color(light_complementary)
self._dark_complementary = pygame.Color(dark_complementary)
def render(self, screen: pygame.Surface,
last_move=None, skip=None, pos=None, game_info=None) -> None:
"""Render chessboard"""
if skip is not None and pos is None:
raise ValueError("skip is not None but pos is None")
screen.fill(self._bg_colour)
group = pygame.sprite.Group()
grabbed_data = None
skip: Optional[tuple[int]]
can_move_now = None if skip is None else self._get_all_piece_moves(skip[0] + skip[1] * 8)
for i, piece in enumerate(self._board):
x, y = i % 8, i // 8
if pos is not None and i in can_move_now:
pygame.draw.rect(screen, self._move_colour,
(x * self._side, y * self._side,
self._side, self._side))
elif last_move is not None and last_move.From == i:
pygame.draw.rect(screen, self._light_complementary,
(x * self._side, y * self._side,
self._side, self._side))
elif last_move is not None and last_move.To == i or (x, y) == skip:
pygame.draw.rect(screen, self._dark_complementary,
(x * self._side, y * self._side,
self._side, self._side))
else:
if (x + y) % 2 == 0:
colour = self._light_colour
else:
colour = self._dark_colour
pygame.draw.rect(screen, colour,
(x * self._side, y * self._side,
self._side, self._side))
if piece.Type == PieceType.Empty:
continue
elif (x, y) == skip:
grabbed_data = f"{self._get_piece_str[piece.Type]}_" \
f"{'w' if piece.Colour == PieceColour.White else 'b'}.png", i, group
else:
PieceSprite(
f"{self._get_piece_str[piece.Type]}_"
f"{'w' if piece.Colour == PieceColour.White else 'b'}"
f".png", i, group)
if grabbed_data is not None:
grabbed_piece = PieceSprite(*grabbed_data)
grabbed_piece.rect.x = pos[0] - 50 # type: ignore
grabbed_piece.rect.y = pos[1] - 50 # type: ignore
group.draw(screen)
text = ["Ход " + ("белых"
if self._active_colour == PieceColour.White
else "чёрных")]
if game_info is not None:
text.extend([f"Оценка: {game_info[0]}",
f"Позиций: {game_info[2]}",
f"Глубина: {game_info[3]}",
f"Время: {game_info[1]}с"])
line_pos = (screen.get_rect().h -
len(text) * (self._font_size + self._font_gap) -
self._font_gap) // 2
for line in text:
line_rendered = self._font.render(line, True, self._font_colour)
l_rect = line_rendered.get_rect()
screen.blit(line_rendered, (800 + (400 - l_rect.w) // 2, line_pos))
line_pos += self._font_size + self._font_gap
def at(self, x: int, y: int) -> Piece:
"""Get piece from position on the board"""
if 0 <= x <= 7 and 0 <= y <= 7:
return self._board[x + y * 8]
return Piece.empty()
def toggle_state(self) -> GameState:
"""Return game state after active colour move"""
other_colour = PieceColour.Black \
if self._active_colour == PieceColour.White \
else PieceColour.White
self._active_colour = other_colour
if self.get_all_moves(other_colour):
return GameState.Continue
elif self.king_is_safe(other_colour):
return GameState.Stalemate
else:
return GameState.Checkmate
def _force_can_make(self, move: Move) -> Optional[Move]:
"""
Check if the move is correct with adding corresponding flags
(!) Without checking king safety and turn order
"""
# Can't make incorrect move
if move.Captured != self._board[move.To]:
return None
this_piece: Piece = self._board[move.From]
other_piece: Piece = self._board[move.To]
# Can't make move w/o piece itself
if this_piece.Type == PieceType.Empty:
return None
# Can't eat pieces of your colour
if other_piece.Type != PieceType.Empty and \
other_piece.Colour == this_piece.Colour:
return None
# Resolving piece xy coordinates to calculate move possibility
y1, y2 = move.From // 8, move.To // 8
x1, x2 = move.From % 8, move.To % 8
# Castling
if this_piece.Type == PieceType.King and \
y1 == y2 and abs(x1 - x2) == 2 \
and move.Captured == Piece.empty():
castling = CastlingType.QueenSide if x1 - x2 == 2 \
else CastlingType.KingSide
if castling == CastlingType.QueenSide and (
self._board[move.To - 1] != Piece.empty() or
self._board[move.From - 1] != Piece.empty() or
self._board[move.From - 2] != Piece.empty()):
return None
elif castling == CastlingType.KingSide and (
self._board[move.From + 1] != Piece.empty() or
self._board[move.From + 2] != Piece.empty()):
return None
if self._castling_rights[this_piece.Colour][castling]:
lost_castling = {castling}
other_side = CastlingType.KingSide \
if castling == CastlingType.QueenSide \
else CastlingType.QueenSide
if self._castling_rights[this_piece.Colour][other_side]:
lost_castling.add(other_side)
move.Flags = MoveFlags(Castling=castling,
LoseCastling=lost_castling)
else:
return None
elif this_piece.Type == PieceType.King:
# Losing castling rights after king move
lost_castling = set()
if self._castling_rights[this_piece.Colour][CastlingType.KingSide]:
lost_castling.add(CastlingType.KingSide)
if self._castling_rights[this_piece.Colour][CastlingType.QueenSide]:
lost_castling.add(CastlingType.QueenSide)
move.Flags = MoveFlags(LoseCastling=lost_castling)
elif this_piece.Type == PieceType.Rook:
# Losing castling rights after rook move
if x1 == 0 and self._castling_rights[this_piece.Colour][CastlingType.QueenSide]:
move.Flags = MoveFlags(LoseCastling={CastlingType.QueenSide})
elif x1 == 7 and self._castling_rights[this_piece.Colour][CastlingType.KingSide]:
move.Flags = MoveFlags(LoseCastling={CastlingType.KingSide})
elif this_piece.Type == PieceType.Pawn and 0 <= move.To <= 7:
move.Flags = MoveFlags(PawnPromotion=PieceType.Queen)
if self._get_validator[this_piece.Type](x1, y1, x2, y2):
return move
return None
def can_make(self, move: Move) -> Optional[Move]:
"""Check if the move is correct"""
# Checking basic move correctness
completed_move = self._force_can_make(move)
if completed_move is not None:
# Can't capture the king
if self._board[move.To].Type == PieceType.King:
return None
# Checking king safety
self.make_move(move)
safety = self.king_is_safe(self._board[move.To].Colour)
self.unmake_move(move)
return completed_move if safety else None
return None
def make_move(self, move: Move) -> None:
"""
Make move on the board
        Use board.can_make() beforehand to check that the move is correct
"""
# Removing castling rights
if move.Flags.LoseCastling is not None:
this_colour = self._board[move.From].Colour
for castling in move.Flags.LoseCastling:
self._castling_rights[this_colour][castling] = False
# Moving piece
self._halfmoves += 1
self._board[move.To] = self._board[move.From]
self._board[move.From] = Piece.empty()
if move.Flags.PawnPromotion is not None:
self._board[move.To] = Piece(move.Flags.PawnPromotion,
self._board[move.To].Colour)
# Doing castling
if move.Flags.Castling is not None:
if move.Flags.Castling == CastlingType.KingSide:
self._board[move.From + 1] = self._board[move.To + 1]
self._board[move.To + 1] = Piece.empty()
else:
self._board[move.From - 1] = self._board[move.To - 2]
self._board[move.To - 2] = Piece.empty()
def unmake_move(self, move: Move) -> None:
"""Unmake move on the board (no additional checking)"""
# Returning castling rights
if move.Flags.LoseCastling is not None:
this_colour = self._board[move.To].Colour
for castling in move.Flags.LoseCastling:
self._castling_rights[this_colour][castling] = True
# Unmoving piece
self._halfmoves -= 1
self._board[move.From] = self._board[move.To]
self._board[move.To] = move.Captured
# Demoting pawn
if move.Flags.PawnPromotion is not None:
self._board[move.From] = Piece(PieceType.Pawn,
self._board[move.From].Colour)
# Undoing castling
if move.Flags.Castling is not None:
if move.Flags.Castling == CastlingType.KingSide:
self._board[move.To + 1] = self._board[move.From + 1]
self._board[move.From + 1] = Piece.empty()
else:
self._board[move.To - 2] = self._board[move.From - 1]
self._board[move.From - 1] = Piece.empty()
def get_all_moves(self, colour: PieceColour, no_castling=False) -> deque[Move]:
moves: deque[Move] = deque()
for i, piece_from in enumerate(self._board):
if piece_from.Type == PieceType.Empty or \
piece_from.Colour != colour:
continue
for j, piece_to in enumerate(self._board):
move = self.can_make(Move(i, j, piece_to))
if move is not None and (not no_castling or move.Flags.Castling is None):
moves.append(move)
return moves
def _get_all_piece_moves(self, pos: int) -> deque[int]:
moves: deque[int] = deque()
for i, piece_to in enumerate(self._board):
move = self.can_make(Move(pos, i, piece_to))
if move is not None:
moves.append(move.To)
return moves
def king_is_safe(self, colour: PieceColour) -> bool:
"""Check if king is safe on current board state"""
king_pos = np.where(self._board == Piece(PieceType.King, colour))[0][0]
king_x, king_y = king_pos % 8, king_pos // 8
right_side = range(king_x + 1, 8)
left_side = range(king_x - 1, -1, -1)
bottom_side = range(king_y + 1, 8)
top_side = range(king_y - 1, -1, -1)
o_colour = PieceColour.White if \
colour == PieceColour.Black else PieceColour.Black
o_pawn = Piece(PieceType.Pawn, o_colour)
o_knight = Piece(PieceType.Knight, o_colour)
o_bishop = Piece(PieceType.Bishop, o_colour)
o_rook = Piece(PieceType.Rook, o_colour)
o_queen = Piece(PieceType.Queen, o_colour)
o_king = Piece(PieceType.King, o_colour)
# Horizontal and vertical
def _line(iter_side: range, const_x: bool) -> bool:
for component in iter_side:
attacking_piece = self.at(king_x, component) \
if const_x \
else self.at(component, king_y)
if attacking_piece.Type != PieceType.Empty:
if attacking_piece == o_rook or \
attacking_piece == o_queen:
return True
return False
return False
if _line(right_side, False) or _line(left_side, False) or \
_line(top_side, True) or _line(bottom_side, True):
return False
# All diagonals
def _diagonal(iter_side_x: range, iter_side_y: range) -> bool:
for x, y in zip(iter_side_x, iter_side_y):
attacking_piece = self.at(x, y)
if attacking_piece.Type != PieceType.Empty:
if attacking_piece == o_bishop or \
attacking_piece == o_queen:
return True
return False
return False
if _diagonal(right_side, bottom_side) or \
_diagonal(left_side, bottom_side) or \
_diagonal(right_side, top_side) or \
_diagonal(left_side, top_side):
return False
# Pawns
sign_ = -1 if colour == PieceColour.White else 1
if self.at(king_x + 1, king_y + sign_) == o_pawn or \
self.at(king_x - 1, king_y + sign_) == o_pawn:
return False
# Knight
if self.at(king_x + 1, king_y + 2) == o_knight or \
self.at(king_x - 1, king_y + 2) == o_knight or \
self.at(king_x + 2, king_y + 1) == o_knight or \
self.at(king_x - 2, king_y + 1) == o_knight or \
self.at(king_x + 1, king_y - 2) == o_knight or \
self.at(king_x - 1, king_y - 2) == o_knight or \
self.at(king_x + 2, king_y - 1) == o_knight or \
self.at(king_x - 2, king_y - 1) == o_knight:
return False
# King
opponent_king_pos = np.where(self._board == o_king)[0][0]
if self._can_king_make(opponent_king_pos % 8,
opponent_king_pos // 8,
king_x, king_y):
return False
return True
def _can_pawn_make(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if pawn can make move"""
direction = -1 if \
self._board[y1 * 8 + x1].Colour == PieceColour.White \
else 1
to_capture = self._board[y2 * 8 + x2].Type != PieceType.Empty
dx = abs(x2 - x1)
if y2 - y1 == direction and \
((dx == 1 and to_capture) or (dx == 0 and not to_capture)):
return True
return (not to_capture and
(y1 == 1 or y1 == 6) and
y2 - y1 == direction * 2 and
dx == 0 and self._board[y1 * 8 + x1 + direction * 8].Type ==
PieceType.Empty)
@staticmethod
def _can_knight_make(x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if knight can make move"""
dx, dy = abs(x2 - x1), abs(y2 - y1)
return dx == 1 and dy == 2 or dx == 2 and dy == 1
def _can_bishop_make(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if bishop can make move"""
return (abs(x1 - x2) == abs(y1 - y2)) and self._diagonal_is_free(
x1, y1, x2, y2)
def _can_rook_make(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if rook can make move"""
return self._horizontal_is_free(x1, y1, x2, y2) \
if y1 == y2 else self._vertical_is_free(x1, y1, x2, y2) \
if x1 == x2 else False
def _can_queen_make(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if queen can make move"""
return \
self._can_bishop_make(x1, y1, x2, y2) or \
self._can_rook_make(x1, y1, x2, y2)
@staticmethod
def _can_king_make(x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if king can make move"""
return (abs(x2 - x1) < 2 and abs(y2 - y1) < 2) or \
(abs(x1 - x2) == 2 and y1 == y2)
def _diagonal_is_free(self, x1: int, y1: int, x2: int, y2: int) -> bool:
"""Check if diagonal is free (not included end points)"""
sign_x = int(math.copysign(1, x2 - x1))
sign_y = int(math.copysign(1, y2 - y1))
for x, y in zip(range(x1 + sign_x, x2, sign_x),
range(y1 + sign_y, y2, sign_y)):
if self._board[y * 8 + x].Type != PieceType.Empty:
return False
return True
def _horizontal_is_free(self, x1: int, y1: int, x2: int, _: int) -> bool:
"""Check if horizontal is free (not included end points)"""
sign = int(math.copysign(1, x2 - x1))
for x in range(x1 + sign, x2, sign):
if self._board[y1 * 8 + x].Type != PieceType.Empty:
return False
return True
def _vertical_is_free(self, x1: int, y1: int, _: int, y2: int) -> bool:
"""Check if vertical is free (not included end points)"""
sign = int(math.copysign(1, y2 - y1))
for y in range(y1 + sign, y2, sign):
if self._board[y * 8 + x1].Type != PieceType.Empty:
return False
return True
@classmethod
def _parse_fen(cls, fen_string: str) -> Chessboard:
"""
Parse FEN string,
use Chessboard.from_fen() instead
"""
# Setup
error_info = f"Invalid FEN string: {fen_string}"
tmp_board = cls()
fen_dict = {"p": PieceType.Pawn,
"n": PieceType.Knight,
"b": PieceType.Bishop,
"r": PieceType.Rook,
"q": PieceType.Queen,
"k": PieceType.King}
fields = fen_string.split()
assert len(fields) == 6, error_info
tmp_position = 0
# Parse First field (Piece Placement)
for sym in fields[0]:
if sym == "/":
assert tmp_position % 8 == 0, error_info
continue
if sym.isdigit():
tmp_position += int(sym)
assert tmp_position < 65, error_info
continue
assert sym.lower() in fen_dict, error_info
clr = PieceColour.White if sym.isupper() else PieceColour.Black
type_ = fen_dict[sym.lower()]
tmp_board._board[tmp_position] = Piece(type_, clr)
tmp_position += 1
assert tmp_position == 64, error_info
# Parse Second Field (Active Color)
if fields[1] == "b":
tmp_board._active_colour = PieceColour.Black
elif fields[1] == "w":
tmp_board._active_colour = PieceColour.White
else:
assert False, error_info
# Parse Third field (Castling Rights)
if fields[2] != "-":
for castling in fields[2]:
if castling.lower() == "q":
tmp_board._castling_rights[
PieceColour.White if castling.isupper()
else PieceColour.Black][CastlingType.QueenSide] = True
elif castling.lower() == "k":
tmp_board._castling_rights[
PieceColour.White if castling.isupper()
else PieceColour.Black][CastlingType.KingSide] = True
else:
assert False, error_info
# Parse Fourth field (Possible En Passant Targets)
alg_cell = fields[3]
if alg_cell != "-":
assert len(alg_cell) == 2, error_info
assert 96 < ord(alg_cell[0]) < 105, error_info
assert alg_cell[1].isdigit() and 0 < int(alg_cell[1]) < 9
tmp_board._en_passant_target = int(
(8 - int(alg_cell[1])) * 8 + ord(alg_cell[0]) - 97)
        # Parse Fifth field (Half-move Clock)
        assert fields[4].isnumeric() and int(fields[4]) >= 0, error_info
        tmp_board._halfmoves = int(fields[4])
        # Parse Sixth field (Full-move Number)
        assert fields[5].isnumeric(), error_info
return tmp_board
@classmethod
def from_fen(cls, fen_string: str) -> Chessboard:
"""Create Chessboard using FEN"""
try:
return cls._parse_fen(fen_string)
except AssertionError as e:
raise ValueError(str(e))
@classmethod
def from_state(cls, state: np.ndarray) -> Chessboard:
"""Create Chessboard using state"""
tmp_board = cls()
tmp_board._board = state
return tmp_board
class PieceSprite(pygame.sprite.Sprite):
"""Piece class for drawing on a board"""
def __init__(self, sprite_img: str, pos: int,
*groups: pygame.sprite.AbstractGroup):
super().__init__(*groups)
self.image = load_image(sprite_img)
self.rect = self.image.get_rect()
self.move_sprite(pos)
def move_sprite(self, position: int) -> None:
self.rect.x = position % 8 * 100 # type: ignore
self.rect.y = position // 8 * 100 # type: ignore
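# --- Usage sketch (not part of the original module) ---
# A hedged, minimal example of driving the Chessboard API defined above. It assumes
# the chess package and its bundled font assets are importable/available, because
# Chessboard.__init__ loads a font via pygame.
if __name__ == "__main__":
    pygame.init()
    start_fen = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
    board = Chessboard.from_fen(start_fen)
    legal_moves = board.get_all_moves(board.active_colour)
    print(f"Legal moves in the starting position: {len(legal_moves)}")
    if legal_moves:
        board.make_move(legal_moves[0])
        print("State after one move:", board.toggle_state())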
|
[
"chess.utils.load_image",
"collections.deque",
"numpy.where",
"pygame.sprite.Group",
"chess.const.Move",
"chess.const.Piece.empty",
"math.copysign",
"chess.const.MoveFlags",
"pygame.draw.rect",
"pygame.Color",
"chess.utils.load_font",
"chess.const.Piece",
"chess.const.init_zobrist"
] |
[((1993, 2007), 'chess.const.init_zobrist', 'init_zobrist', ([], {}), '()\n', (2005, 2007), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((2064, 2090), 'pygame.Color', 'pygame.Color', (['light_colour'], {}), '(light_colour)\n', (2076, 2090), False, 'import pygame\n'), ((2119, 2144), 'pygame.Color', 'pygame.Color', (['dark_colour'], {}), '(dark_colour)\n', (2131, 2144), False, 'import pygame\n'), ((2181, 2204), 'pygame.Color', 'pygame.Color', (['"""#DBAB84"""'], {}), "('#DBAB84')\n", (2193, 2204), False, 'import pygame\n'), ((2240, 2263), 'pygame.Color', 'pygame.Color', (['"""#DBC095"""'], {}), "('#DBC095')\n", (2252, 2263), False, 'import pygame\n'), ((2292, 2315), 'pygame.Color', 'pygame.Color', (['"""#8D80AD"""'], {}), "('#8D80AD')\n", (2304, 2315), False, 'import pygame\n'), ((2342, 2365), 'pygame.Color', 'pygame.Color', (['"""#443742"""'], {}), "('#443742')\n", (2354, 2365), False, 'import pygame\n'), ((2475, 2532), 'chess.utils.load_font', 'load_font', (['"""ubuntumono/UbuntuMono-R.ttf"""', 'self._font_size'], {}), "('ubuntumono/UbuntuMono-R.ttf', self._font_size)\n", (2484, 2532), False, 'from chess.utils import load_image, load_font\n'), ((2561, 2582), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (2573, 2582), False, 'import pygame\n'), ((3447, 3473), 'pygame.Color', 'pygame.Color', (['light_colour'], {}), '(light_colour)\n', (3459, 3473), False, 'import pygame\n'), ((3502, 3527), 'pygame.Color', 'pygame.Color', (['dark_colour'], {}), '(dark_colour)\n', (3514, 3527), False, 'import pygame\n'), ((3564, 3597), 'pygame.Color', 'pygame.Color', (['light_complementary'], {}), '(light_complementary)\n', (3576, 3597), False, 'import pygame\n'), ((3633, 3665), 'pygame.Color', 'pygame.Color', (['dark_complementary'], {}), '(dark_complementary)\n', (3645, 3665), False, 'import pygame\n'), ((3984, 4005), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (4003, 4005), False, 'import pygame\n'), ((7122, 7135), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (7133, 7135), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((12173, 12186), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (12184, 12186), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((14027, 14034), 'collections.deque', 'deque', ([], {}), '()\n', (14032, 14034), False, 'from collections import deque\n'), ((14570, 14577), 'collections.deque', 'deque', ([], {}), '()\n', (14575, 14577), False, 'from collections import deque\n'), ((15326, 15357), 'chess.const.Piece', 'Piece', (['PieceType.Pawn', 'o_colour'], {}), '(PieceType.Pawn, o_colour)\n', (15331, 15357), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((15377, 15410), 'chess.const.Piece', 'Piece', (['PieceType.Knight', 'o_colour'], {}), '(PieceType.Knight, o_colour)\n', (15382, 15410), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((15430, 15463), 'chess.const.Piece', 'Piece', (['PieceType.Bishop', 'o_colour'], {}), '(PieceType.Bishop, o_colour)\n', (15435, 15463), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, 
MoveFlags, GameState\n'), ((15481, 15512), 'chess.const.Piece', 'Piece', (['PieceType.Rook', 'o_colour'], {}), '(PieceType.Rook, o_colour)\n', (15486, 15512), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((15531, 15563), 'chess.const.Piece', 'Piece', (['PieceType.Queen', 'o_colour'], {}), '(PieceType.Queen, o_colour)\n', (15536, 15563), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((15581, 15612), 'chess.const.Piece', 'Piece', (['PieceType.King', 'o_colour'], {}), '(PieceType.King, o_colour)\n', (15586, 15612), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((24726, 24748), 'chess.utils.load_image', 'load_image', (['sprite_img'], {}), '(sprite_img)\n', (24736, 24748), False, 'from chess.utils import load_image, load_font\n'), ((12271, 12331), 'chess.const.Piece', 'Piece', (['move.Flags.PawnPromotion', 'self._board[move.To].Colour'], {}), '(move.Flags.PawnPromotion, self._board[move.To].Colour)\n', (12276, 12331), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((13409, 13461), 'chess.const.Piece', 'Piece', (['PieceType.Pawn', 'self._board[move.From].Colour'], {}), '(PieceType.Pawn, self._board[move.From].Colour)\n', (13414, 13461), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((20155, 20180), 'math.copysign', 'math.copysign', (['(1)', '(x2 - x1)'], {}), '(1, x2 - x1)\n', (20168, 20180), False, 'import math\n'), ((20203, 20228), 'math.copysign', 'math.copysign', (['(1)', '(y2 - y1)'], {}), '(1, y2 - y1)\n', (20216, 20228), False, 'import math\n'), ((20621, 20646), 'math.copysign', 'math.copysign', (['(1)', '(x2 - x1)'], {}), '(1, x2 - x1)\n', (20634, 20646), False, 'import math\n'), ((20968, 20993), 'math.copysign', 'math.copysign', (['(1)', '(y2 - y1)'], {}), '(1, y2 - y1)\n', (20981, 20993), False, 'import math\n'), ((22326, 22343), 'chess.const.Piece', 'Piece', (['type_', 'clr'], {}), '(type_, clr)\n', (22331, 22343), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((4318, 4423), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'self._move_colour', '(x * self._side, y * self._side, self._side, self._side)'], {}), '(screen, self._move_colour, (x * self._side, y * self._side,\n self._side, self._side))\n', (4334, 4423), False, 'import pygame\n'), ((8664, 8677), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (8675, 8677), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((9709, 9765), 'chess.const.MoveFlags', 'MoveFlags', ([], {'Castling': 'castling', 'LoseCastling': 'lost_castling'}), '(Castling=castling, LoseCastling=lost_castling)\n', (9718, 9765), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((10287, 10324), 'chess.const.MoveFlags', 'MoveFlags', ([], {'LoseCastling': 'lost_castling'}), '(LoseCastling=lost_castling)\n', (10296, 10324), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, 
MoveFlags, GameState\n'), ((12616, 12629), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (12627, 12629), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((12761, 12774), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (12772, 12774), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((13752, 13765), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (13763, 13765), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((13899, 13912), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (13910, 13912), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((14662, 14684), 'chess.const.Move', 'Move', (['pos', 'i', 'piece_to'], {}), '(pos, i, piece_to)\n', (14666, 14684), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((17840, 17871), 'numpy.where', 'np.where', (['(self._board == o_king)'], {}), '(self._board == o_king)\n', (17848, 17871), True, 'import numpy as np\n'), ((526, 539), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (537, 539), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((4567, 4680), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'self._light_complementary', '(x * self._side, y * self._side, self._side, self._side)'], {}), '(screen, self._light_complementary, (x * self._side, y *\n self._side, self._side, self._side))\n', (4583, 4680), False, 'import pygame\n'), ((14309, 14329), 'chess.const.Move', 'Move', (['i', 'j', 'piece_to'], {}), '(i, j, piece_to)\n', (14313, 14329), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((4840, 4952), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'self._dark_complementary', '(x * self._side, y * self._side, self._side, self._side)'], {}), '(screen, self._dark_complementary, (x * self._side, y *\n self._side, self._side, self._side))\n', (4856, 4952), False, 'import pygame\n'), ((5204, 5299), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'colour', '(x * self._side, y * self._side, self._side, self._side)'], {}), '(screen, colour, (x * self._side, y * self._side, self.\n _side, self._side))\n', (5220, 5299), False, 'import pygame\n'), ((8890, 8903), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (8901, 8903), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((8957, 8970), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (8968, 8970), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((9024, 9037), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (9035, 9037), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((10548, 10596), 'chess.const.MoveFlags', 'MoveFlags', ([], {'LoseCastling': '{CastlingType.QueenSide}'}), '(LoseCastling={CastlingType.QueenSide})\n', (10557, 
10596), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((10863, 10903), 'chess.const.MoveFlags', 'MoveFlags', ([], {'PawnPromotion': 'PieceType.Queen'}), '(PawnPromotion=PieceType.Queen)\n', (10872, 10903), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((14938, 14967), 'chess.const.Piece', 'Piece', (['PieceType.King', 'colour'], {}), '(PieceType.King, colour)\n', (14943, 14967), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((9175, 9188), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (9186, 9188), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((9242, 9255), 'chess.const.Piece.empty', 'Piece.empty', ([], {}), '()\n', (9253, 9255), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n'), ((10720, 10767), 'chess.const.MoveFlags', 'MoveFlags', ([], {'LoseCastling': '{CastlingType.KingSide}'}), '(LoseCastling={CastlingType.KingSide})\n', (10729, 10767), False, 'from chess.const import PieceType, PieceColour, Piece, CastlingType, Move, PIECE_INDICES, init_zobrist, MoveFlags, GameState\n')]
|
import os
os.environ['PREFECT__LOGGING__LEVEL'] = 'DEBUG'
os.environ['DJANGO_ALLOW_ASYNC_UNSAFE'] = 'true'
from prefect import flow, task
import numpy as np
import pandas as pd
from django_pandas.io import read_frame
import helpers
@task
def insert_session(session_id):
from django_connect import connect
connect()
import db.models as d
session = helpers.get_session(session_id)
d.StimulusPresentation.objects.filter(session_id=session_id).delete()
# stimulus types
stim_types = read_frame(d.StimulusType.objects.all())
# stimulus presentations
stim_table = session.stimulus_presentations
stim_table = stim_table.replace({'null':None})
for k in ['phase','size','spatial_frequency']:
stim_table[k] = stim_table[k].apply(helpers.clean_string)
stim_table = stim_table.reset_index()
stim_table = stim_table.merge(stim_types.reset_index(), left_on='stimulus_name', right_on='name', how='left')
stim_table = stim_table.rename(columns={'id':'stimulus_type_id'}).drop(columns=['stimulus_name','name','index'])
stim_table['session_id'] = pd.Series([session.ecephys_session_id]*len(stim_table))
stim_table = stim_table.fillna(np.nan).replace({np.nan:None})
d.StimulusPresentation.objects.bulk_create([ d.StimulusPresentation(**v) for v in stim_table.to_dict(orient='records')])
@task
def list_units(session_id):
from django_connect import connect
connect()
import db.models as d
units = d.Unit.objects.filter(channel__session_probe__session_id=session_id)
return [ int(u.id) for u in units ]
@task
def insert_spike_times(session_id, unit_id):
from django_connect import connect
connect()
import db.models as d
print(f"insert_spike_times: session {session_id}, unit {unit_id}")
    d.UnitSpikeTimes.objects.filter(unit_id=unit_id).delete()
session = helpers.get_session(session_id)
if unit_id in session.spike_times:
unit_spike_times = session.spike_times[unit_id]
st = d.UnitSpikeTimes(unit_id=unit_id, spike_times=list(unit_spike_times))
st.save()
@task
def insert_trial_spike_counts(unit_id):
from django_connect import connect
connect()
import db.models as d
d.TrialSpikeCounts.objects.filter(unit_id=unit_id).delete()
unit = d.Unit.objects.get(pk=unit_id)
session = unit.channel.session_probe.session
stim_table = d.StimulusPresentation.objects.filter(session=session)
stim_table = read_frame(stim_table)
unit_table = d.Unit.objects.filter(channel__session_probe__session=session)
duration = stim_table.stop_time-stim_table.start_time
spike_times = d.UnitSpikeTimes.objects.filter(unit=unit)
if len(spike_times) == 0:
return
spike_times = np.array(spike_times.first().spike_times)
count = helpers.spike_count(stim_table.start_time,stim_table.stop_time,spike_times)
this_df = pd.DataFrame(data = {
'unit_id':int(unit.id),
'stimulus_id':stim_table.id.values.astype(int),
'spike_count':count,
'spike_rate':np.divide(count,duration)
})
d.TrialSpikeCounts.objects.bulk_create([d.TrialSpikeCounts(**v) for v in this_df.to_dict(orient='records')])
@flow(name="spikes")
def spike_flow(session_id):
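    # Dependency chain: the session's stimulus table is inserted before units are listed,
    # and each unit's spike times are stored before its trial spike counts are computed.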
r0 = insert_session(session_id)
unit_ids = list_units(session_id, wait_for=[r0])
for unit_id in unit_ids.wait().result():
r1 = insert_spike_times(session_id=session_id, unit_id=unit_id)
insert_trial_spike_counts(unit_id=unit_id, wait_for=[r1])
if __name__ == "__main__":
spike_flow(session_id=732592105)
|
[
"helpers.spike_count",
"db.models.TrialSpikeCounts",
"db.models.StimulusPresentation.objects.filter",
"prefect.flow",
"db.models.StimulusPresentation",
"db.models.UnitSpikeTimes.objects.filter",
"django_pandas.io.read_frame",
"django_connect.connect",
"helpers.get_session",
"db.models.Unit.objects.filter",
"db.models.StimulusType.objects.all",
"db.models.TrialSpikeCounts.objects.filter",
"db.models.Unit.objects.get",
"numpy.divide"
] |
[((3224, 3243), 'prefect.flow', 'flow', ([], {'name': '"""spikes"""'}), "(name='spikes')\n", (3228, 3243), False, 'from prefect import flow, task\n'), ((317, 326), 'django_connect.connect', 'connect', ([], {}), '()\n', (324, 326), False, 'from django_connect import connect\n'), ((368, 399), 'helpers.get_session', 'helpers.get_session', (['session_id'], {}), '(session_id)\n', (387, 399), False, 'import helpers\n'), ((1433, 1442), 'django_connect.connect', 'connect', ([], {}), '()\n', (1440, 1442), False, 'from django_connect import connect\n'), ((1482, 1550), 'db.models.Unit.objects.filter', 'd.Unit.objects.filter', ([], {'channel__session_probe__session_id': 'session_id'}), '(channel__session_probe__session_id=session_id)\n', (1503, 1550), True, 'import db.models as d\n'), ((1688, 1697), 'django_connect.connect', 'connect', ([], {}), '()\n', (1695, 1697), False, 'from django_connect import connect\n'), ((1878, 1909), 'helpers.get_session', 'helpers.get_session', (['session_id'], {}), '(session_id)\n', (1897, 1909), False, 'import helpers\n'), ((2198, 2207), 'django_connect.connect', 'connect', ([], {}), '()\n', (2205, 2207), False, 'from django_connect import connect\n'), ((2311, 2341), 'db.models.Unit.objects.get', 'd.Unit.objects.get', ([], {'pk': 'unit_id'}), '(pk=unit_id)\n', (2329, 2341), True, 'import db.models as d\n'), ((2408, 2462), 'db.models.StimulusPresentation.objects.filter', 'd.StimulusPresentation.objects.filter', ([], {'session': 'session'}), '(session=session)\n', (2445, 2462), True, 'import db.models as d\n'), ((2480, 2502), 'django_pandas.io.read_frame', 'read_frame', (['stim_table'], {}), '(stim_table)\n', (2490, 2502), False, 'from django_pandas.io import read_frame\n'), ((2521, 2583), 'db.models.Unit.objects.filter', 'd.Unit.objects.filter', ([], {'channel__session_probe__session': 'session'}), '(channel__session_probe__session=session)\n', (2542, 2583), True, 'import db.models as d\n'), ((2662, 2704), 'db.models.UnitSpikeTimes.objects.filter', 'd.UnitSpikeTimes.objects.filter', ([], {'unit': 'unit'}), '(unit=unit)\n', (2693, 2704), True, 'import db.models as d\n'), ((2825, 2902), 'helpers.spike_count', 'helpers.spike_count', (['stim_table.start_time', 'stim_table.stop_time', 'spike_times'], {}), '(stim_table.start_time, stim_table.stop_time, spike_times)\n', (2844, 2902), False, 'import helpers\n'), ((525, 553), 'db.models.StimulusType.objects.all', 'd.StimulusType.objects.all', ([], {}), '()\n', (551, 553), True, 'import db.models as d\n'), ((405, 465), 'db.models.StimulusPresentation.objects.filter', 'd.StimulusPresentation.objects.filter', ([], {'session_id': 'session_id'}), '(session_id=session_id)\n', (442, 465), True, 'import db.models as d\n'), ((1279, 1306), 'db.models.StimulusPresentation', 'd.StimulusPresentation', ([], {}), '(**v)\n', (1301, 1306), True, 'import db.models as d\n'), ((1805, 1853), 'db.models.UnitSpikeTimes.objects.filter', 'd.UnitSpikeTimes.objects.filter', ([], {'unit_id': 'unit_id'}), '(unit_id=unit_id)\n', (1836, 1853), True, 'import db.models as d\n'), ((2239, 2289), 'db.models.TrialSpikeCounts.objects.filter', 'd.TrialSpikeCounts.objects.filter', ([], {'unit_id': 'unit_id'}), '(unit_id=unit_id)\n', (2272, 2289), True, 'import db.models as d\n'), ((3153, 3176), 'db.models.TrialSpikeCounts', 'd.TrialSpikeCounts', ([], {}), '(**v)\n', (3171, 3176), True, 'import db.models as d\n'), ((3075, 3101), 'numpy.divide', 'np.divide', (['count', 'duration'], {}), '(count, duration)\n', (3084, 3101), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 3 08:04:22 2018
@author: af5u13
"""
import numpy as np
import os
from .geometry_functions import deg2rad, sph2cart
from loudspeakerconfig import createArrayConfigFile
def createArrayConfigFromSofa( sofaFile, xmlFile = None, lspLabels = None, twoDSetup = False, virtualLoudspeakers = [] ):
"""
    Create a loudspeaker configuration file from a SOFA file containing a number of emitters representing loudspeakers.
Parameters
==========
sofaFile: string
A file path to a SOFA file.
xmlFile: string, optional
Path of the XML output file to be written.
Optional argument, if not provided, the SOFA file path is used with the extension replaced by ".xml"
lspLabels: list of strings, optional
List of loudspeaker labels, must match the number of emitters in the SOFA files.
If not provided, numbered labels are automatically generated.
twoDSetup: bool, optional
        Flag specifying whether the array is to be considered planar (True) or 3D (False).
        Optional value, default is False (3D).
virtualLoudspeakers: list, optional
        A list of virtual loudspeakers to be added to the setup. Each entry must be a Python dict as described
in the function :py:meth:`loudspeakerconfig.createArrayConfigFile`.
"""
import h5py # Import in the function to avoid a global dependency.
if not os.path.exists( sofaFile ):
raise ValueError( "SOFA file does not exist.")
if xmlFile is None:
        xmlFile = os.path.splitext(sofaFile)[0] + '.xml'
fh = h5py.File( sofaFile )
    ep = fh.get('EmitterPosition')
emitterCoordSystem = ep.attrs['Type'] # This is a required attribute.
emitterCoordSystem = emitterCoordSystem.decode("utf-8") # make it a string.
if emitterCoordSystem == "spherical":
posSph = np.squeeze( np.asarray(ep) )
posSph[:,0] = deg2rad( posSph[:,0] )
posSph[:,1] = deg2rad( posSph[:,1] )
posCart = sph2cart( posSph[:,0], posSph[:,1], posSph[:,2] )
else:
posCart = np.squeeze( np.asarray(ep) )
if twoDSetup:
posCart = posCart[:,0:2]
createArrayConfigFile( xmlFile,
posCart.T,
loudspeakerLabels = lspLabels,
twoDconfig = twoDSetup,
sphericalPositions = True,
virtualLoudspeakers = virtualLoudspeakers )
fh.close()
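# --- Usage sketch (not part of the original module) ---
# Hedged example of calling createArrayConfigFromSofa; "myArray.sofa" is a hypothetical
# path and must point at an existing SOFA file with an EmitterPosition dataset.
if __name__ == "__main__":
    createArrayConfigFromSofa("myArray.sofa",
                              xmlFile="myArray.xml",
                              twoDSetup=False)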
|
[
"os.path.exists",
"numpy.asarray",
"h5py.File",
"loudspeakerconfig.createArrayConfigFile",
"os.path.basename"
] |
[((1586, 1605), 'h5py.File', 'h5py.File', (['sofaFile'], {}), '(sofaFile)\n', (1595, 1605), False, 'import h5py\n'), ((2157, 2324), 'loudspeakerconfig.createArrayConfigFile', 'createArrayConfigFile', (['xmlFile', 'posCart.T'], {'loudspeakerLabels': 'lspLabels', 'twoDconfig': 'twoDSetup', 'sphericalPositions': '(True)', 'virtualLoudspeakers': 'virtualLoudspeakers'}), '(xmlFile, posCart.T, loudspeakerLabels=lspLabels,\n twoDconfig=twoDSetup, sphericalPositions=True, virtualLoudspeakers=\n virtualLoudspeakers)\n', (2178, 2324), False, 'from loudspeakerconfig import createArrayConfigFile\n'), ((1414, 1438), 'os.path.exists', 'os.path.exists', (['sofaFile'], {}), '(sofaFile)\n', (1428, 1438), False, 'import os\n'), ((1540, 1566), 'os.path.basename', 'os.path.basename', (['sofaFile'], {}), '(sofaFile)\n', (1556, 1566), False, 'import os\n'), ((1869, 1883), 'numpy.asarray', 'np.asarray', (['ep'], {}), '(ep)\n', (1879, 1883), True, 'import numpy as np\n'), ((2084, 2098), 'numpy.asarray', 'np.asarray', (['ep'], {}), '(ep)\n', (2094, 2098), True, 'import numpy as np\n')]
|
import os
import zipfile
from typing import List, Tuple, Dict
import numpy as np
import pandas as pd
import requests
import structlog
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from tensorflow import keras, one_hot
from tensorflow.keras import layers
from tensorflow.keras.callbacks import CSVLogger
from tensorflow.keras.layers import ReLU
plt.rcParams.update({'figure.figsize': (16.0, 12.0)})
_LOGGER = structlog.get_logger(__file__)
HEADER_COLUMN = 12
LABEL_COLUMN = 'False Warning'
TEXT_COLUMN = 'Text'
def download_file(url: str, local_dir: str = '.', local_filename: str = '') -> str:
"""
Downloads a file from a provided url to a local directory
:param url: URL to download the file from
:param local_dir: Local directory to download the file to (created if it does not exist)
:param local_filename: What to name the file when saved
(if empty or none, assume the name of the original name of the file)
:return: the name of the file which was saved
"""
os.makedirs(f'{local_dir}', exist_ok=True)
local_filename = local_filename if local_filename else url.split('/')[-1]
if os.path.exists(f'{local_dir}/{local_filename}'):
_LOGGER.info(f'{local_dir}/{local_filename} already exists. Skipping download.')
else:
_LOGGER.info(f"Downloading file from {url} to {local_dir}/{local_filename}.")
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(f'./{local_dir}/{local_filename}', 'wb') as f:
for chunk in r.iter_content(chunk_size=128):
f.write(chunk)
_LOGGER.info(f"Finished saving file from {url} to {local_dir}/{local_filename}.")
return f'{local_dir}/{local_filename}'
def unzip_file(path_to_zip_file: str, dir_to_extract_to: str) -> str:
"""
Unzips a zip file to a provided directory
    :param path_to_zip_file: path to the zip file
:param dir_to_extract_to: directory to extract zip file
:return: full path to unzipped file (assuming there is only one)
"""
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
zip_ref.extractall(dir_to_extract_to)
return f'{dir_to_extract_to}/{zip_ref.namelist()[0]}'
def load_data(path_to_file: str) -> pd.DataFrame:
"""
Loads excel data from a supplied path into a Pandas dataframe
:param path_to_file: path to excel file
:return: Pandas dataframe containing contents of excel spreadsheet
"""
_LOGGER.info(f"Started loading the excel data from {path_to_file} into a dataframe - this may take a while. "
f"You may want to grab a coffee.")
df = pd.read_excel(path_to_file, engine='openpyxl', header=HEADER_COLUMN)
_LOGGER.info(f"Finished loading the excel data from {path_to_file} into a dataframe.")
return df
def vectorize(df: pd.DataFrame, **kwargs) -> Tuple[np.array, List[str]]:
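    """Build a dense TF-IDF feature matrix from the text column; returns the matrix and the feature names."""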
_LOGGER.info("Converting text to feature matrix")
vectorizer = TfidfVectorizer(**kwargs)
sparse_matrix = vectorizer.fit_transform(df[TEXT_COLUMN])
feature_matrix = sparse_matrix.todense()
return feature_matrix, vectorizer.get_feature_names()
def extract_and_encode_labels(df: pd.DataFrame) -> Tuple[np.array, Dict[str, int]]:
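    """Map each distinct label to an integer index; returns the encoded labels and the label mapping."""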
label_mapping = dict((label, i) for i, label in enumerate(df[LABEL_COLUMN].unique()))
labels = list(df[LABEL_COLUMN].map(label_mapping))
return np.array(labels), label_mapping
if __name__ == "__main__":
local_dir = './data'
compute_features = not os.path.exists(f'{local_dir}/feature_data.csv')
model_type = "knn"
if compute_features:
# download the file
path_to_downloaded_zip_file = download_file(
'https://www.fire.tc.faa.gov/zip/MasterModelVersion3DDeliverable.zip',
local_dir)
# unzip the file
path_to_file = unzip_file(path_to_downloaded_zip_file, local_dir)
# load the file into a Pandas dataframe
df = load_data(path_to_file)
# save preprocessed data to save time for future runs
df.to_csv(f'{local_dir}/feature_data.csv')
else:
# don't go through the hassle of preprocessing if we already have the preprocessed data saved
df = pd.read_csv(f'{local_dir}/feature_data.csv')
count_of_no_text = len(df[df[TEXT_COLUMN].isnull()])
df = df.dropna(subset=[TEXT_COLUMN])
_LOGGER.info(f"Dropped {count_of_no_text} records because {TEXT_COLUMN} was null or NaN")
count_of_null_labels = len(df[df[LABEL_COLUMN].isnull()])
df = df.dropna(subset=[LABEL_COLUMN])
_LOGGER.info(f"Dropped {count_of_null_labels} records because {LABEL_COLUMN} was null or NaN")
# create a sparse feature matrix of size n x m,
# where n = number of documents, m = number of words in vocabulary
feature_matrix, feature_names = vectorize(df, min_df=0.001)
labels, label_mapping = extract_and_encode_labels(df)
num_labels = len(label_mapping)
num_features = feature_matrix.shape[1]
X_train, X_test, y_train, y_test = train_test_split(feature_matrix, labels, test_size=0.05, random_state=1)
_LOGGER.info(f"Training on {X_train.shape[0]} samples, validating on {X_test.shape[0]} samples.")
_LOGGER.info(f"Number of features: {num_features}")
if model_type == "mlp":
        # One-hot encode the already-split labels so they match the categorical cross-entropy loss
        y_train = one_hot(y_train, num_labels)
        y_test = one_hot(y_test, num_labels)
inputs = keras.Input(shape=(num_features,))
layer_1 = layers.Dense(8192, activation=ReLU())(inputs)
layer_2 = layers.Dense(2048, activation=ReLU())(layer_1)
layer_3 = layers.Dense(512, activation=ReLU())(layer_2)
layer_4 = layers.Dense(128, activation=ReLU())(layer_3)
layer_5 = layers.Dense(32, activation=ReLU())(layer_4)
layer_6 = layers.Dense(8, activation=ReLU())(layer_5)
outputs = layers.Dense(num_labels, activation="softmax")(layer_6)
model = keras.Model(inputs=inputs, outputs=outputs)
_LOGGER.info(model.summary())
model.compile(
optimizer=keras.optimizers.Adamax(), # Optimizer
loss=keras.losses.CategoricalCrossentropy(), # Loss function to minimize
            metrics=[keras.metrics.CategoricalAccuracy()]  # List of metrics to monitor
)
model.fit(X_train, y_train,
validation_data=(X_test, y_test), shuffle=True, epochs=200, batch_size=64,
callbacks=[CSVLogger('./results.csv')])
model.save('model')
elif model_type == "rf":
rf = RandomForestClassifier(n_jobs=-1)
rf.fit(X_train, y_train)
training_acc = rf.score(X_train, y_train)
validation_acc = rf.score(X_test, y_test)
_LOGGER.info(f"Training accuracy with Random Forest: {training_acc}")
_LOGGER.info(f"Validation accuracy with Random Forest: {validation_acc}")
elif model_type == "knn":
knn = KNeighborsClassifier(n_neighbors=5, n_jobs=-1)
knn.fit(X_train, y_train)
training_acc = knn.score(X_train, y_train)
validation_acc = knn.score(X_test, y_test)
_LOGGER.info(f"Training accuracy with kNN: {training_acc}")
_LOGGER.info(f"Validation accuracy with kNN: {validation_acc}")
|
[
"zipfile.ZipFile",
"pandas.read_csv",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.array",
"tensorflow.keras.layers.Dense",
"pandas.read_excel",
"tensorflow.keras.losses.CategoricalCrossentropy",
"os.path.exists",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.ReLU",
"sklearn.ensemble.RandomForestClassifier",
"requests.get",
"tensorflow.keras.Input",
"structlog.get_logger",
"tensorflow.keras.optimizers.Adamax",
"tensorflow.keras.metrics.Accuracy",
"os.makedirs",
"tensorflow.keras.callbacks.CSVLogger",
"matplotlib.pyplot.rcParams.update",
"sklearn.feature_extraction.text.TfidfVectorizer",
"tensorflow.keras.Model"
] |
[((548, 601), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.figsize': (16.0, 12.0)}"], {}), "({'figure.figsize': (16.0, 12.0)})\n", (567, 601), True, 'import matplotlib.pyplot as plt\n'), ((612, 642), 'structlog.get_logger', 'structlog.get_logger', (['__file__'], {}), '(__file__)\n', (632, 642), False, 'import structlog\n'), ((1205, 1247), 'os.makedirs', 'os.makedirs', (['f"""{local_dir}"""'], {'exist_ok': '(True)'}), "(f'{local_dir}', exist_ok=True)\n", (1216, 1247), False, 'import os\n'), ((1333, 1380), 'os.path.exists', 'os.path.exists', (['f"""{local_dir}/{local_filename}"""'], {}), "(f'{local_dir}/{local_filename}')\n", (1347, 1380), False, 'import os\n'), ((2845, 2913), 'pandas.read_excel', 'pd.read_excel', (['path_to_file'], {'engine': '"""openpyxl"""', 'header': 'HEADER_COLUMN'}), "(path_to_file, engine='openpyxl', header=HEADER_COLUMN)\n", (2858, 2913), True, 'import pandas as pd\n'), ((3165, 3190), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '(**kwargs)\n', (3180, 3190), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((5228, 5300), 'sklearn.model_selection.train_test_split', 'train_test_split', (['feature_matrix', 'labels'], {'test_size': '(0.05)', 'random_state': '(1)'}), '(feature_matrix, labels, test_size=0.05, random_state=1)\n', (5244, 5300), False, 'from sklearn.model_selection import train_test_split\n'), ((2262, 2300), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path_to_zip_file', '"""r"""'], {}), "(path_to_zip_file, 'r')\n", (2277, 2300), False, 'import zipfile\n'), ((3598, 3614), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3606, 3614), True, 'import numpy as np\n'), ((3712, 3759), 'os.path.exists', 'os.path.exists', (['f"""{local_dir}/feature_data.csv"""'], {}), "(f'{local_dir}/feature_data.csv')\n", (3726, 3759), False, 'import os\n'), ((4420, 4464), 'pandas.read_csv', 'pd.read_csv', (['f"""{local_dir}/feature_data.csv"""'], {}), "(f'{local_dir}/feature_data.csv')\n", (4431, 4464), True, 'import pandas as pd\n'), ((5569, 5603), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(num_features,)'}), '(shape=(num_features,))\n', (5580, 5603), False, 'from tensorflow import keras, one_hot\n'), ((6077, 6120), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), '(inputs=inputs, outputs=outputs)\n', (6088, 6120), False, 'from tensorflow import keras, one_hot\n'), ((1580, 1610), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (1592, 1610), False, 'import requests\n'), ((5514, 5530), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5522, 5530), True, 'import numpy as np\n'), ((6004, 6050), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (6016, 6050), False, 'from tensorflow.keras import layers\n'), ((6674, 6707), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (6696, 6707), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((6204, 6229), 'tensorflow.keras.optimizers.Adamax', 'keras.optimizers.Adamax', ([], {}), '()\n', (6227, 6229), False, 'from tensorflow import keras, one_hot\n'), ((6261, 6299), 'tensorflow.keras.losses.CategoricalCrossentropy', 'keras.losses.CategoricalCrossentropy', ([], {}), '()\n', (6297, 6299), False, 'from tensorflow import keras, one_hot\n'), ((7045, 7091), 
'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(5)', 'n_jobs': '(-1)'}), '(n_neighbors=5, n_jobs=-1)\n', (7065, 7091), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((5652, 5658), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (5656, 5658), False, 'from tensorflow.keras.layers import ReLU\n'), ((5716, 5722), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (5720, 5722), False, 'from tensorflow.keras.layers import ReLU\n'), ((5780, 5786), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (5784, 5786), False, 'from tensorflow.keras.layers import ReLU\n'), ((5844, 5850), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (5848, 5850), False, 'from tensorflow.keras.layers import ReLU\n'), ((5907, 5913), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (5911, 5913), False, 'from tensorflow.keras.layers import ReLU\n'), ((5969, 5975), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (5973, 5975), False, 'from tensorflow.keras.layers import ReLU\n'), ((6351, 6375), 'tensorflow.keras.metrics.Accuracy', 'keras.metrics.Accuracy', ([], {}), '()\n', (6373, 6375), False, 'from tensorflow import keras, one_hot\n'), ((6575, 6601), 'tensorflow.keras.callbacks.CSVLogger', 'CSVLogger', (['"""./results.csv"""'], {}), "('./results.csv')\n", (6584, 6601), False, 'from tensorflow.keras.callbacks import CSVLogger\n')]
|
#!/usr/bin/env python
import os,sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import argparse
from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios
import numpy as np
import keras.backend.tensorflow_backend as backend
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from collections import deque
import time
import random
from tqdm import tqdm
from PIL import Image
if __name__ == '__main__':
# parse arguments
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-s', '--scenario', default='simple.py', help='Path of the scenario Python script.')
args = parser.parse_args()
# load scenario from script
scenario = scenarios.load(args.scenario).Scenario()
# create world
world = scenario.make_world()
# create multiagent environment
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, info_callback=None, shared_viewer = False)
# render call to create viewer window (necessary only for interactive policies)
env.render()
# execution loop
obs_n = env.reset()
DISCOUNT = 0.99
REPLAY_MEMORY_SIZE = 50_000 # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 1_000 # Minimum number of steps in a memory to start training
MINIBATCH_SIZE = 64 # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 5 # Terminal states (end of episodes)
MODEL_NAME = '2x256'
MIN_REWARD = 20 # For model save
MEMORY_FRACTION = 0.20
# Environment settings
EPISODES = 200
# Exploration settings
epsilon = 1 # not a constant, going to be decayed
EPSILON_DECAY = 0.99975
MIN_EPSILON = 0.001
# Stats settings
AGGREGATE_STATS_EVERY = 50 # episodes
SHOW_PREVIEW = False
# For stats
ep_rewards = [[-200],[-200],[-200]]
# For more repetitive results
random.seed(1)
np.random.seed(1)
tf.set_random_seed(1)
    # Memory fraction, used mostly when training multiple agents
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
#backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))
# Create models folder
if not os.path.isdir('models'):
os.makedirs('models')
# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):
# Overriding init to set initial step and writer (we want one log file for all .fit() calls)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.step = 1
self.writer = tf.summary.FileWriter(self.log_dir)
# Overriding this method to stop creating default log writer
def set_model(self, model):
pass
        # Overridden: saves logs with our step number
# (otherwise every .fit() will start writing from 0th step)
def on_epoch_end(self, epoch, logs=None):
self.update_stats(**logs)
        # Overridden
# We train for one batch only, no need to save anything at epoch end
def on_batch_end(self, batch, logs=None):
pass
        # Overridden, so it won't close the writer
def on_train_end(self, _):
pass
# Custom method for saving own metrics
# Creates writer, writes custom metrics and closes writer
def update_stats(self, **stats):
self._write_logs(stats, self.step)
# Agent class
class DQNAgent:
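        """DQN agent with a main network (trained every step) and a target network that is
        periodically synced to it to provide stable Q-value targets."""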
def __init__(self,i):
self.index=i
# Main model
self.model = self.create_model()
# Target network
self.target_model = self.create_model()
self.target_model.set_weights(self.model.get_weights())
# An array with last n steps for training
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
# Custom tensorboard object
self.tensorboard = ModifiedTensorBoard(log_dir="logs/{}-{}-{}".format(MODEL_NAME, self.index,int(time.time())))
# Used to count when to update target network with main network's weights
self.target_update_counter = 0
def create_model(self):
model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=(10, 10, 3))) # OBSERVATION_SPACE_VALUES = (10, 10, 3) a 10x10 RGB image.
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
            model.add(Dense(5, activation='linear'))  # ACTION_SPACE_SIZE = how many choices (5)
model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
return model
# Adds step's data to a memory replay array
# (observation space, action, reward, new observation space, done)
def update_replay_memory(self, transition):
self.replay_memory.append(transition)
# Trains main network every step during episode
def train(self, terminal_state, step):
# Start training only if certain number of samples is already saved
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
# Get a minibatch of random samples from memory replay table
minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
# Get current states from minibatch, then query NN model for Q values
current_states = np.array([transition[0] for transition in minibatch])/255
current_qs_list = self.model.predict(current_states)
# Get future states from minibatch, then query NN model for Q values
# When using target network, query it, otherwise main network should be queried
new_current_states = np.array([transition[3] for transition in minibatch])/255
future_qs_list = self.target_model.predict(new_current_states)
X = []
y = []
# Now we need to enumerate our batches
for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
# If not a terminal state, get new q from future states, otherwise set it to 0
# almost like with Q Learning, but we use just part of equation here
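            # i.e. new_q = reward + DISCOUNT * max_a' Q_target(s', a'),
            # the standard one-step Bellman target used by DQN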
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
# Update Q value for given state
current_qs = current_qs_list[index]
current_qs[action] = new_q
# And append to our training data
X.append(current_state)
y.append(current_qs)
# Fit on all samples as one batch, log only on terminal state
self.model.fit(np.array(X)/255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
# Update target network counter every episode
if terminal_state:
self.target_update_counter += 1
# If counter reaches set value, update target network with weights of main network
if self.target_update_counter > UPDATE_TARGET_EVERY:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
# Queries main network for Q values given current observation space (environment state)
def get_qs(self, state):
return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
d = {1: (100, 0, 0),
2: (0, 100, 0),
3: (0, 0, 100),
4: (25,25,25)}
def getobs(obsn):
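    """Render an observation as a 10x10 RGB image for the conv net.

    Plots (obs[0], obs[1]) on channel 0 and (obs[2], obs[3]) on channel 1.
    """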
    env = np.zeros((10, 10, 3), dtype=np.uint8)  # start an RGB array of the grid size
obs=obsn.copy()
    for k in range(len(obs)):
        # rescale [-1, 1] coordinates onto the 10x10 grid (clamped to valid indices)
        obs[k] = min(9, max(0, int((obs[k] + 1) / 0.2)))
env[int(obs[0])][int(obs[1])][0]+=100 # sets the food location tile to green color
env[int(obs[2])][int(obs[3])][1]+=100
    img = Image.fromarray(env, 'RGB')  # read the array into an RGB image (the colour definitions are BGR-ordered)
img=np.array(img)
return img
def getobsi(obsn):
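    """Render a larger observation (seven (x, y) pairs) as a 10x10 RGB image.

    Channel/intensity assignments follow the per-index comments below.
    """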
    env = np.zeros((10, 10, 3), dtype=np.uint8)  # start an RGB array of the grid size
obs=obsn.copy()
    for k in range(len(obs)):
        # rescale [-1, 1] coordinates onto the 10x10 grid (clamped to valid indices)
        obs[k] = min(9, max(0, int((obs[k] + 1) / 0.2)))
env[int(obs[2])][int(obs[3])][0]+=100 # sets the food location tile to green color
env[int(obs[4])][int(obs[5])][1]+=100 # sets the enemy location to red
env[int(obs[6])][int(obs[7])][2] +=100 # sets the player tile to blue
env[int(obs[8])][int(obs[9])][0] +=25
env[int(obs[8])][int(obs[9])][1] +=25
env[int(obs[8])][int(obs[9])][2] +=25
env[int(obs[10])][int(obs[11])][0] +=25
env[int(obs[10])][int(obs[11])][1] +=25
env[int(obs[10])][int(obs[11])][2] +=25
env[int(obs[12])][int(obs[13])][0] +=25
env[int(obs[12])][int(obs[13])][1] +=25
env[int(obs[12])][int(obs[13])][2] +=25
    img = Image.fromarray(env, 'RGB')  # read the array into an RGB image (the colour definitions are BGR-ordered)
img=np.array(img)
return img
# create interactive policies for each agent
policies = [DQNAgent(i) for i in range(env.n)]
for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'):
episode_reward=[0,0,0]
step=1
for i, policy in enumerate(policies):
policy.tensorboard.step=episode
# query for action from each agent's policy
obs_n=env.reset()
done = False
while not done:
act_n = []
action_n=[]
for i, policy in enumerate(policies):
act = np.zeros(5)
if np.random.random() > epsilon:
# Get action from Q table
action = np.argmax(policy.get_qs(getobs(obs_n[i])))
else:
# Get random action
action = np.random.randint(0, 5)
act[action]+=1.0
action_n.append(action)
act_n.append(act)
# step environment
newobs_n, reward_n, done_n, _ = env.step(act_n)
if step>=100:
done=True
for i, policy in enumerate(policies):
episode_reward[i]+=reward_n[i]
policy.update_replay_memory((getobs(obs_n[i]), action_n[i], reward_n[i], getobs(newobs_n[i]), done))
policy.train(done, step)
obs_n=newobs_n
step+=1
#if SHOW_PREVIEW and not episode % AGGREGATE_STATS_EVERY:
if episode % 50==1:
env.render()
for i, policy in enumerate(policies):
ep_rewards[i].append(episode_reward[i])
if not episode % AGGREGATE_STATS_EVERY or episode == 1:
average_reward = sum(ep_rewards[i][-AGGREGATE_STATS_EVERY:])/len(ep_rewards[i][-AGGREGATE_STATS_EVERY:])
min_reward = min(ep_rewards[i][-AGGREGATE_STATS_EVERY:])
max_reward = max(ep_rewards[i][-AGGREGATE_STATS_EVERY:])
policy.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon)
# Save model, but only when min reward is greater or equal a set value
if min_reward >= MIN_REWARD:
policy.model.save(f'models/{MODEL_NAME+str(policy.index)}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
if epsilon > MIN_EPSILON:
epsilon *= EPSILON_DECAY
epsilon = max(MIN_EPSILON, epsilon)
|
[
"keras.layers.Conv2D",
"numpy.array",
"keras.layers.Activation",
"keras.layers.Dense",
"tensorflow.set_random_seed",
"multiagent.environment.MultiAgentEnv",
"collections.deque",
"argparse.ArgumentParser",
"numpy.random.random",
"numpy.max",
"os.path.isdir",
"numpy.random.seed",
"keras.optimizers.Adam",
"random.sample",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.models.Sequential",
"multiagent.scenarios.load",
"tensorflow.summary.FileWriter",
"keras.layers.Dropout",
"time.time",
"PIL.Image.fromarray",
"os.makedirs",
"os.path.join",
"random.seed",
"numpy.zeros",
"numpy.random.randint"
] |
[((55, 86), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (67, 86), False, 'import os, sys\n'), ((647, 688), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'None'}), '(description=None)\n', (670, 688), False, 'import argparse\n'), ((1017, 1144), 'multiagent.environment.MultiAgentEnv', 'MultiAgentEnv', (['world', 'scenario.reset_world', 'scenario.reward', 'scenario.observation'], {'info_callback': 'None', 'shared_viewer': '(False)'}), '(world, scenario.reset_world, scenario.reward, scenario.\n observation, info_callback=None, shared_viewer=False)\n', (1030, 1144), False, 'from multiagent.environment import MultiAgentEnv\n'), ((2117, 2131), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (2128, 2131), False, 'import random\n'), ((2136, 2153), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2150, 2153), True, 'import numpy as np\n'), ((2158, 2179), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (2176, 2179), True, 'import tensorflow as tf\n'), ((2461, 2484), 'os.path.isdir', 'os.path.isdir', (['"""models"""'], {}), "('models')\n", (2474, 2484), False, 'import os, sys\n'), ((2494, 2515), 'os.makedirs', 'os.makedirs', (['"""models"""'], {}), "('models')\n", (2505, 2515), False, 'import os, sys\n'), ((8475, 8512), 'numpy.zeros', 'np.zeros', (['(10, 10, 3)'], {'dtype': 'np.uint8'}), '((10, 10, 3), dtype=np.uint8)\n', (8483, 8512), True, 'import numpy as np\n'), ((8770, 8797), 'PIL.Image.fromarray', 'Image.fromarray', (['env', '"""RGB"""'], {}), "(env, 'RGB')\n", (8785, 8797), False, 'from PIL import Image\n'), ((8881, 8894), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (8889, 8894), True, 'import numpy as np\n'), ((8959, 8996), 'numpy.zeros', 'np.zeros', (['(10, 10, 3)'], {'dtype': 'np.uint8'}), '((10, 10, 3), dtype=np.uint8)\n', (8967, 8996), True, 'import numpy as np\n'), ((9810, 9837), 'PIL.Image.fromarray', 'Image.fromarray', (['env', '"""RGB"""'], {}), "(env, 'RGB')\n", (9825, 9837), False, 'from PIL import Image\n'), ((9921, 9934), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9929, 9934), True, 'import numpy as np\n'), ((877, 906), 'multiagent.scenarios.load', 'scenarios.load', (['args.scenario'], {}), '(args.scenario)\n', (891, 906), True, 'import multiagent.scenarios as scenarios\n'), ((2833, 2868), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.log_dir'], {}), '(self.log_dir)\n', (2854, 2868), True, 'import tensorflow as tf\n'), ((4104, 4136), 'collections.deque', 'deque', ([], {'maxlen': 'REPLAY_MEMORY_SIZE'}), '(maxlen=REPLAY_MEMORY_SIZE)\n', (4109, 4136), False, 'from collections import deque\n'), ((4497, 4509), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4507, 4509), False, 'from keras.models import Sequential\n'), ((5911, 5960), 'random.sample', 'random.sample', (['self.replay_memory', 'MINIBATCH_SIZE'], {}), '(self.replay_memory, MINIBATCH_SIZE)\n', (5924, 5960), False, 'import random\n'), ((4532, 4576), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'input_shape': '(10, 10, 3)'}), '(256, (3, 3), input_shape=(10, 10, 3))\n', (4538, 4576), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4661, 4679), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4671, 4679), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4703, 4733), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], 
{'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4715, 4733), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4757, 4769), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4764, 4769), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4798, 4817), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {}), '(256, (3, 3))\n', (4804, 4817), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4841, 4859), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4851, 4859), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4883, 4913), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4895, 4913), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4937, 4949), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4944, 4949), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((4978, 4987), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4985, 4987), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((5070, 5079), 'keras.layers.Dense', 'Dense', (['(64)'], {}), '(64)\n', (5075, 5079), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((5108, 5137), 'keras.layers.Dense', 'Dense', (['(5)'], {'activation': '"""linear"""'}), "(5, activation='linear')\n", (5113, 5137), False, 'from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten\n'), ((6077, 6130), 'numpy.array', 'np.array', (['[transition[0] for transition in minibatch]'], {}), '([transition[0] for transition in minibatch])\n', (6085, 6130), True, 'import numpy as np\n'), ((6411, 6464), 'numpy.array', 'np.array', (['[transition[3] for transition in minibatch]'], {}), '([transition[3] for transition in minibatch])\n', (6419, 6464), True, 'import numpy as np\n'), ((7549, 7560), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7557, 7560), True, 'import numpy as np\n'), ((10578, 10589), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (10586, 10589), True, 'import numpy as np\n'), ((5231, 5245), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (5235, 5245), False, 'from keras.optimizers import Adam\n'), ((6997, 7026), 'numpy.max', 'np.max', (['future_qs_list[index]'], {}), '(future_qs_list[index])\n', (7003, 7026), True, 'import numpy as np\n'), ((7532, 7543), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (7540, 7543), True, 'import numpy as np\n'), ((10609, 10627), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (10625, 10627), True, 'import numpy as np\n'), ((10848, 10871), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (10865, 10871), True, 'import numpy as np\n'), ((4291, 4302), 'time.time', 'time.time', ([], {}), '()\n', (4300, 4302), False, 'import time\n'), ((8271, 8286), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (8279, 8286), True, 'import numpy as np\n'), ((12482, 12493), 'time.time', 'time.time', ([], {}), '()\n', (12491, 12493), False, 'import time\n')]
|
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import numpy as np
import torch.nn.functional as F
from attention import AdditiveAttention
class Encoder(nn.Module):
"""Encoder bi-GRU"""
def __init__(self, input_dim, char_embed_dim,
encoder_hidd_dim,
decoder_hidd_dim,
num_layers,
morph_embeddings=None,
fasttext_embeddings=None,
char_padding_idx=0,
word_padding_idx=0,
dropout=0):
super(Encoder, self).__init__()
morph_embeddings_dim = 0
self.morph_embedding_layer = None
fasttext_embeddings_dim = 0
self.fasttext_embedding_layer = None
self.char_embedding_layer = nn.Embedding(input_dim,
char_embed_dim,
padding_idx=char_padding_idx)
if morph_embeddings is not None:
self.morph_embedding_layer = nn.Embedding.from_pretrained(morph_embeddings,
padding_idx=word_padding_idx)
morph_embeddings_dim = morph_embeddings.shape[1]
if fasttext_embeddings is not None:
self.fasttext_embedding_layer = nn.Embedding.from_pretrained(fasttext_embeddings)
fasttext_embeddings_dim = fasttext_embeddings.shape[1]
self.rnn = nn.GRU(input_size=char_embed_dim + morph_embeddings_dim + fasttext_embeddings_dim,
hidden_size=encoder_hidd_dim,
num_layers=num_layers,
batch_first=True,
bidirectional=True,
dropout=dropout if num_layers > 1 else 0.0)
self.linear_map = nn.Linear(encoder_hidd_dim * 2, decoder_hidd_dim)
def forward(self, char_src_seqs, word_src_seqs, src_seqs_lengths):
embedded_seqs = self.char_embedding_layer(char_src_seqs)
# embedded_seqs shape: [batch_size, max_src_seq_len, char_embed_dim]
# Add morph embeddings to the char embeddings if needed
if self.morph_embedding_layer is not None:
embedded_word_seqs_morph = self.morph_embedding_layer(word_src_seqs)
# embedded_word_seqs_morph shape: [batch_size, max_src_seq_len, morph_embeddings_dim]
embedded_seqs = torch.cat((embedded_seqs, embedded_word_seqs_morph), dim=2)
# embedded_seqs shape: [batch_size, max_src_seq_len, char_embed_dim + morph_embeddings_dim]
# Add fasttext embeddings to the char embeddings if needed
if self.fasttext_embedding_layer is not None:
embedded_word_seqs_ft = self.fasttext_embedding_layer(word_src_seqs)
# embedded_word_seqs_ft shape: [batch_size, max_src_seq_len, fasttext_embeddings_dim]
embedded_seqs = torch.cat((embedded_seqs, embedded_word_seqs_ft), dim=2)
# embedded_seqs shape: [batch_size, max_src_seq_len, char_embed_dim + fasttext_embeddings_dim]
# packing the embedded_seqs
packed_embedded_seqs = pack_padded_sequence(embedded_seqs, src_seqs_lengths, batch_first=True)
output, hidd = self.rnn(packed_embedded_seqs)
# hidd shape: [num_layers * num_dirs, batch_size, encoder_hidd_dim]
# concatenating the forward and backward vectors for each layer
hidd = torch.cat([hidd[0:hidd.size(0):2], hidd[1:hidd.size(0):2]], dim=2)
# hidd shape: [num layers, batch_size, num_directions * encoder_hidd_dim]
        # mapping the encoder hidd state to the decoder hidd dim space
hidd = torch.tanh(self.linear_map(hidd))
# unpacking the output
output, lengths = pad_packed_sequence(output, batch_first=True)
# output shape: [batch_size, src_seqs_length, num_dirs * encoder_hidd_dim]
return output, hidd
class Decoder(nn.Module):
"""Decoder GRU
Things to note:
- The input to the decoder rnn at each time step is the
concatenation of the embedded token and the context vector
- The context vector will have a size of batch_size, encoder_hidd_dim * 2
- The prediction layer input is the concatenation of
the context vector and the h_t of the decoder
"""
def __init__(self, input_dim, char_embed_dim,
decoder_hidd_dim, num_layers,
output_dim,
encoder_hidd_dim,
padding_idx=0,
embed_trg_gender=False,
gender_embeddings=None,
gender_input_dim=0,
gender_embed_dim=0,
dropout=0):
super(Decoder, self).__init__()
self.attention = AdditiveAttention(encoder_hidd_dim=encoder_hidd_dim,
decoder_hidd_dim=decoder_hidd_dim)
self.gender_embedding_layer = None
if embed_trg_gender:
if gender_embeddings is None:
self.gender_embedding_layer = nn.Embedding(gender_input_dim, gender_embed_dim)
else:
self.gender_embedding_layer = nn.Embedding.from_pretrained(gender_embeddings)
self.char_embedding_layer = nn.Embedding(input_dim,
char_embed_dim,
padding_idx=padding_idx)
# the input to the rnn is the context_vector + embedded token --> embed_dim + hidd_dim
self.rnn = nn.GRU(input_size=char_embed_dim + encoder_hidd_dim * 2,
hidden_size=decoder_hidd_dim,
num_layers=num_layers,
batch_first=True,
dropout=dropout if num_layers > 1 else 0.0)
# the input to the classifier is h_t + context_vector + gender_embed_dim? --> hidd_dim * 2
self.classification_layer = nn.Linear(encoder_hidd_dim * 2
+ decoder_hidd_dim * num_layers
+ gender_embed_dim + char_embed_dim, output_dim)
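        # Per decoding step, the classifier input is the concatenation of the
        # flattened decoder hidden state [batch, decoder_hidd_dim * num_layers],
        # the context vector [batch, encoder_hidd_dim * 2], the optional target
        # gender embedding, and the embedded input character [batch, char_embed_dim]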
self.dropout_layer = nn.Dropout(dropout)
def forward(self, trg_seqs, encoder_outputs, decoder_h_t, context_vectors,
attention_mask, trg_gender=None):
# trg_seqs shape: [batch_size]
batch_size = trg_seqs.shape[0]
trg_seqs = trg_seqs.unsqueeze(1)
# trg_seqs shape: [batch_size, 1]
# Step 1: embedding the target seqs
embedded_seqs = self.char_embedding_layer(trg_seqs)
# embedded_seqs shape: [batch_size, 1, embed_dim]
# context_vectors shape: [batch_size, encoder_hidd_dim * 2]
# changing shape to: [batch_size, 1, encoder_hidd_dim * 2]
context_vectors = context_vectors.unsqueeze(1)
# concatenating the embedded trg sequence with the context_vectors
rnn_input = torch.cat((embedded_seqs, context_vectors), dim=2)
# rnn_input shape: [batch_size, 1, embed_dim + encoder_hidd_dim * 2]
# Step 2: feeding the input to the rnn and updating the decoder_h_t
decoder_output, decoder_h_t = self.rnn(rnn_input, decoder_h_t)
# decoder output shape: [batch_size, 1, num_dirs * hidd_dim]
# decoder_h_t shape: [num_layers * num_dirs, batch_size, hidd_dim]
# Step 3: updating the context vectors through attention
context_vectors, atten_scores = self.attention(keys=encoder_outputs,
query=decoder_h_t,
mask=attention_mask)
# Step 4: get the prediction vector
# embed trg gender info if needed
if self.gender_embedding_layer is not None:
embedded_trg_gender = self.gender_embedding_layer(trg_gender)
# embedded_trg_gender shape: [batch_size, gender_embed_dim]
# concatenating decoder_h_t, context_vectors, and the
# embedded_trg_gender to create a prediction vector
if self.rnn.num_layers == 1:
assert decoder_output.squeeze(1).eq(decoder_h_t.view(decoder_h_t.shape[1], -1)).all().item()
predictions_vector = torch.cat((decoder_h_t.view(decoder_h_t.shape[1], -1),
context_vectors, embedded_trg_gender,
embedded_seqs.squeeze(1)), dim=1)
# predictions_vector: [batch_size, hidd_dim + encoder_hidd_dim * 2 + gender_embed_dim]
else:
# concatenating decoder_h_t with context_vectors to
# create a prediction vector
predictions_vector = torch.cat((decoder_h_t.view(decoder_h_t.shape[1], -1),
context_vectors, embedded_seqs.squeeze(1)), dim=1)
# predictions_vector: [batch_size, hidd_dim + encoder_hidd_dim * 2]
# Step 5: feeding the prediction vector to the fc layer
# to a make a prediction
# apply dropout if needed
predictions_vector = self.dropout_layer(predictions_vector)
prediction = self.classification_layer(predictions_vector)
# prediction shape: [batch_size, output_dim]
return prediction, decoder_h_t, atten_scores, context_vectors
class Seq2Seq(nn.Module):
"""Seq2Seq model"""
def __init__(self, encoder_input_dim, encoder_embed_dim,
encoder_hidd_dim, encoder_num_layers,
decoder_input_dim, decoder_embed_dim,
decoder_hidd_dim, decoder_num_layers,
decoder_output_dim,
morph_embeddings=None, fasttext_embeddings=None,
gender_embeddings=None,
embed_trg_gender=False, gender_input_dim=0,
gender_embed_dim=0, char_src_padding_idx=0,
word_src_padding_idx=0, trg_padding_idx=0,
dropout=0, trg_sos_idx=2):
super(Seq2Seq, self).__init__()
self.encoder = Encoder(input_dim=encoder_input_dim,
char_embed_dim=encoder_embed_dim,
encoder_hidd_dim=encoder_hidd_dim,
decoder_hidd_dim=decoder_hidd_dim,
num_layers=encoder_num_layers,
morph_embeddings=morph_embeddings,
fasttext_embeddings=fasttext_embeddings,
char_padding_idx=char_src_padding_idx,
word_padding_idx=word_src_padding_idx,
dropout=dropout)
self.decoder = Decoder(input_dim=decoder_input_dim,
char_embed_dim=decoder_embed_dim,
decoder_hidd_dim=decoder_hidd_dim,
num_layers=decoder_num_layers,
encoder_hidd_dim=encoder_hidd_dim,
output_dim=decoder_input_dim,
padding_idx=trg_padding_idx,
embed_trg_gender=embed_trg_gender,
gender_input_dim=gender_input_dim,
gender_embed_dim=gender_embed_dim,
gender_embeddings=gender_embeddings,
dropout=dropout)
self.char_src_padding_idx = char_src_padding_idx
self.trg_sos_idx = trg_sos_idx
self.sampling_temperature = 3
def create_mask(self, src_seqs, src_padding_idx):
mask = (src_seqs != src_padding_idx)
return mask
def forward(self, char_src_seqs, word_src_seqs, src_seqs_lengths, trg_seqs,
trg_gender=None, teacher_forcing_prob=0.3):
# trg_seqs shape: [batch_size, trg_seqs_length]
# reshaping to: [trg_seqs_length, batch_size]
trg_seqs = trg_seqs.permute(1, 0)
trg_seqs_length, batch_size = trg_seqs.shape
# passing the src to the encoder
encoder_outputs, encoder_hidd = self.encoder(char_src_seqs, word_src_seqs, src_seqs_lengths)
# creating attention masks
attention_mask = self.create_mask(char_src_seqs, self.char_src_padding_idx)
predictions = []
decoder_attention_scores = []
# initializing the trg_seqs to <s> token
y_t = torch.ones(batch_size, dtype=torch.long) * self.trg_sos_idx
# intializing the context_vectors to zero
context_vectors = torch.zeros(batch_size, self.encoder.rnn.hidden_size * 2)
# context_vectors shape: [batch_size, encoder_hidd_dim * 2]
# initializing the hidden state of the decoder to the encoder hidden state
decoder_h_t = encoder_hidd
# decoder_h_t shape: [batch_size, decoder_hidd_dim]
# moving y_t and context_vectors to the right device
y_t = y_t.to(encoder_hidd.device)
context_vectors = context_vectors.to(encoder_hidd.device)
for i in range(0, trg_seqs_length):
teacher_forcing = np.random.random() < teacher_forcing_prob
# if teacher_forcing, use ground truth target tokens
# as an input to the decoder
if teacher_forcing:
y_t = trg_seqs[i]
# do a single decoder step
prediction, decoder_h_t, atten_scores, context_vectors = self.decoder(trg_seqs=y_t,
trg_gender=trg_gender,
encoder_outputs=encoder_outputs,
decoder_h_t=decoder_h_t,
context_vectors=context_vectors,
attention_mask=attention_mask)
# If not teacher force, use the maximum
# prediction as an input to the decoder in
# the next time step
if not teacher_forcing:
# we multiply the predictions with a sampling_temperature
                # to make the probabilities peakier, so we can be confident about the
# maximum prediction
pred_output_probs = F.softmax(prediction * self.sampling_temperature, dim=1)
y_t = torch.argmax(pred_output_probs, dim=1)
predictions.append(prediction)
decoder_attention_scores.append(atten_scores)
predictions = torch.stack(predictions)
# predictions shape: [trg_seq_len, batch_size, output_dim]
predictions = predictions.permute(1, 0, 2)
# predictions shape: [batch_size, trg_seq_len, output_dim]
decoder_attention_scores = torch.stack(decoder_attention_scores)
# attention_scores_total shape: [trg_seq_len, batch_size, src_seq_len]
decoder_attention_scores = decoder_attention_scores.permute(1, 0, 2)
# attention_scores_total shape: [batch_size, trg_seq_len, src_seq_len]
return predictions, decoder_attention_scores
|
[
"torch.nn.functional.softmax",
"torch.nn.Dropout",
"torch.ones",
"numpy.random.random",
"torch.stack",
"attention.AdditiveAttention",
"torch.argmax",
"torch.cat",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Linear",
"torch.nn.Embedding.from_pretrained",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.zeros",
"torch.nn.Embedding",
"torch.nn.GRU"
] |
[((822, 891), 'torch.nn.Embedding', 'nn.Embedding', (['input_dim', 'char_embed_dim'], {'padding_idx': 'char_padding_idx'}), '(input_dim, char_embed_dim, padding_idx=char_padding_idx)\n', (834, 891), True, 'import torch.nn as nn\n'), ((1507, 1738), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': '(char_embed_dim + morph_embeddings_dim + fasttext_embeddings_dim)', 'hidden_size': 'encoder_hidd_dim', 'num_layers': 'num_layers', 'batch_first': '(True)', 'bidirectional': '(True)', 'dropout': '(dropout if num_layers > 1 else 0.0)'}), '(input_size=char_embed_dim + morph_embeddings_dim +\n fasttext_embeddings_dim, hidden_size=encoder_hidd_dim, num_layers=\n num_layers, batch_first=True, bidirectional=True, dropout=dropout if \n num_layers > 1 else 0.0)\n', (1513, 1738), True, 'import torch.nn as nn\n'), ((1882, 1931), 'torch.nn.Linear', 'nn.Linear', (['(encoder_hidd_dim * 2)', 'decoder_hidd_dim'], {}), '(encoder_hidd_dim * 2, decoder_hidd_dim)\n', (1891, 1931), True, 'import torch.nn as nn\n'), ((3197, 3268), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['embedded_seqs', 'src_seqs_lengths'], {'batch_first': '(True)'}), '(embedded_seqs, src_seqs_lengths, batch_first=True)\n', (3217, 3268), False, 'from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n'), ((3815, 3860), 'torch.nn.utils.rnn.pad_packed_sequence', 'pad_packed_sequence', (['output'], {'batch_first': '(True)'}), '(output, batch_first=True)\n', (3834, 3860), False, 'from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n'), ((4843, 4935), 'attention.AdditiveAttention', 'AdditiveAttention', ([], {'encoder_hidd_dim': 'encoder_hidd_dim', 'decoder_hidd_dim': 'decoder_hidd_dim'}), '(encoder_hidd_dim=encoder_hidd_dim, decoder_hidd_dim=\n decoder_hidd_dim)\n', (4860, 4935), False, 'from attention import AdditiveAttention\n'), ((5334, 5398), 'torch.nn.Embedding', 'nn.Embedding', (['input_dim', 'char_embed_dim'], {'padding_idx': 'padding_idx'}), '(input_dim, char_embed_dim, padding_idx=padding_idx)\n', (5346, 5398), True, 'import torch.nn as nn\n'), ((5612, 5793), 'torch.nn.GRU', 'nn.GRU', ([], {'input_size': '(char_embed_dim + encoder_hidd_dim * 2)', 'hidden_size': 'decoder_hidd_dim', 'num_layers': 'num_layers', 'batch_first': '(True)', 'dropout': '(dropout if num_layers > 1 else 0.0)'}), '(input_size=char_embed_dim + encoder_hidd_dim * 2, hidden_size=\n decoder_hidd_dim, num_layers=num_layers, batch_first=True, dropout=\n dropout if num_layers > 1 else 0.0)\n', (5618, 5793), True, 'import torch.nn as nn\n'), ((6024, 6139), 'torch.nn.Linear', 'nn.Linear', (['(encoder_hidd_dim * 2 + decoder_hidd_dim * num_layers + gender_embed_dim +\n char_embed_dim)', 'output_dim'], {}), '(encoder_hidd_dim * 2 + decoder_hidd_dim * num_layers +\n gender_embed_dim + char_embed_dim, output_dim)\n', (6033, 6139), True, 'import torch.nn as nn\n'), ((6258, 6277), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (6268, 6277), True, 'import torch.nn as nn\n'), ((7020, 7070), 'torch.cat', 'torch.cat', (['(embedded_seqs, context_vectors)'], {'dim': '(2)'}), '((embedded_seqs, context_vectors), dim=2)\n', (7029, 7070), False, 'import torch\n'), ((12639, 12696), 'torch.zeros', 'torch.zeros', (['batch_size', '(self.encoder.rnn.hidden_size * 2)'], {}), '(batch_size, self.encoder.rnn.hidden_size * 2)\n', (12650, 12696), False, 'import torch\n'), ((14750, 14774), 'torch.stack', 'torch.stack', (['predictions'], {}), '(predictions)\n', (14761, 14774), False, 'import torch\n'), ((14996, 15033), 
'torch.stack', 'torch.stack', (['decoder_attention_scores'], {}), '(decoder_attention_scores)\n', (15007, 15033), False, 'import torch\n'), ((1073, 1149), 'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (['morph_embeddings'], {'padding_idx': 'word_padding_idx'}), '(morph_embeddings, padding_idx=word_padding_idx)\n', (1101, 1149), True, 'import torch.nn as nn\n'), ((1370, 1419), 'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (['fasttext_embeddings'], {}), '(fasttext_embeddings)\n', (1398, 1419), True, 'import torch.nn as nn\n'), ((2471, 2530), 'torch.cat', 'torch.cat', (['(embedded_seqs, embedded_word_seqs_morph)'], {'dim': '(2)'}), '((embedded_seqs, embedded_word_seqs_morph), dim=2)\n', (2480, 2530), False, 'import torch\n'), ((2965, 3021), 'torch.cat', 'torch.cat', (['(embedded_seqs, embedded_word_seqs_ft)'], {'dim': '(2)'}), '((embedded_seqs, embedded_word_seqs_ft), dim=2)\n', (2974, 3021), False, 'import torch\n'), ((12502, 12542), 'torch.ones', 'torch.ones', (['batch_size'], {'dtype': 'torch.long'}), '(batch_size, dtype=torch.long)\n', (12512, 12542), False, 'import torch\n'), ((5136, 5184), 'torch.nn.Embedding', 'nn.Embedding', (['gender_input_dim', 'gender_embed_dim'], {}), '(gender_input_dim, gender_embed_dim)\n', (5148, 5184), True, 'import torch.nn as nn\n'), ((5249, 5296), 'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (['gender_embeddings'], {}), '(gender_embeddings)\n', (5277, 5296), True, 'import torch.nn as nn\n'), ((13190, 13208), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (13206, 13208), True, 'import numpy as np\n'), ((14507, 14563), 'torch.nn.functional.softmax', 'F.softmax', (['(prediction * self.sampling_temperature)'], {'dim': '(1)'}), '(prediction * self.sampling_temperature, dim=1)\n', (14516, 14563), True, 'import torch.nn.functional as F\n'), ((14586, 14624), 'torch.argmax', 'torch.argmax', (['pred_output_probs'], {'dim': '(1)'}), '(pred_output_probs, dim=1)\n', (14598, 14624), False, 'import torch\n')]
|
# -*- coding: UTF-8 -*-
"""
split_by_area
=============
Script : split_by_area.py
Author : <EMAIL>
Modified: 2018-08-27
Purpose : tools for working with numpy arrays
Notes:
-----
The xs and ys form pairs with the first and last points being identical
The pairs are constructed using n-1 to ensure that you don't form a
line from identical points.
First split polygon is a sample of a multipart. Added 0, 0 and 0, 80
back in
>>> xs = [0., 0., 80., 0, 0., 100., 100., 0.]
>>> ys = [0., 30., 30., 80., 100., 100., 0., 0.]
>>> a = np.array(list(zip(xs, ys))) * 1.0 # --- must be floats
>>> v = np.array([[50., 0], [50, 100.]])
>>> ext = np.array([[0., 0], [0, 100.],[100, 100.], [100., 0.], [0., 0.]])
return a, v
References:
----------
`<https://stackoverflow.com/questions/3252194/numpy-and-line-intersections>`_.
`<https://community.esri.com/message/627051?commentID=627051#comment-627051>`
`<https://community.esri.com/message/779043-re-how-to-divide-irregular-
polygon-into-equal-areas-using-arcgis-105?commentID=779043#comment-779043>`
This is a good one
`<https://tereshenkov.wordpress.com/2017/09/10/dividing-a-polygon-into-a-given
-number-of-equal-areas-with-arcpy/>`
---------------------------------------------------------------------
"""
# ---- imports, formats, constants ----
import sys
import math
from textwrap import dedent
import numpy as np
import warnings
from arcpytools_plt import (tweet, fc_info, _poly_ext,
trans_rot, cal_area, get_polys)
import arcpy
warnings.simplefilter('ignore', FutureWarning)
ft = {'bool': lambda x: repr(x.astype(np.int32)),
'float_kind': '{: 0.3f}'.format}
np.set_printoptions(edgeitems=5, linewidth=80, precision=2, suppress=True,
threshold=100, formatter=ft)
np.ma.masked_print_option.set_display('-') # change to a single -
script = sys.argv[0] # print this should you need to locate the script
# ---- Do the work or run the demo ------------------------------------------
#
frmt = """
Input features.... {}
Output features... {}
Number of splits . {}
Split types ...... {}
"""
def _cut_poly(poly, p_id, step=1.0, split_axis="X", split_fac=4, SR=None):
"""Perform the poly* cutting and return the result.
    step : number
        fractional step for division, 1.0 equates to 1%
    split_axis : str
        axis to split along, either "X" or "Y"
    split_fac : number
        number of areas to produce; 4 means split into 4 equal areas
"""
L, B, R, T = _poly_ext(poly)
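    # L, B, R, T: left, bottom, right and top extents of the polygon's envelope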
# s_fac = math.ceil((R - L)/step)
# lefts = np.linspace(L+dx, R, num=s_fac, endpoint=True)
dx = step
dy = step
if split_axis == "X":
lefts = np.arange(L+dx, R+dx, dx, dtype='float')
splitters = np.array([[[l, B-1.0], [l, T+1.0]] for l in lefts])
    elif split_axis == 'Y':
tops = np.arange(B+dy, T+dy, dy, dtype='float')
splitters = np.array([[[R+1.0, t], [L-1.0, t]] for t in tops])
cutters = []
for s in splitters:
s = s.tolist()
c = arcpy.Polyline(arcpy.Array([arcpy.Point(*xy) for xy in s]), SR)
cutters.append(c)
# ----
cuts = []
for i in cutters:
rght = poly
if i.crosses(poly):
try:
left, rght = poly.cut(i)
if rght is None:
cuts.append(left)
cuts.append(left)
poly = rght
rght = left
except RuntimeError:
tweet("Issues with poly...{}".format(p_id))
continue
else:
cuts.append(rght)
return cuts, cutters
def final_cut(cutters, poly):
""" final cut
"""
cuts = []
for i in cutters:
rght = poly
if i.crosses(poly):
try:
left, rght = poly.cut(i)
if rght is None:
cuts.append(left)
cuts.append(left)
poly = rght
rght = left
except RuntimeError:
                tweet("Issues with poly...")
continue
else:
cuts.append(rght)
return cuts # , cutters
# ---- demo and tool section -------------------------------------------------
#
if len(sys.argv) == 1:
testing = False
in_pth = script.split("/")[:-2] + ["Polygon_lineTools.gdb"]
in_fc = "/".join(in_pth) + "/shapes_mtm9"
out_fc = "/".join(in_pth) + "/c0"
s_axis = "Y"
s_fac = 4
else:
testing = False
in_fc = sys.argv[1]
out_fc = sys.argv[2]
s_fac = int(sys.argv[3])
s_axis = sys.argv[4]
# ---- for both
#
shp_fld, oid_fld, shp_type, SR = fc_info(in_fc)
out_polys, out_ids = get_polys(in_fc)
#old_ids = np.repeat(out_ids, s_fac) # produce data for the output id field
# ---- instant bail
if SR.type == 'Projected':
result_ = []
for i in range(len(out_polys)):
poly = out_polys[i]
p_id = out_ids[i]
cuts, cutters = _cut_poly(poly, p_id, step=1,
split_axis = s_axis,
split_fac=4, SR=SR)
idxs = cal_area(poly, cuts, cutters, s_fac)
f_cutters = [cutters[i] for i in idxs]
r = final_cut(f_cutters, poly)
result_.extend(r)
if not testing:
if arcpy.Exists(out_fc):
arcpy.Delete_management(out_fc)
arcpy.CopyFeatures_management(result_, out_fc)
out_ids = np.repeat(out_ids, s_fac)
id_fld = np.zeros((len(result_),),
dtype=[("key", "<i4"), ("Old_ID", "<i4")])
id_fld["key"] = np.arange(1, len(result_) + 1)
id_fld["Old_ID"] = out_ids
arcpy.da.ExtendTable(out_fc, oid_fld, id_fld, "key")
else:
msg = """
-----------------------------------------------------------------
Input data is not in a projected coordinate system....
bailing...
-----------------------------------------------------------------
"""
tweet(msg)
# ----------------------------------------------------------------------
# __main__ .... code section
if __name__ == "__main__":
"""Optionally...
: - print the script source name.
: - run the _demo
"""
|
[
"arcpy.CopyFeatures_management",
"numpy.repeat",
"arcpytools_plt.fc_info",
"numpy.set_printoptions",
"arcpytools_plt.cal_area",
"arcpy.Point",
"arcpytools_plt.get_polys",
"numpy.array",
"arcpytools_plt.tweet",
"arcpy.Exists",
"arcpytools_plt._poly_ext",
"arcpy.da.ExtendTable",
"arcpy.Delete_management",
"warnings.simplefilter",
"numpy.arange",
"numpy.ma.masked_print_option.set_display"
] |
[((1572, 1618), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'FutureWarning'], {}), "('ignore', FutureWarning)\n", (1593, 1618), False, 'import warnings\n'), ((1713, 1820), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'edgeitems': '(5)', 'linewidth': '(80)', 'precision': '(2)', 'suppress': '(True)', 'threshold': '(100)', 'formatter': 'ft'}), '(edgeitems=5, linewidth=80, precision=2, suppress=True,\n threshold=100, formatter=ft)\n', (1732, 1820), True, 'import numpy as np\n'), ((1839, 1881), 'numpy.ma.masked_print_option.set_display', 'np.ma.masked_print_option.set_display', (['"""-"""'], {}), "('-')\n", (1876, 1881), True, 'import numpy as np\n'), ((4742, 4756), 'arcpytools_plt.fc_info', 'fc_info', (['in_fc'], {}), '(in_fc)\n', (4749, 4756), False, 'from arcpytools_plt import tweet, fc_info, _poly_ext, trans_rot, cal_area, get_polys\n'), ((4779, 4795), 'arcpytools_plt.get_polys', 'get_polys', (['in_fc'], {}), '(in_fc)\n', (4788, 4795), False, 'from arcpytools_plt import tweet, fc_info, _poly_ext, trans_rot, cal_area, get_polys\n'), ((2514, 2529), 'arcpytools_plt._poly_ext', '_poly_ext', (['poly'], {}), '(poly)\n', (2523, 2529), False, 'from arcpytools_plt import tweet, fc_info, _poly_ext, trans_rot, cal_area, get_polys\n'), ((6093, 6103), 'arcpytools_plt.tweet', 'tweet', (['msg'], {}), '(msg)\n', (6098, 6103), False, 'from arcpytools_plt import tweet, fc_info, _poly_ext, trans_rot, cal_area, get_polys\n'), ((2703, 2747), 'numpy.arange', 'np.arange', (['(L + dx)', '(R + dx)', 'dx'], {'dtype': '"""float"""'}), "(L + dx, R + dx, dx, dtype='float')\n", (2712, 2747), True, 'import numpy as np\n'), ((2765, 2820), 'numpy.array', 'np.array', (['[[[l, B - 1.0], [l, T + 1.0]] for l in lefts]'], {}), '([[[l, B - 1.0], [l, T + 1.0]] for l in lefts])\n', (2773, 2820), True, 'import numpy as np\n'), ((5218, 5254), 'arcpytools_plt.cal_area', 'cal_area', (['poly', 'cuts', 'cutters', 's_fac'], {}), '(poly, cuts, cutters, s_fac)\n', (5226, 5254), False, 'from arcpytools_plt import tweet, fc_info, _poly_ext, trans_rot, cal_area, get_polys\n'), ((5403, 5423), 'arcpy.Exists', 'arcpy.Exists', (['out_fc'], {}), '(out_fc)\n', (5415, 5423), False, 'import arcpy\n'), ((5479, 5525), 'arcpy.CopyFeatures_management', 'arcpy.CopyFeatures_management', (['result_', 'out_fc'], {}), '(result_, out_fc)\n', (5508, 5525), False, 'import arcpy\n'), ((5545, 5570), 'numpy.repeat', 'np.repeat', (['out_ids', 's_fac'], {}), '(out_ids, s_fac)\n', (5554, 5570), True, 'import numpy as np\n'), ((5786, 5838), 'arcpy.da.ExtendTable', 'arcpy.da.ExtendTable', (['out_fc', 'oid_fld', 'id_fld', '"""key"""'], {}), "(out_fc, oid_fld, id_fld, 'key')\n", (5806, 5838), False, 'import arcpy\n'), ((2858, 2902), 'numpy.arange', 'np.arange', (['(B + dy)', '(T + dy)', 'dy'], {'dtype': '"""float"""'}), "(B + dy, T + dy, dy, dtype='float')\n", (2867, 2902), True, 'import numpy as np\n'), ((2920, 2974), 'numpy.array', 'np.array', (['[[[R + 1.0, t], [L - 1.0, t]] for t in tops]'], {}), '([[[R + 1.0, t], [L - 1.0, t]] for t in tops])\n', (2928, 2974), True, 'import numpy as np\n'), ((5438, 5469), 'arcpy.Delete_management', 'arcpy.Delete_management', (['out_fc'], {}), '(out_fc)\n', (5461, 5469), False, 'import arcpy\n'), ((3079, 3095), 'arcpy.Point', 'arcpy.Point', (['*xy'], {}), '(*xy)\n', (3090, 3095), False, 'import arcpy\n')]
|
from scipy import linalg
from sklearn.decomposition import PCA
from scipy.optimize import linear_sum_assignment as linear_assignment
import numpy as np
"""
A function that takes a list of clusters and a list of centroids (one per cluster), and outputs, for each cluster, a boolean mask marking the nb_closest images to its centroid
"""
def closest_to_centroid(clusters,centroids,nb_closest=20):
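    # Returns one boolean mask per cluster marking which members are among the
    # nb_closest to that cluster's centroid (distances measured in PCA-reduced
    # space); clusters with nb_closest or fewer members keep an empty mask.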
output = [[] for i in range(len(centroids))]
#print(clusters)
for i in range(len(centroids)):
centroid = centroids[i]
cluster = clusters[i]
        try:
            cluster_temp = [x.cpu() if x.is_cuda else x for x in cluster]
        except AttributeError:  # elements are plain arrays, not torch tensors
            cluster_temp = cluster
        cluster = [list(x) for x in cluster_temp]
nb_components = 7 if len(cluster)>10 else len(cluster) - 1
pca = PCA(n_components=nb_components) #args.sty_dim)
if len(cluster) > nb_closest :
cluster = pca.fit_transform(cluster)
centroid = centroid.reshape(1, -1)
centroid = pca.transform(centroid)
distances = [linalg.norm(x-centroid) for x in cluster]
            original_distances = list(distances)  # preserve cluster order before sorting
            distances.sort()
            if len(distances) >= nb_closest:
                distances = distances[:nb_closest]
            output[i] = [x in distances for x in original_distances]
return output
def cluster_acc(y_true, y_pred):
"""
Calculate clustering accuracy. Require scikit-learn installed
# Arguments
y: true labels, numpy.array with shape `(n_samples,)`
y_pred: predicted labels, numpy.array with shape `(n_samples,)`
# Return
accuracy, in [0,1]
"""
y_true = y_true.astype(np.int64)
assert y_pred.size == y_true.size
D = max(y_pred.max(), y_true.max()) + 1
w = np.zeros((D, D), dtype=np.int64)
for i in range(y_pred.size):
w[y_pred[i], y_true[i]] += 1
ind = linear_assignment(w.max() - w)
indi = list(ind[0])
indj = list(ind[1])
the_sum = sum([w[i, j] for i, j in zip(indi,indj)])
return the_sum * 1.0 / y_pred.size
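# Illustrative usage (hypothetical labels): a prediction that merely permutes the
# cluster ids still scores 1.0, because the assignment finds the best label mapping.
# >>> cluster_acc(np.array([0, 0, 1, 1]), np.array([1, 1, 0, 0]))
# 1.0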
|
[
"sklearn.decomposition.PCA",
"numpy.zeros",
"scipy.linalg.norm"
] |
[((1792, 1824), 'numpy.zeros', 'np.zeros', (['(D, D)'], {'dtype': 'np.int64'}), '((D, D), dtype=np.int64)\n', (1800, 1824), True, 'import numpy as np\n'), ((809, 840), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'nb_components'}), '(n_components=nb_components)\n', (812, 840), False, 'from sklearn.decomposition import PCA\n'), ((1059, 1084), 'scipy.linalg.norm', 'linalg.norm', (['(x - centroid)'], {}), '(x - centroid)\n', (1070, 1084), False, 'from scipy import linalg\n')]
|
import numpy as np
from rampwf.utils import BaseGenerativeRegressor
class GenerativeRegressor(BaseGenerativeRegressor):
def __init__(self, max_dists, target_dim):
self.decomposition = 'autoregressive'
def fit(self, X_array, y_array):
pass
def predict(self, X_array):
# constant prediction with value equal to 10
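        # Returned as a one-component mixture: a single 'norm' distribution per
        # sample with mean 10 and sigma 0, weighted 1.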
n_samples = X_array.shape[0]
types = ['norm']
means = np.full(shape=(n_samples, 1), fill_value=10)
sigmas = np.zeros((n_samples, 1))
params = np.concatenate((means, sigmas), axis=1)
weights = np.ones((n_samples, 1))
return weights, types, params
|
[
"numpy.full",
"numpy.zeros",
"numpy.ones",
"numpy.concatenate"
] |
[((431, 475), 'numpy.full', 'np.full', ([], {'shape': '(n_samples, 1)', 'fill_value': '(10)'}), '(shape=(n_samples, 1), fill_value=10)\n', (438, 475), True, 'import numpy as np\n'), ((493, 517), 'numpy.zeros', 'np.zeros', (['(n_samples, 1)'], {}), '((n_samples, 1))\n', (501, 517), True, 'import numpy as np\n'), ((536, 575), 'numpy.concatenate', 'np.concatenate', (['(means, sigmas)'], {'axis': '(1)'}), '((means, sigmas), axis=1)\n', (550, 575), True, 'import numpy as np\n'), ((594, 617), 'numpy.ones', 'np.ones', (['(n_samples, 1)'], {}), '((n_samples, 1))\n', (601, 617), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
def deconv_layer(output_shape, filter_shape, activation, strides, name):
scale = 1.0 / np.prod(filter_shape[:3])
seed = int(np.random.randint(0, 1000)) # 123
with tf.name_scope('conv_mnist/conv'):
W = tf.Variable(tf.random_uniform(filter_shape,
minval=-scale, maxval=scale,
dtype=tf.float32, seed=seed), name = name+ '_W')
b = tf.Variable(tf.zeros([filter_shape[-2]]), name=name + '_b') # use output channel
def apply(x):
output_shape_x = (x.get_shape().as_list()[0],) + output_shape
a = tf.nn.conv2d_transpose(x, W, output_shape_x, strides, 'SAME') + b
if activation == 'relu':
return tf.nn.relu(a)
if activation == 'sigmoid':
return tf.nn.sigmoid(a)
if activation == 'linear':
return a
return apply
def generator(dimH=500, dimZ=32, name='generator'):
# now construct a decoder
input_shape = (28, 28, 1)
filter_width = 5
decoder_input_shape = [(4, 4, 32), (7, 7, 32), (14, 14, 16)]
decoder_input_shape.append(input_shape)
fc_layers = [dimZ, dimH, int(np.prod(decoder_input_shape[0]))]
l = 0
# first include the MLP
mlp_layers = []
N_layers = len(fc_layers) - 1
for i in np.arange(0, N_layers):
name_layer = name + '_mlp_l%d' % l
mlp_layers.append(mlp_layer(fc_layers[i], fc_layers[i + 1], 'relu', name_layer))
l += 1
conv_layers = []
N_layers = len(decoder_input_shape) - 1
for i in np.arange(0, N_layers):
if i < N_layers - 1:
activation = 'relu'
else:
activation = 'linear'
name_layer = name + '_conv_l%d' % l
output_shape = decoder_input_shape[i + 1]
input_shape = decoder_input_shape[i]
up_height = int(np.ceil(output_shape[0] / float(input_shape[0])))
up_width = int(np.ceil(output_shape[1] / float(input_shape[1])))
strides = (1, up_height, up_width, 1)
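        # conv2d_transpose with these strides roughly doubles each spatial dim;
        # the explicit output_shape pins the exact sizes (4x4 -> 7x7 -> 14x14 -> 28x28 here)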
filter_shape = (filter_width, filter_width, output_shape[-1], input_shape[-1])
conv_layers.append(deconv_layer(output_shape, filter_shape, activation, \
strides, name_layer))
l += 1
print('decoder architecture', fc_layers, 'reshape', decoder_input_shape)
def apply(z):
x = z
for layer in mlp_layers:
x = layer(x)
x = tf.reshape(x, (x.get_shape().as_list()[0],) + decoder_input_shape[0])
for layer in conv_layers:
x = layer(x)
return x
return apply
def init_weights(input_size, output_size, constant=1.0, seed=123):
""" Glorot and Bengio, 2010's initialization of network weights"""
scale = constant * np.sqrt(6.0 / (input_size + output_size))
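    # i.e. weights are drawn from U(-scale, scale), the Glorot/Xavier uniform range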
if output_size > 0:
return tf.random_uniform((input_size, output_size),
minval=-scale, maxval=scale,
dtype=tf.float32, seed=seed)
else:
return tf.random_uniform([input_size],
minval=-scale, maxval=scale,
dtype=tf.float32, seed=seed)
def mlp_layer(d_in, d_out, activation, name):
with tf.name_scope('conv_mnist/mlp'):
W = tf.Variable(init_weights(d_in, d_out), name=name + '_W')
b = tf.Variable(tf.zeros([d_out]), name=name + '_b')
def apply_layer(x):
a = tf.matmul(x, W) + b
if activation == 'relu':
return tf.nn.relu(a)
if activation == 'sigmoid':
return tf.nn.sigmoid(a)
if activation == 'linear':
return a
return apply_layer
def get_parameters():
return tf.trainable_variables('conv_mnist')
################################## Conv Encoder ##############################
def conv_layer(filter_shape, activation, strides, name):
scale = 1.0 / np.prod(filter_shape[:3])
seed = int(np.random.randint(0, 1000)) # 123
W = tf.Variable(tf.random_uniform(filter_shape,
minval=-scale, maxval=scale,
dtype=tf.float32, seed=seed), name=name + '_W')
b = tf.Variable(tf.zeros([filter_shape[-1]]), name=name + '_b')
def apply(x):
a = tf.nn.conv2d(x, W, strides, 'SAME') + b
if activation == 'relu':
return tf.nn.relu(a)
if activation == 'sigmoid':
return tf.nn.sigmoid(a)
if activation == 'linear':
return a
return apply
def construct_filter_shapes(layer_channels, filter_width=5):
filter_shapes = []
for n_channel in layer_channels:
shape = (n_channel, filter_width, filter_width)
filter_shapes.append(shape)
return filter_shapes
def encoder_convnet(input_shape, dimH=500, dimZ=32, name='conv_encoder'):
# encoder for z (low res)
layer_channels = [input_shape[-1], 16, 32, 32]
filter_width = 5
fc_layer_sizes = [dimH]
conv_layers = []
N_layers = len(layer_channels) - 1
strides = (1, 2, 2, 1)
activation = 'relu'
l = 0
print_shapes = []
for i in range(N_layers):
name_layer = name + '_conv_l%d' % l
filter_shape = (filter_width, filter_width, layer_channels[i], layer_channels[i + 1])
print_shapes.append(filter_shape)
conv_layers.append(conv_layer(filter_shape, activation, strides, name_layer))
l += 1
# fc_layer = [int(np.prod(filter_shape)), dimH, dimZ * 2]
fc_layer = [512, dimH, dimZ*2]
print(fc_layer)
enc_mlp = []
for i in range(len(fc_layer) - 1):
if i + 2 < len(fc_layer):
activation = 'relu'
else:
activation = 'linear'
name_layer = name + '_mlp_l%d' % l
enc_mlp.append(mlp_layer2(fc_layer[i], fc_layer[i + 1], activation, name_layer))
print(fc_layer[i], fc_layer[i + 1])
l += 1
print('encoder architecture', print_shapes, 'reshape', fc_layer)
def apply(x):
out = x
for layer in conv_layers:
out = layer(out)
print(out)
out = tf.reshape(out, (out.get_shape().as_list()[0], -1))
print(out)
for layer in enc_mlp:
out = layer(out)
mu, log_sig = tf.split(out, 2, axis=1)
return mu, log_sig
return apply
def mlp_layer2(d_in, d_out, activation, name):
with tf.name_scope('conv_mnist/mlp2'):
W = tf.Variable(init_weights(d_in, d_out), name=name + '_W')
b = tf.Variable(tf.zeros([d_out]), name=name + '_b')
def apply_layer(x):
a = tf.matmul(x, W) + b
if activation == 'relu':
return tf.nn.relu(a)
if activation == 'sigmoid':
return tf.nn.sigmoid(a)
if activation == 'linear':
return a
return apply_layer
def sample_gaussian(mu, log_sig):
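    # Reparameterisation trick: z = mu + exp(log_sig) * eps, with eps ~ N(0, I)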
return mu + tf.exp(log_sig) * tf.random_normal(mu.get_shape())
|
[
"tensorflow.nn.conv2d",
"numpy.prod",
"numpy.sqrt",
"tensorflow.nn.relu",
"tensorflow.split",
"tensorflow.random_uniform",
"numpy.random.randint",
"tensorflow.nn.sigmoid",
"tensorflow.name_scope",
"tensorflow.matmul",
"tensorflow.nn.conv2d_transpose",
"tensorflow.trainable_variables",
"tensorflow.zeros",
"tensorflow.exp",
"numpy.arange"
] |
[((1354, 1376), 'numpy.arange', 'np.arange', (['(0)', 'N_layers'], {}), '(0, N_layers)\n', (1363, 1376), True, 'import numpy as np\n'), ((1604, 1626), 'numpy.arange', 'np.arange', (['(0)', 'N_layers'], {}), '(0, N_layers)\n', (1613, 1626), True, 'import numpy as np\n'), ((3784, 3820), 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""conv_mnist"""'], {}), "('conv_mnist')\n", (3806, 3820), True, 'import tensorflow as tf\n'), ((136, 161), 'numpy.prod', 'np.prod', (['filter_shape[:3]'], {}), '(filter_shape[:3])\n', (143, 161), True, 'import numpy as np\n'), ((177, 203), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (194, 203), True, 'import numpy as np\n'), ((221, 253), 'tensorflow.name_scope', 'tf.name_scope', (['"""conv_mnist/conv"""'], {}), "('conv_mnist/conv')\n", (234, 253), True, 'import tensorflow as tf\n'), ((2824, 2865), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (input_size + output_size))'], {}), '(6.0 / (input_size + output_size))\n', (2831, 2865), True, 'import numpy as np\n'), ((2905, 3011), 'tensorflow.random_uniform', 'tf.random_uniform', (['(input_size, output_size)'], {'minval': '(-scale)', 'maxval': 'scale', 'dtype': 'tf.float32', 'seed': 'seed'}), '((input_size, output_size), minval=-scale, maxval=scale,\n dtype=tf.float32, seed=seed)\n', (2922, 3011), True, 'import tensorflow as tf\n'), ((3099, 3193), 'tensorflow.random_uniform', 'tf.random_uniform', (['[input_size]'], {'minval': '(-scale)', 'maxval': 'scale', 'dtype': 'tf.float32', 'seed': 'seed'}), '([input_size], minval=-scale, maxval=scale, dtype=tf.\n float32, seed=seed)\n', (3116, 3193), True, 'import tensorflow as tf\n'), ((3312, 3343), 'tensorflow.name_scope', 'tf.name_scope', (['"""conv_mnist/mlp"""'], {}), "('conv_mnist/mlp')\n", (3325, 3343), True, 'import tensorflow as tf\n'), ((3977, 4002), 'numpy.prod', 'np.prod', (['filter_shape[:3]'], {}), '(filter_shape[:3])\n', (3984, 4002), True, 'import numpy as np\n'), ((4018, 4044), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (4035, 4044), True, 'import numpy as np\n'), ((4073, 4167), 'tensorflow.random_uniform', 'tf.random_uniform', (['filter_shape'], {'minval': '(-scale)', 'maxval': 'scale', 'dtype': 'tf.float32', 'seed': 'seed'}), '(filter_shape, minval=-scale, maxval=scale, dtype=tf.\n float32, seed=seed)\n', (4090, 4167), True, 'import tensorflow as tf\n'), ((4278, 4306), 'tensorflow.zeros', 'tf.zeros', (['[filter_shape[-1]]'], {}), '([filter_shape[-1]])\n', (4286, 4306), True, 'import tensorflow as tf\n'), ((6346, 6370), 'tensorflow.split', 'tf.split', (['out', '(2)'], {'axis': '(1)'}), '(out, 2, axis=1)\n', (6354, 6370), True, 'import tensorflow as tf\n'), ((6473, 6505), 'tensorflow.name_scope', 'tf.name_scope', (['"""conv_mnist/mlp2"""'], {}), "('conv_mnist/mlp2')\n", (6486, 6505), True, 'import tensorflow as tf\n'), ((279, 373), 'tensorflow.random_uniform', 'tf.random_uniform', (['filter_shape'], {'minval': '(-scale)', 'maxval': 'scale', 'dtype': 'tf.float32', 'seed': 'seed'}), '(filter_shape, minval=-scale, maxval=scale, dtype=tf.\n float32, seed=seed)\n', (296, 373), True, 'import tensorflow as tf\n'), ((489, 517), 'tensorflow.zeros', 'tf.zeros', (['[filter_shape[-2]]'], {}), '([filter_shape[-2]])\n', (497, 517), True, 'import tensorflow as tf\n'), ((660, 721), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['x', 'W', 'output_shape_x', 'strides', '"""SAME"""'], {}), "(x, W, output_shape_x, strides, 'SAME')\n", (682, 721), True, 'import tensorflow as 
tf\n'), ((778, 791), 'tensorflow.nn.relu', 'tf.nn.relu', (['a'], {}), '(a)\n', (788, 791), True, 'import tensorflow as tf\n'), ((847, 863), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['a'], {}), '(a)\n', (860, 863), True, 'import tensorflow as tf\n'), ((1215, 1246), 'numpy.prod', 'np.prod', (['decoder_input_shape[0]'], {}), '(decoder_input_shape[0])\n', (1222, 1246), True, 'import numpy as np\n'), ((3438, 3455), 'tensorflow.zeros', 'tf.zeros', (['[d_out]'], {}), '([d_out])\n', (3446, 3455), True, 'import tensorflow as tf\n'), ((3512, 3527), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (3521, 3527), True, 'import tensorflow as tf\n'), ((3584, 3597), 'tensorflow.nn.relu', 'tf.nn.relu', (['a'], {}), '(a)\n', (3594, 3597), True, 'import tensorflow as tf\n'), ((3653, 3669), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['a'], {}), '(a)\n', (3666, 3669), True, 'import tensorflow as tf\n'), ((4357, 4392), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W', 'strides', '"""SAME"""'], {}), "(x, W, strides, 'SAME')\n", (4369, 4392), True, 'import tensorflow as tf\n'), ((4449, 4462), 'tensorflow.nn.relu', 'tf.nn.relu', (['a'], {}), '(a)\n', (4459, 4462), True, 'import tensorflow as tf\n'), ((4518, 4534), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['a'], {}), '(a)\n', (4531, 4534), True, 'import tensorflow as tf\n'), ((6600, 6617), 'tensorflow.zeros', 'tf.zeros', (['[d_out]'], {}), '([d_out])\n', (6608, 6617), True, 'import tensorflow as tf\n'), ((6674, 6689), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (6683, 6689), True, 'import tensorflow as tf\n'), ((6746, 6759), 'tensorflow.nn.relu', 'tf.nn.relu', (['a'], {}), '(a)\n', (6756, 6759), True, 'import tensorflow as tf\n'), ((6815, 6831), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['a'], {}), '(a)\n', (6828, 6831), True, 'import tensorflow as tf\n'), ((6964, 6979), 'tensorflow.exp', 'tf.exp', (['log_sig'], {}), '(log_sig)\n', (6970, 6979), True, 'import tensorflow as tf\n')]
|
import typing
from collections import Counter
import numpy as np
from pytest import approx
from zero_play.connect4.game import Connect4State
from zero_play.game_state import GameState
from zero_play.heuristic import Heuristic
from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager
from zero_play.playout import Playout
from zero_play.tictactoe.state import TicTacToeState
class FirstChoiceHeuristic(Heuristic):
def get_summary(self) -> typing.Sequence[str]:
return 'first choice',
def analyse(self, board: GameState) -> typing.Tuple[float, np.ndarray]:
policy = self.get_policy(board)
player = board.get_active_player()
if board.is_win(player):
value = 1.0
elif board.is_win(-player):
value = -1.0
else:
value = 0.0
return value, policy
def get_policy(self, board: GameState):
valid_moves = board.get_valid_moves()
if valid_moves.any():
first_valid = np.nonzero(valid_moves)[0][0]
else:
first_valid = 0
policy = np.zeros_like(valid_moves)
policy[first_valid] = 1.0
return policy
class EarlyChoiceHeuristic(FirstChoiceHeuristic):
""" Thinks each move is 90% as good as the previous option. """
def get_summary(self) -> typing.Sequence[str]:
return 'early choice',
def get_policy(self, board: GameState):
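        # The returned policy is proportional to 0.9 ** move_index over valid
        # moves, normalised to sum to 1.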
valid_moves = board.get_valid_moves()
if not valid_moves.any():
valid_moves = (valid_moves == 0)
raw_policy = np.multiply(valid_moves, 0.9 ** np.arange(len(valid_moves)))
policy = raw_policy / raw_policy.sum()
return policy
def test_repr():
board_text = """\
.O.
.X.
...
"""
board = TicTacToeState(board_text)
expected_repr = "SearchNode(TicTacToeState(spaces=array([[0, -1, 0], [0, 1, 0], [0, 0, 0]])))"
node = SearchNode(board)
node_repr = repr(node)
assert node_repr == expected_repr
def test_eq():
board1 = TicTacToeState()
board2 = TicTacToeState()
board3 = TicTacToeState("""\
...
.X.
...
""")
node1 = SearchNode(board1)
node2 = SearchNode(board2)
node3 = SearchNode(board3)
assert node1 == node2
assert node1 != node3
assert node1 != 42
def test_default_board():
expected_board = TicTacToeState()
expected_node = SearchNode(expected_board)
node = SearchNode(expected_board)
assert expected_node == node
def test_select_leaf_self():
game = TicTacToeState()
node = SearchNode(game)
expected_leaf = node
leaf = node.select_leaf()
assert expected_leaf == leaf
def test_select_first_child():
start_state = TicTacToeState()
expected_leaf_board = start_state.make_move(0)
expected_leaf = SearchNode(expected_leaf_board)
node = SearchNode(start_state)
node.record_value(1)
leaf = node.select_leaf()
assert leaf == expected_leaf
assert node.average_value == -1.0
def test_select_second_child():
start_state = TicTacToeState()
expected_leaf_board = start_state.make_move(1)
expected_leaf = SearchNode(expected_leaf_board)
node = SearchNode(start_state)
node.select_leaf().record_value(0)
node.select_leaf().record_value(0)
leaf = node.select_leaf()
assert leaf == expected_leaf
assert node.average_value == 0
def test_select_grandchild():
start_state = TicTacToeState()
expected_leaf_board = TicTacToeState("""\
XO.
...
...
""")
expected_leaf = SearchNode(expected_leaf_board)
node = SearchNode(start_state)
for _ in range(10):
node.select_leaf().record_value(0)
leaf = node.select_leaf()
assert leaf == expected_leaf
def test_select_good_grandchild():
start_state = TicTacToeState()
node = SearchNode(start_state)
node.select_leaf().record_value(0) # Root node returns itself.
node.select_leaf().record_value(0) # Move 0 AT 1A, value is a tie.
node.select_leaf().record_value(-1) # Move 1 AT 1B, value is a win.
# Expect it to exploit the win at 1B, and try the first grandchild at 1A.
expected_leaf_board = TicTacToeState("""\
ABC
1 OX.
2 ...
3 ...
""")
expected_leaf = SearchNode(expected_leaf_board)
leaf = node.select_leaf()
assert leaf == expected_leaf
def test_select_no_children():
start_board = TicTacToeState("""\
XOX
OOX
.XO
""")
expected_leaf_board = TicTacToeState("""\
XOX
OOX
XXO
""")
expected_leaf = SearchNode(expected_leaf_board)
start_node = SearchNode(start_board)
leaf1 = start_node.select_leaf()
leaf1.record_value(1)
leaf2 = start_node.select_leaf()
leaf2.record_value(1)
leaf3 = start_node.select_leaf()
assert leaf1 == start_node
assert leaf2 == expected_leaf
assert leaf3 == expected_leaf
def test_choose_move():
np.random.seed(0)
start_state = Connect4State()
state1 = Connect4State("""\
.......
.......
.......
...XX..
OXOXO..
XOXOXOO
""")
expected_display = """\
.......
.......
.......
..XXX..
OXOXO..
XOXOXOO
"""
player = MctsPlayer(start_state, iteration_count=200)
move = player.choose_move(state1)
state2 = state1.make_move(move)
display = state2.display()
assert display == expected_display
def test_choose_move_in_pool():
start_state = Connect4State()
state1 = Connect4State("""\
.......
.......
.......
...XX..
OXOXO..
XOXOXOO
""")
player = MctsPlayer(start_state, iteration_count=200, process_count=2)
valid_moves = start_state.get_valid_moves()
move = player.choose_move(state1)
# Can't rely on which move, because other process has separate random seed.
assert valid_moves[move]
def test_choose_moves_at_random():
""" Early moves are chosen from a weighted random population. """
np.random.seed(0)
start_state = TicTacToeState()
state1 = TicTacToeState("""\
...
...
X..
""")
player = MctsPlayer(start_state,
iteration_count=80,
heuristic=EarlyChoiceHeuristic())
moves = set()
for _ in range(10):
move = player.choose_move(state1)
moves.add(move)
player.search_manager.reset()
assert 1 < len(moves)
def test_choose_move_no_iterations():
np.random.seed(0)
start_state = Connect4State()
state1 = Connect4State("""\
.......
.......
.......
...XX..
OXOXO..
XOXOXOO
""")
test_count = 400
expected_count = test_count/7
expected_low = expected_count * 0.9
expected_high = expected_count * 1.1
move_counts = Counter()
for _ in range(test_count):
player = MctsPlayer(start_state, iteration_count=0)
move = player.choose_move(state1)
move_counts[move] += 1
assert expected_low < move_counts[2] < expected_high
def test_analyse_finished_game():
board = TicTacToeState("""\
OXO
XXO
XOX
""")
heuristic = Playout()
expected_value = 0 # A tie
expected_policy = [1/9] * 9
value, policy = heuristic.analyse(board)
assert expected_value == value
assert expected_policy == policy.tolist()
def test_search_manager_reuses_node():
start_state = TicTacToeState()
manager = SearchManager(start_state, Playout())
manager.search(start_state, iterations=10)
move = manager.get_best_move()
state2 = start_state.make_move(move)
node = manager.current_node
first_value_count = node.value_count
manager.search(state2, iterations=10)
second_value_count = node.value_count
assert first_value_count > 0
assert first_value_count + 10 == second_value_count
def test_search_manager_with_opponent():
""" Like when opponent is not sharing the SearchManager. """
start_state = TicTacToeState()
manager = SearchManager(start_state, Playout())
manager.search(start_state, iterations=10)
node = manager.current_node.children[0] # Didn't call get_best_move().
move = 0
state2 = start_state.make_move(move)
first_value_count = node.value_count
manager.search(state2, iterations=10)
second_value_count = node.value_count
assert first_value_count > 0
assert first_value_count + 10 == second_value_count
def test_annotate():
start_state = TicTacToeState()
player = MctsPlayer(start_state,
iteration_count=10,
heuristic=FirstChoiceHeuristic())
player.choose_move(start_state)
move_probabilities = player.get_move_probabilities(start_state)
best_move, best_probability, best_count, best_value = move_probabilities[0]
assert best_move == '1A'
assert best_probability == approx(0.999013)
assert best_count == 9
assert best_value == approx(2/9)
def test_create_training_data():
start_state = TicTacToeState()
manager = SearchManager(start_state, FirstChoiceHeuristic())
expected_boards, expected_outputs = zip(*[
[start_state.get_spaces(),
np.array([1., 0., 0., 0., 0., 0., 0., 0., 0., -1.])],
[TicTacToeState("""\
X..
...
...
""").get_spaces(), np.array([0., 1., 0., 0., 0., 0., 0., 0., 0., 1.])],
[TicTacToeState("""\
XO.
...
...
""").get_spaces(), np.array([0., 0., 1., 0., 0., 0., 0., 0., 0., -1.])],
[TicTacToeState("""\
XOX
...
...
""").get_spaces(), np.array([0., 0., 0., 1., 0., 0., 0., 0., 0., 1.])],
[TicTacToeState("""\
XOX
O..
...
""").get_spaces(), np.array([0., 0., 0., 0., 1., 0., 0., 0., 0., -1.])],
[TicTacToeState("""\
XOX
OX.
...
""").get_spaces(), np.array([0., 0., 0., 0., 0., 1., 0., 0., 0., 1.])],
[TicTacToeState("""\
XOX
OXO
...
""").get_spaces(), np.array([0., 0., 0., 0., 0., 0., 1., 0., 0., -1.])]])
expected_boards = np.stack(expected_boards)
expected_outputs = np.stack(expected_outputs)
boards, outputs = manager.create_training_data(iterations=1, data_size=7)
assert repr(boards) == repr(expected_boards)
assert repr(outputs) == repr(expected_outputs)
def test_win_scores_one():
""" Expose bug where search continues after a game-ending position. """
state1 = TicTacToeState("""\
..X
XX.
OO.
""")
player = MctsPlayer(TicTacToeState(), state1.X_PLAYER, iteration_count=100)
move = player.choose_move(state1)
search_node1 = player.search_manager.current_node.parent
for child_node in search_node1.children:
if child_node.move == 8:
assert child_node.average_value == 1.0
assert move == 8
def test_choose_move_sets_current_node():
np.random.seed(0)
start_state = Connect4State()
state1 = Connect4State("""\
.......
.......
.......
.......
OXOXOXO
XOXOXOX
""")
player = MctsPlayer(start_state, iteration_count=20)
move1 = player.choose_move(state1)
current_node1 = player.search_manager.current_node
state2 = state1.make_move(move1)
move2 = player.choose_move(state2)
current_node2 = player.search_manager.current_node
state3 = state2.make_move(move2)
assert current_node1.game_state == state2
assert current_node2.game_state == state3
|
[
"pytest.approx",
"zero_play.playout.Playout",
"zero_play.tictactoe.state.TicTacToeState",
"zero_play.mcts_player.MctsPlayer",
"zero_play.connect4.game.Connect4State",
"collections.Counter",
"numpy.stack",
"zero_play.mcts_player.SearchNode",
"numpy.array",
"numpy.random.seed",
"numpy.nonzero",
"numpy.zeros_like"
] |
[((1770, 1796), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['board_text'], {}), '(board_text)\n', (1784, 1796), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((1908, 1925), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['board'], {}), '(board)\n', (1918, 1925), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2022, 2038), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (2036, 2038), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((2052, 2068), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (2066, 2068), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((2082, 2115), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""...\n.X.\n...\n"""'], {}), "('...\\n.X.\\n...\\n')\n", (2096, 2115), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((2132, 2150), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['board1'], {}), '(board1)\n', (2142, 2150), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2163, 2181), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['board2'], {}), '(board2)\n', (2173, 2181), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2194, 2212), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['board3'], {}), '(board3)\n', (2204, 2212), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2338, 2354), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (2352, 2354), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((2375, 2401), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_board'], {}), '(expected_board)\n', (2385, 2401), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2414, 2440), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_board'], {}), '(expected_board)\n', (2424, 2440), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2517, 2533), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (2531, 2533), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((2545, 2561), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['game'], {}), '(game)\n', (2555, 2561), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2703, 2719), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (2717, 2719), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((2791, 2822), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_leaf_board'], {}), '(expected_leaf_board)\n', (2801, 2822), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((2835, 2858), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['start_state'], {}), '(start_state)\n', (2845, 2858), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((3039, 3055), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (3053, 3055), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((3127, 3158), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_leaf_board'], {}), '(expected_leaf_board)\n', (3137, 3158), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((3171, 3194), 
'zero_play.mcts_player.SearchNode', 'SearchNode', (['start_state'], {}), '(start_state)\n', (3181, 3194), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((3423, 3439), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (3437, 3439), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((3466, 3499), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XO.\n...\n...\n"""'], {}), "('XO.\\n...\\n...\\n')\n", (3480, 3499), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((3523, 3554), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_leaf_board'], {}), '(expected_leaf_board)\n', (3533, 3554), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((3567, 3590), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['start_state'], {}), '(start_state)\n', (3577, 3590), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((3778, 3794), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (3792, 3794), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((3806, 3829), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['start_state'], {}), '(start_state)\n', (3816, 3829), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((4147, 4193), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['""" ABC\n1 OX.\n2 ...\n3 ...\n"""'], {}), '(""" ABC\n1 OX.\n2 ...\n3 ...\n""")\n', (4161, 4193), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((4216, 4247), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_leaf_board'], {}), '(expected_leaf_board)\n', (4226, 4247), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((4364, 4397), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XOX\nOOX\n.XO\n"""'], {}), "('XOX\\nOOX\\n.XO\\n')\n", (4378, 4397), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((4427, 4460), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XOX\nOOX\nXXO\n"""'], {}), "('XOX\\nOOX\\nXXO\\n')\n", (4441, 4460), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((4484, 4515), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['expected_leaf_board'], {}), '(expected_leaf_board)\n', (4494, 4515), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((4534, 4557), 'zero_play.mcts_player.SearchNode', 'SearchNode', (['start_board'], {}), '(start_board)\n', (4544, 4557), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((4851, 4868), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4865, 4868), True, 'import numpy as np\n'), ((4887, 4902), 'zero_play.connect4.game.Connect4State', 'Connect4State', ([], {}), '()\n', (4900, 4902), False, 'from zero_play.connect4.game import Connect4State\n'), ((4916, 4985), 'zero_play.connect4.game.Connect4State', 'Connect4State', (['""".......\n.......\n.......\n...XX..\nOXOXO..\nXOXOXOO\n"""'], {}), '(""".......\n.......\n.......\n...XX..\nOXOXO..\nXOXOXOO\n""")\n', (4929, 4985), False, 'from zero_play.connect4.game import Connect4State\n'), ((5081, 5125), 'zero_play.mcts_player.MctsPlayer', 'MctsPlayer', (['start_state'], {'iteration_count': '(200)'}), '(start_state, iteration_count=200)\n', (5091, 5125), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, 
SearchManager\n'), ((5324, 5339), 'zero_play.connect4.game.Connect4State', 'Connect4State', ([], {}), '()\n', (5337, 5339), False, 'from zero_play.connect4.game import Connect4State\n'), ((5353, 5422), 'zero_play.connect4.game.Connect4State', 'Connect4State', (['""".......\n.......\n.......\n...XX..\nOXOXO..\nXOXOXOO\n"""'], {}), '(""".......\n.......\n.......\n...XX..\nOXOXO..\nXOXOXOO\n""")\n', (5366, 5422), False, 'from zero_play.connect4.game import Connect4State\n'), ((5438, 5499), 'zero_play.mcts_player.MctsPlayer', 'MctsPlayer', (['start_state'], {'iteration_count': '(200)', 'process_count': '(2)'}), '(start_state, iteration_count=200, process_count=2)\n', (5448, 5499), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((5808, 5825), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5822, 5825), True, 'import numpy as np\n'), ((5844, 5860), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (5858, 5860), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((5874, 5907), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""...\n...\nX..\n"""'], {}), "('...\\n...\\nX..\\n')\n", (5888, 5907), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((6268, 6285), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6282, 6285), True, 'import numpy as np\n'), ((6304, 6319), 'zero_play.connect4.game.Connect4State', 'Connect4State', ([], {}), '()\n', (6317, 6319), False, 'from zero_play.connect4.game import Connect4State\n'), ((6333, 6402), 'zero_play.connect4.game.Connect4State', 'Connect4State', (['""".......\n.......\n.......\n...XX..\nOXOXO..\nXOXOXOO\n"""'], {}), '(""".......\n.......\n.......\n...XX..\nOXOXO..\nXOXOXOO\n""")\n', (6346, 6402), False, 'from zero_play.connect4.game import Connect4State\n'), ((6559, 6568), 'collections.Counter', 'Counter', ([], {}), '()\n', (6566, 6568), False, 'from collections import Counter\n'), ((6841, 6874), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""OXO\nXXO\nXOX\n"""'], {}), "('OXO\\nXXO\\nXOX\\n')\n", (6855, 6874), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((6894, 6903), 'zero_play.playout.Playout', 'Playout', ([], {}), '()\n', (6901, 6903), False, 'from zero_play.playout import Playout\n'), ((7155, 7171), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (7169, 7171), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((7721, 7737), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (7735, 7737), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((8224, 8240), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (8238, 8240), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((8759, 8775), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (8773, 8775), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((9690, 9715), 'numpy.stack', 'np.stack', (['expected_boards'], {}), '(expected_boards)\n', (9698, 9715), True, 'import numpy as np\n'), ((9739, 9765), 'numpy.stack', 'np.stack', (['expected_outputs'], {}), '(expected_outputs)\n', (9747, 9765), True, 'import numpy as np\n'), ((10064, 10097), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""..X\nXX.\nOO.\n"""'], {}), "('..X\\nXX.\\nOO.\\n')\n", (10078, 10097), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), 
((10481, 10498), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (10495, 10498), True, 'import numpy as np\n'), ((10517, 10532), 'zero_play.connect4.game.Connect4State', 'Connect4State', ([], {}), '()\n', (10530, 10532), False, 'from zero_play.connect4.game import Connect4State\n'), ((10546, 10615), 'zero_play.connect4.game.Connect4State', 'Connect4State', (['""".......\n.......\n.......\n.......\nOXOXOXO\nXOXOXOX\n"""'], {}), '(""".......\n.......\n.......\n.......\nOXOXOXO\nXOXOXOX\n""")\n', (10559, 10615), False, 'from zero_play.connect4.game import Connect4State\n'), ((10631, 10674), 'zero_play.mcts_player.MctsPlayer', 'MctsPlayer', (['start_state'], {'iteration_count': '(20)'}), '(start_state, iteration_count=20)\n', (10641, 10674), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((1095, 1121), 'numpy.zeros_like', 'np.zeros_like', (['valid_moves'], {}), '(valid_moves)\n', (1108, 1121), True, 'import numpy as np\n'), ((6618, 6660), 'zero_play.mcts_player.MctsPlayer', 'MctsPlayer', (['start_state'], {'iteration_count': '(0)'}), '(start_state, iteration_count=0)\n', (6628, 6660), False, 'from zero_play.mcts_player import SearchNode, MctsPlayer, SearchManager\n'), ((7213, 7222), 'zero_play.playout.Playout', 'Playout', ([], {}), '()\n', (7220, 7222), False, 'from zero_play.playout import Playout\n'), ((7779, 7788), 'zero_play.playout.Playout', 'Playout', ([], {}), '()\n', (7786, 7788), False, 'from zero_play.playout import Playout\n'), ((8625, 8641), 'pytest.approx', 'approx', (['(0.999013)'], {}), '(0.999013)\n', (8631, 8641), False, 'from pytest import approx\n'), ((8694, 8707), 'pytest.approx', 'approx', (['(2 / 9)'], {}), '(2 / 9)\n', (8700, 8707), False, 'from pytest import approx\n'), ((10126, 10142), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', ([], {}), '()\n', (10140, 10142), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((1006, 1029), 'numpy.nonzero', 'np.nonzero', (['valid_moves'], {}), '(valid_moves)\n', (1016, 1029), True, 'import numpy as np\n'), ((8932, 8993), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0]'], {}), '([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0])\n', (8940, 8993), True, 'import numpy as np\n'), ((9046, 9106), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]'], {}), '([0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])\n', (9054, 9106), True, 'import numpy as np\n'), ((9159, 9220), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0]'], {}), '([0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0])\n', (9167, 9220), True, 'import numpy as np\n'), ((9273, 9333), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])\n', (9281, 9333), True, 'import numpy as np\n'), ((9386, 9447), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0])\n', (9394, 9447), True, 'import numpy as np\n'), ((9500, 9560), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0])\n', (9508, 9560), True, 'import numpy as np\n'), ((9613, 9674), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, -1.0])\n', (9621, 9674), True, 'import numpy as np\n'), ((8995, 
9028), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""X..\n...\n...\n"""'], {}), "('X..\\n...\\n...\\n')\n", (9009, 9028), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((9108, 9141), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XO.\n...\n...\n"""'], {}), "('XO.\\n...\\n...\\n')\n", (9122, 9141), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((9222, 9255), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XOX\n...\n...\n"""'], {}), "('XOX\\n...\\n...\\n')\n", (9236, 9255), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((9335, 9368), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XOX\nO..\n...\n"""'], {}), "('XOX\\nO..\\n...\\n')\n", (9349, 9368), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((9449, 9482), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XOX\nOX.\n...\n"""'], {}), "('XOX\\nOX.\\n...\\n')\n", (9463, 9482), False, 'from zero_play.tictactoe.state import TicTacToeState\n'), ((9562, 9595), 'zero_play.tictactoe.state.TicTacToeState', 'TicTacToeState', (['"""XOX\nOXO\n...\n"""'], {}), "('XOX\\nOXO\\n...\\n')\n", (9576, 9595), False, 'from zero_play.tictactoe.state import TicTacToeState\n')]
|
from dudes.Ranks import Ranks
import numpy as np
import sys
def printDebug(DEBUG, l):
    if DEBUG:
        sys.stderr.write(str(l) + "\n")

def group_max(groups, data, pre_order=None):
    if pre_order is None:
        order = np.lexsort((data, groups))
    else:
        order = pre_order
    groups = groups[order]  # this is only needed if groups is unsorted
    data = data[order]
    index = np.empty(len(groups), 'bool')
    index[-1] = True
    index[:-1] = groups[1:] != groups[:-1]
    if pre_order is None:
        return order, index
    else:
        return index  # Return the data array in an ordered way (matching the output of np.unique(groups))
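# A minimal sketch (not in the original module) of how group_max is meant to be used;
# the example arrays are made up. data[order][index] gives the maximum of each group,
# in the order returned by np.unique(groups).
def _group_max_example():
    groups = np.array([0, 1, 0, 1, 2])
    data = np.array([3.0, 5.0, 7.0, 1.0, 2.0])
    order, index = group_max(groups, data)
    return data[order][index]  # -> array([7., 5., 2.])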
def getNameRank(rankid):
    # Returns the fixed rank name for the given rankid
    if rankid < len(Ranks.ranks):
        return Ranks.ranks[rankid]
    else:
        return Ranks.ranks[-1]  # more than one no_rank/strain

def getIndexRank(rank):
    # Returns the rankid (index) for the given rank name
    return Ranks.ranks.index(rank)
|
[
"numpy.lexsort",
"dudes.Ranks.Ranks.ranks.index"
] |
[((870, 893), 'dudes.Ranks.Ranks.ranks.index', 'Ranks.ranks.index', (['rank'], {}), '(rank)\n', (887, 893), False, 'from dudes.Ranks import Ranks\n'), ((213, 239), 'numpy.lexsort', 'np.lexsort', (['(data, groups)'], {}), '((data, groups))\n', (223, 239), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 3 10:27:25 2019
@author: alishbaimran
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from imutils import paths
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from keras.applications import VGG19
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.models import Model
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras.callbacks import EarlyStopping
# defining constants and variables
img_width, img_height = 128, 128
train_data_dir = "data/train"
validation_data_dir = "data/val"
test_data_dir = "data/test"
NB = 2
BS = 64
EPOCHS = 10
# creating train, validation and test data generators
TRAIN = len(list(paths.list_images(train_data_dir)))
VAL = len(list(paths.list_images(validation_data_dir)))
TEST = len(list(paths.list_images(test_data_dir)))
trainAug = ImageDataGenerator(rescale = 1./255,
fill_mode = "nearest")
valAug = ImageDataGenerator(rescale = 1./255,
fill_mode = "nearest")
trainGen = trainAug.flow_from_directory(
train_data_dir,
target_size = (img_height, img_width),
batch_size = BS,
shuffle = True,
class_mode = "categorical")
valGen = valAug.flow_from_directory(
validation_data_dir,
target_size = (img_height, img_width),
batch_size = BS,
shuffle = False,
class_mode = "categorical")
testGen = valAug.flow_from_directory(
test_data_dir,
target_size = (img_height, img_width),
batch_size = BS,
shuffle = False,
class_mode = "categorical")
# loading pre-trained model, training additional features and saving model
base_model = VGG19(weights = "imagenet", include_top=False,
input_shape = (img_width, img_height, 3))
x = base_model.output
x = Flatten()(x)
x = Dense(1024, activation = "relu")(x)
x = Dropout(0.4)(x)
x = Dense(256, activation = "relu")(x)
x = Dropout(0.2)(x)
preds = Dense(NB, activation = "softmax")(x)
model = Model(inputs=base_model.input, outputs=preds)
for i,layer in enumerate(model.layers):
print(i,layer.name)
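# Transfer-learning note (added comment): the loops below freeze the first 16 layers of
# the network (the early VGG19 convolutional blocks) and leave the later layers,
# including the newly added dense head, trainable for fine-tuning.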
for layer in model.layers[:16]:
layer.trainable=False
for layer in model.layers[16:]:
layer.trainable=True
model.summary()
early = EarlyStopping(monitor = 'val_acc', min_delta = 0,
patience = 10, verbose= 1 , mode = 'auto')
model.compile(loss = "binary_crossentropy",
optimizer = SGD(lr=0.001, momentum=0.9),
metrics=["accuracy"])
H = model.fit_generator(
trainGen,
epochs = EPOCHS,
steps_per_epoch = TRAIN // BS,
validation_data = valGen,
validation_steps = VAL // BS,
callbacks = [early])
model.save('model.h5')
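# Added note: the trained network can later be restored with
# keras.models.load_model('model.h5') for inference without retraining.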
# generating predictions using model
testGen.reset()
predictions = model.predict_generator(testGen, steps = (TEST // BS) + 1)
predictions = np.argmax(predictions, axis=1)
print("Test set accuracy: " +
str(accuracy_score(testGen.classes, predictions, normalize=True) * 100)
+ "%")
print(classification_report(testGen.classes, predictions,
target_names=testGen.class_indices.keys()))
# plotting training data
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, EPOCHS), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, EPOCHS), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, EPOCHS), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, EPOCHS), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy on Dataset")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig("plot.jpg")
|
[
"matplotlib.pyplot.ylabel",
"keras.preprocessing.image.ImageDataGenerator",
"keras.optimizers.SGD",
"imutils.paths.list_images",
"keras.layers.Dense",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"keras.models.Model",
"keras.callbacks.EarlyStopping",
"keras.applications.VGG19",
"matplotlib.pyplot.savefig",
"keras.layers.Flatten",
"numpy.argmax",
"matplotlib.pyplot.title",
"keras.layers.Dropout",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure"
] |
[((1032, 1090), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'fill_mode': '"""nearest"""'}), "(rescale=1.0 / 255, fill_mode='nearest')\n", (1050, 1090), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1125, 1183), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'fill_mode': '"""nearest"""'}), "(rescale=1.0 / 255, fill_mode='nearest')\n", (1143, 1183), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((2102, 2190), 'keras.applications.VGG19', 'VGG19', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(img_width, img_height, 3)'}), "(weights='imagenet', include_top=False, input_shape=(img_width,\n img_height, 3))\n", (2107, 2190), False, 'from keras.applications import VGG19\n'), ((2433, 2476), 'keras.models.Model', 'Model', ([], {'input': 'base_model.input', 'output': 'preds'}), '(input=base_model.input, output=preds)\n', (2438, 2476), False, 'from keras.models import Model\n'), ((2700, 2787), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(1)', 'mode': '"""auto"""'}), "(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode=\n 'auto')\n", (2713, 2787), False, 'from keras.callbacks import EarlyStopping\n'), ((3363, 3393), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (3372, 3393), True, 'import numpy as np\n'), ((3689, 3712), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (3702, 3712), True, 'import matplotlib.pyplot as plt\n'), ((3714, 3726), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3724, 3726), True, 'import matplotlib.pyplot as plt\n'), ((4012, 4062), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Loss and Accuracy on Dataset"""'], {}), "('Training Loss and Accuracy on Dataset')\n", (4021, 4062), True, 'import matplotlib.pyplot as plt\n'), ((4064, 4085), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch #"""'], {}), "('Epoch #')\n", (4074, 4085), True, 'import matplotlib.pyplot as plt\n'), ((4087, 4114), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss/Accuracy"""'], {}), "('Loss/Accuracy')\n", (4097, 4114), True, 'import matplotlib.pyplot as plt\n'), ((4116, 4144), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (4126, 4144), True, 'import matplotlib.pyplot as plt\n'), ((4146, 4169), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot.jpg"""'], {}), "('plot.jpg')\n", (4157, 4169), True, 'import matplotlib.pyplot as plt\n'), ((2240, 2249), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2247, 2249), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((2258, 2288), 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (2263, 2288), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((2299, 2311), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (2306, 2311), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((2320, 2349), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (2325, 2349), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((2360, 2372), 'keras.layers.Dropout', 
'Dropout', (['(0.2)'], {}), '(0.2)\n', (2367, 2372), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((2385, 2416), 'keras.layers.Dense', 'Dense', (['NB'], {'activation': '"""softmax"""'}), "(NB, activation='softmax')\n", (2390, 2416), False, 'from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\n'), ((3737, 3757), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), '(0, EPOCHS)\n', (3746, 3757), True, 'import numpy as np\n'), ((3808, 3828), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), '(0, EPOCHS)\n', (3817, 3828), True, 'import numpy as np\n'), ((3881, 3901), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), '(0, EPOCHS)\n', (3890, 3901), True, 'import numpy as np\n'), ((3950, 3970), 'numpy.arange', 'np.arange', (['(0)', 'EPOCHS'], {}), '(0, EPOCHS)\n', (3959, 3970), True, 'import numpy as np\n'), ((873, 906), 'imutils.paths.list_images', 'paths.list_images', (['train_data_dir'], {}), '(train_data_dir)\n', (890, 906), False, 'from imutils import paths\n'), ((925, 963), 'imutils.paths.list_images', 'paths.list_images', (['validation_data_dir'], {}), '(validation_data_dir)\n', (942, 963), False, 'from imutils import paths\n'), ((983, 1015), 'imutils.paths.list_images', 'paths.list_images', (['test_data_dir'], {}), '(test_data_dir)\n', (1000, 1015), False, 'from imutils import paths\n'), ((2898, 2925), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.001)', 'momentum': '(0.9)'}), '(lr=0.001, momentum=0.9)\n', (2901, 2925), False, 'from keras.optimizers import SGD\n'), ((3439, 3499), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['testGen.classes', 'predictions'], {'normalize': '(True)'}), '(testGen.classes, predictions, normalize=True)\n', (3453, 3499), False, 'from sklearn.metrics import accuracy_score\n')]
|
### DEPRECATE THESE? OLD VERSIONS OF CLEANING FUNCTIONS FOR JUST EBIKES
### NO LONGER WORKING WITH THESE
import pandas as pd
import numpy as np
from shapely.geometry import Point
import geopandas as gpd
from cabi.utils import which_anc, station_anc_dict
from cabi.get_data import anc_gdf
gdf = anc_gdf()
anc_dict = station_anc_dict()
station_keys = anc_dict.keys()
## NEEDS WORK!! FIX GET_DATA MODULE SO THAT LOAD CLEAN DOCKLESS CAN JUST CALL FROM THERE
def load_clean_dockless():
    # TODO: call this from the get_data module once it supports it
    df = pd.read_pickle('../data/wip/raw_dockless.pkl')
    cleaned_ebikes = clean_frame(df)
    cleaned_ebikes = cleaned_ebikes.drop('rideable_type', axis=1)
    return cleaned_ebikes

def load_geo_ebikes():
    df = load_clean_dockless()
    geo_ebikes = to_geo(df)
    return geo_ebikes

def load_clean_full():
    """Load and clean the full trip data. TODO: make this extensible to more months."""
    df = pd.read_pickle('../data/wip/raw_apr_to_jul_df.pkl')
    cleaned_full = clean_frame(df)
    return cleaned_full

def geo_longer(df):
    """Melt started_at/ended_at into a single 'time' column, doubling the length of
    the frame in the process. A good sanity check is that the result has exactly
    twice as many rows as the input."""
    # List all the columns that are not start/end time for easy melt operation below
    cols = list(df.columns)
    cols.remove('started_at')
    cols.remove('ended_at')
    # Combine started_at/ended_at into one column 'time', indicating whether
    # this was a trip start or trip end in another column, 'start_end',
    # set index of new df to 'time',
    # sort the index, so it makes sense as a time series
    long_geo = df.rename(columns={'started_at': 'start', 'ended_at': 'end'}) \
        .melt(id_vars=cols
              , value_vars=['start', 'end']
              , var_name='start_end'
              , value_name='time') \
        .set_index('time') \
        .sort_index()
    return long_geo

def load_long_geo():
    """Load the dockless (e-bike) data in long format (one row per trip start/end)."""
    df = load_geo_ebikes()
    long_geo = geo_longer(df)
    return long_geo

def load_long_geo_full():
    """Load the full data in long format. NOTE: currently identical to load_long_geo();
    needs a loader for the full dataset."""
    df = load_geo_ebikes()
    long_geo = geo_longer(df)
    return long_geo

def anc_frame(df):
    """Drop the station-name columns, keeping only the ANC columns."""
    anc_df = df.drop(['start_station_name', 'end_station_name'], axis=1)
    return anc_df

def load_long_anc():
    """Load the long-format e-bike data with ANC columns only."""
    df = load_long_geo()
    anc_df = anc_frame(df)
    return anc_df
# NEEDS WORK!! Generalize to any location column (station etc.)
# This is likely unnecessary now that we have a more generalized long-df function
def net_gain_loss_anc(ANC_name, df):
    """Return a numpy array of 1/0/-1 values, one per row of df:
    1 if the ride ended in the ANC, 0 if the ride neither left from nor ended in
    the ANC, and -1 if the ride left from the ANC."""
    conditions = [
        (df['start_end'] == 'start') & (df['ANC_start'] == ANC_name),
        (df['start_end'] == 'end') & (df['ANC_end'] == ANC_name),
        (df['ANC_start'] != ANC_name) & (df['ANC_end'] != ANC_name),
        (df['start_end'] == 'end') & (df['ANC_end'] != ANC_name),
        (df['start_end'] == 'start') & (df['ANC_start'] != ANC_name)
    ]
    values = [
        -1,
        1,
        0,
        0,
        0
    ]
    return np.select(conditions, values)
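# A minimal sketch (not part of the original module) showing how np.select resolves the
# conditions above; the two-row frame and the ANC label 'ANC 1' are made-up values.
def _net_gain_loss_example():
    example = pd.DataFrame({
        'start_end': ['start', 'end'],
        'ANC_start': ['ANC 1', 'ANC 2'],
        'ANC_end': ['ANC 2', 'ANC 1'],
    })
    # First row leaves ANC 1 (-1), second row ends in ANC 1 (+1).
    return net_gain_loss_anc('ANC 1', example)  # -> array([-1, 1])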
def plus_minus_anc_frame(df):
    """Build a frame of per-ANC gain/loss values. TODO: generalize this function to
    accept locations other than ANCs and remove the dependency on gdf."""
    # Create dictionary of ANCs (keys) and arrays of plus/minus values returned from
    # net_gain_loss_anc (values), for each unique ANC_ID
    plus_minus_dict = {anc: net_gain_loss_anc(anc, df)
                       for anc in list(gdf.ANC_ID)}
    # Convert dict to dataframe, indexed by the (time) index of the long ANC df passed in
    anc_plus_minus_df = pd.DataFrame(plus_minus_dict, index=df.index)
    return anc_plus_minus_df

def load_plus_minus_anc():
    df = load_long_anc()
    plus_minus = plus_minus_anc_frame(df)
    return plus_minus
|
[
"pandas.read_pickle",
"numpy.select",
"cabi.utils.station_anc_dict",
"cabi.get_data.anc_gdf",
"pandas.DataFrame"
] |
[((297, 306), 'cabi.get_data.anc_gdf', 'anc_gdf', ([], {}), '()\n', (304, 306), False, 'from cabi.get_data import anc_gdf\n'), ((318, 336), 'cabi.utils.station_anc_dict', 'station_anc_dict', ([], {}), '()\n', (334, 336), False, 'from cabi.utils import which_anc, station_anc_dict\n'), ((533, 579), 'pandas.read_pickle', 'pd.read_pickle', (['"""../data/wip/raw_dockless.pkl"""'], {}), "('../data/wip/raw_dockless.pkl')\n", (547, 579), True, 'import pandas as pd\n'), ((913, 964), 'pandas.read_pickle', 'pd.read_pickle', (['"""../data/wip/raw_apr_to_jul_df.pkl"""'], {}), "('../data/wip/raw_apr_to_jul_df.pkl')\n", (927, 964), True, 'import pandas as pd\n'), ((3279, 3308), 'numpy.select', 'np.select', (['conditions', 'values'], {}), '(conditions, values)\n', (3288, 3308), True, 'import numpy as np\n'), ((3834, 3879), 'pandas.DataFrame', 'pd.DataFrame', (['plus_minus_dict'], {'index': 'df.index'}), '(plus_minus_dict, index=df.index)\n', (3846, 3879), True, 'import pandas as pd\n')]
|
"""
Copyright (C) 2022 <NAME>
Released under MIT License. See the file LICENSE for details.
This module describes 2D/3D tracks. GUTS's output is a list of instances
of these classes.
"""
import numpy as np
from filter import filter2D, filter3D
from options import Options, Filter2DParams, Filter3DParams
from position import Position, Position3D
from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle
from activecorners import activecorners
from world import World
curr_id = 0
def next_id():
global curr_id
curr_id += 1
return curr_id
def reset_id():
global curr_id
curr_id = 0
class Track:
def __init__(self, options:Options, class_name:str, world:World,
current_time=None, det=None):
self.options = options
self.type = None # either "2D" or "3D"
self.history = dict()
self.times = dict() # total amount of seconds as a float, for each frame
self.class_name = class_name
self.world = world
self.last_updated = current_time
self.last_updated_frame = None
self.id = next_id()
self.should_die = False
def is_numerically_stable(self):
# Bad numerics can sometimes make Kalman numbers grow very large or NaN
# We are not interested in such tracks
c1 = np.any(np.abs(self.filter.x) > 1e8)
c2 = np.any(np.isnan(self.filter.x))
return (not c1) and (not c2)
def finalize(self):
if self.last_updated_frame is None:
self.history = {}
else:
self.history = {key:val for (key,val) in self.history.items() if
key <= self.last_updated_frame}
self.history = {key:val for (key,val) in self.history.items() if
not np.any(np.isnan(val))}
# Remove the track if it never moves significantly
has_significant_motion = False
has_significant_motion_counter = 0
first_pos = None
prev_frame = None
for frame_no, x_vec in self.history.items():
pos = x_vec[0:2]
if first_pos is None:
first_pos = pos
else:
assert frame_no > prev_frame
dist = vector_dist(pos, first_pos)
if dist > self.options.significant_motion_distance:
has_significant_motion_counter += 1
if has_significant_motion_counter > 8:
has_significant_motion = True
break
else:
has_significant_motion_counter = 0
prev_frame = frame_no
if not has_significant_motion:
self.history = dict()
class Track2D(Track):
def __init__(self, pos:Position, **kwargs):
super().__init__(**kwargs)
self.type = '2D'
p:Filter2DParams = kwargs['options'].params2D
x1, y1, x2, y2 = pos.aabb
x = (x1+x2)/2
y = (y1+y2)/2
self.filter = filter2D([x, y], [x2-x1, y2-y1],
P_factor=p.P_factor, Q_c=p.Q_c, Q_s=p.Q_s,
Q_v=p.Q_v, Q_ds=p.Q_ds, Q_a=p.Q_a, Q_cov=p.Q_cov,
Q_scov=p.Q_scov, R_c=p.R_c, R_s=p.R_s)
if not self.options.tracks2D:
raise ValueError("Tried to create a 2D track when not allowed")
def store_history(self, frame_no:int, time:float):
if frame_no in self.history:
raise ValueError(f"Frame number {frame_no} already exists!!")
self.history[frame_no] = self.filter.x.copy()
self.times[frame_no] = time
def predict(self):
self.filter.predict()
if not self.is_numerically_stable():
self.should_die = True
def get_x(self):
return self.filter.x
def update(self, det:Position, dt:float, frame_no:int, current_time:float):
x1, y1, x2, y2 = det.aabb
w = x2-x1
h = y2-y1
x = (x1+x2)/2
y = (y1+y2)/2
z = np.array([x, y, w, h], dtype=np.float32)
self.filter.update(z, dt)
assert current_time > self.last_updated
self.last_updated = current_time
self.last_updated_frame = frame_no
# Determine if track has sufficient amount of movement to be converted to a
# 3D track instead
def saom(self, current_time:float):
# 2D tracks need to have been recently updated for SAOM to trigger
# otherwise drifting nonsense tracks become 3D tracks
max_time = 2.01*(1.0/self.options.frame_rate)
if self.history and (current_time-self.last_updated)<=max_time:
first = min(self.history.keys())
xf, yf, wf, hf = self.history[first][0:4]
xn, yn, wn, hn = self.filter.x[0:4]
typical_size = np.mean([wf, hf, wn, hn])
dist = vector_dist([xf, yf], [xn, yn])
ratio = dist/typical_size
if ratio > self.options.saom_thresh:
return True
return False
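# Illustrative numbers (not from the source): if the first box was roughly 40x80 px and
# the filtered centre has since moved 90 px, typical_size is mean([40, 80, 40, 80]) = 60
# and ratio = 90 / 60 = 1.5, so any saom_thresh below 1.5 would promote this 2D track to 3D.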
# Convert to 3D track
def to3D(self, current_time:float):
first = min(self.history.keys())
dt = current_time - self.times[first]
assert dt > 0
xf, yf, wf, hf = self.history[first][0:4]
xn, yn, wn, hn = self.filter.x[0:4]
aabb_first = to_aabb(xf, yf, wf, hf)
aabb_now = to_aabb(xn, yn, wn, hn)
pos_first = Position(aabb=aabb_first, class_name=self.class_name)
pos_now = Position(aabb=aabb_now, class_name=self.class_name)
out = activecorners(pos1=pos_first, pos2=pos_now,
class_name=self.class_name,
world=self.world, dt=dt)
if out is None:
# Conversion to 3D failed, try again later
return self
else:
X, Y, l, w, h, v, phi = out
pos3D=np.array([X, Y], dtype=np.float32)
shape=np.array([l, w, h], dtype=np.float32)
new_track = Track3D(pos3D, shape, phi, v,
world=self.world, class_name=self.class_name,
options=self.options, current_time=current_time,
aabb_history=dict_copy(self.history),
old_times=self.times)
# Same ID to clearly mark that this 3D track inherits from 2D track
# Unintended side effect is that the track counter is increased
new_track.id = self.id
return new_track
class Track3D(Track):
def __init__(self, pos3D:np.ndarray, shape:np.ndarray, phi:float,
v:float, aabb_history:dict, old_times:dict, **kwargs):
super().__init__(**kwargs)
self.type = '3D'
self.tau = 1.0 / kwargs['world'].frame_rate
self.options = kwargs['options']
self.height = shape[-1]
self.aabb_history = aabb_history
self.times = dict_merge(self.times, old_times)
self.previous_detection = None
self.old_phi = None
if phi is None:
# If the road user is standing still, we still want to let
# activecorners work (or do we?)
self.init_filter(pos3D, shape, phi, v, self.tau)
elif np.isnan(phi):
# If we don't have phi yet, wait to create filter until we do
# which should happen at next update
# For now, just store the position which we'll need to compute phi
# This is only done in GUTS, active corners should never output NaN
self.filter = None
self.previous_detection = kwargs['det']
else:
self.init_filter(pos3D, shape, phi, v, self.tau)
def __repr__(self):
frames = list(self.history.keys())
if frames:
frames.sort()
start = frames[0]
stop = frames[-1]
else:
start = '?'
stop = '?'
return f"Track3D {self.class_name} {self.id}, {start}-{stop}"
def init_filter(self, pos3D, shape, phi, v, tau):
p:Filter3DParams = self.options.params3D
self.filter = filter3D(pos3D[0:2], shape[0:2], phi, v, tau=tau,
kappa=p.kappa, P_factor=p.P_factor,
Q_c=p.Q_c, Q_s=p.Q_s, Q_phi=p.Q_phi, Q_v=p.Q_v,
Q_omega=p.Q_omega, Q_cov=p.Q_cov,
R_c=p.R_c, R_s=p.R_s, R_phi=p.R_phi,
min_v_for_rotate=self.options.min_v_for_rotate)
def store_history(self, frame_no:int, time:float):
if self.filter is None:
return
if frame_no in self.history:
raise ValueError(f"Frame number {frame_no} already exists!!")
self.history[frame_no] = self.filter.x.copy()
self.times[frame_no] = time
def predict(self):
if self.filter is None:
return
self.filter.predict()
if not self.is_numerically_stable():
self.should_die = True
def get_x(self):
if self.filter is None:
x = np.array([*self.previous_detection.pos3D.flatten()[0:2],
*self.previous_detection.shape[0:2], float("nan")],
dtype=np.float32)
return x
else:
return self.filter.x
def vector_for_scoring(self, frame_no:int):
X = self.history[frame_no]
# Scoring vector should be x, y, l, w, phi
return X[0:5]
def suitable_previous_aabb_time(self, current_time:float):
good_number_of_frames = 5
l = len(self.aabb_history)
if l <= good_number_of_frames:
frame_no = min(self.aabb_history.keys())
return frame_no, current_time-self.times[frame_no]
else:
frame_nos = list(self.aabb_history.keys())
frame_nos.sort()
# Hopefully not too distant and also not too recent..?
frame_no = frame_nos[-good_number_of_frames]
return frame_no, current_time-self.times[frame_no]
def update(self, det, dt:float, frame_no:int, current_time:float):
assert current_time >= self.last_updated
self.last_updated = current_time
self.last_updated_frame = frame_no
if isinstance(det, Position3D):
X, Y = det.pos3D[0:2]
x, y = self.previous_detection.pos3D[0:2]
dist = vector_dist([X,Y], [x,y])
if dist > self.options.min_dist_for_phi:
phi = np.arctan2(Y-y, X-x)
factor = self.options.phi_smoothing_factor
if factor > 0.0 and (self.old_phi is not None):
phi = weighted_angle(self.old_phi, phi, factor)
if self.filter is None:
v = dist/self.tau
self.init_filter(det.pos3D, det.shape, phi, v, self.tau)
else:
z = np.array([*det.pos3D[0:2], *det.shape[0:2], phi],
dtype=np.float32)
self.filter.update(z)
self.old_phi = phi
elif isinstance(det, Position):
before, before_dt = self.suitable_previous_aabb_time(current_time)
xb, yb, wb, hb = self.aabb_history[before][0:4]
aabb_before = to_aabb(xb, yb, wb, hb)
pos_before = Position(aabb=aabb_before, class_name=self.class_name)
out = activecorners(pos_before, det,
self.class_name, self.world,
before_dt)
if out is None:
# Don't update the filter if active corners fail!
pass
else:
X, Y, l, w, h, v, phi = out
if l is None or w is None:
l, w = self.filter.x[2:4]
if h is None:
h = self.height
if phi is None:
phi = self.filter.x[4]
z = np.array([X, Y, l, w, phi], dtype=np.float32).flatten()
self.filter.update(z)
# Gradually update the height
self.height = 0.9 * self.height + 0.1 * h
# Store new AABB in AABB history, because this isn't done elsewhere
x1, y1, x2, y2 = det.aabb
xn = (x1+x2)/2.0
yn = (y1+y2)/2.0
wn = x2-x1
hn = y2-y1
to_be_stored = np.array([xn, yn, wn, hn], dtype=np.float32)
self.aabb_history[frame_no] = to_be_stored
else:
raise ValueError(f"Detection was of unknown type {type(det)}")
self.previous_detection = det
|
[
"numpy.mean",
"numpy.abs",
"util.dict_merge",
"util.to_aabb",
"filter.filter2D",
"util.weighted_angle",
"filter.filter3D",
"numpy.array",
"util.dict_copy",
"numpy.isnan",
"numpy.arctan2",
"activecorners.activecorners",
"position.Position",
"util.vector_dist"
] |
[((3262, 3439), 'filter.filter2D', 'filter2D', (['[x, y]', '[x2 - x1, y2 - y1]'], {'P_factor': 'p.P_factor', 'Q_c': 'p.Q_c', 'Q_s': 'p.Q_s', 'Q_v': 'p.Q_v', 'Q_ds': 'p.Q_ds', 'Q_a': 'p.Q_a', 'Q_cov': 'p.Q_cov', 'Q_scov': 'p.Q_scov', 'R_c': 'p.R_c', 'R_s': 'p.R_s'}), '([x, y], [x2 - x1, y2 - y1], P_factor=p.P_factor, Q_c=p.Q_c, Q_s=p.\n Q_s, Q_v=p.Q_v, Q_ds=p.Q_ds, Q_a=p.Q_a, Q_cov=p.Q_cov, Q_scov=p.Q_scov,\n R_c=p.R_c, R_s=p.R_s)\n', (3270, 3439), False, 'from filter import filter2D, filter3D\n'), ((4304, 4344), 'numpy.array', 'np.array', (['[x, y, w, h]'], {'dtype': 'np.float32'}), '([x, y, w, h], dtype=np.float32)\n', (4312, 4344), True, 'import numpy as np\n'), ((5616, 5639), 'util.to_aabb', 'to_aabb', (['xf', 'yf', 'wf', 'hf'], {}), '(xf, yf, wf, hf)\n', (5623, 5639), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((5659, 5682), 'util.to_aabb', 'to_aabb', (['xn', 'yn', 'wn', 'hn'], {}), '(xn, yn, wn, hn)\n', (5666, 5682), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((5703, 5756), 'position.Position', 'Position', ([], {'aabb': 'aabb_first', 'class_name': 'self.class_name'}), '(aabb=aabb_first, class_name=self.class_name)\n', (5711, 5756), False, 'from position import Position, Position3D\n'), ((5775, 5826), 'position.Position', 'Position', ([], {'aabb': 'aabb_now', 'class_name': 'self.class_name'}), '(aabb=aabb_now, class_name=self.class_name)\n', (5783, 5826), False, 'from position import Position, Position3D\n'), ((5842, 5942), 'activecorners.activecorners', 'activecorners', ([], {'pos1': 'pos_first', 'pos2': 'pos_now', 'class_name': 'self.class_name', 'world': 'self.world', 'dt': 'dt'}), '(pos1=pos_first, pos2=pos_now, class_name=self.class_name,\n world=self.world, dt=dt)\n', (5855, 5942), False, 'from activecorners import activecorners\n'), ((7271, 7304), 'util.dict_merge', 'dict_merge', (['self.times', 'old_times'], {}), '(self.times, old_times)\n', (7281, 7304), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((8492, 8758), 'filter.filter3D', 'filter3D', (['pos3D[0:2]', 'shape[0:2]', 'phi', 'v'], {'tau': 'tau', 'kappa': 'p.kappa', 'P_factor': 'p.P_factor', 'Q_c': 'p.Q_c', 'Q_s': 'p.Q_s', 'Q_phi': 'p.Q_phi', 'Q_v': 'p.Q_v', 'Q_omega': 'p.Q_omega', 'Q_cov': 'p.Q_cov', 'R_c': 'p.R_c', 'R_s': 'p.R_s', 'R_phi': 'p.R_phi', 'min_v_for_rotate': 'self.options.min_v_for_rotate'}), '(pos3D[0:2], shape[0:2], phi, v, tau=tau, kappa=p.kappa, P_factor=p\n .P_factor, Q_c=p.Q_c, Q_s=p.Q_s, Q_phi=p.Q_phi, Q_v=p.Q_v, Q_omega=p.\n Q_omega, Q_cov=p.Q_cov, R_c=p.R_c, R_s=p.R_s, R_phi=p.R_phi,\n min_v_for_rotate=self.options.min_v_for_rotate)\n', (8500, 8758), False, 'from filter import filter2D, filter3D\n'), ((1411, 1434), 'numpy.isnan', 'np.isnan', (['self.filter.x'], {}), '(self.filter.x)\n', (1419, 1434), True, 'import numpy as np\n'), ((5095, 5120), 'numpy.mean', 'np.mean', (['[wf, hf, wn, hn]'], {}), '([wf, hf, wn, hn])\n', (5102, 5120), True, 'import numpy as np\n'), ((5140, 5171), 'util.vector_dist', 'vector_dist', (['[xf, yf]', '[xn, yn]'], {}), '([xf, yf], [xn, yn])\n', (5151, 5171), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((6184, 6218), 'numpy.array', 'np.array', (['[X, Y]'], {'dtype': 'np.float32'}), '([X, Y], dtype=np.float32)\n', (6192, 6218), True, 'import numpy as np\n'), ((6237, 6274), 'numpy.array', 'np.array', (['[l, w, h]'], {'dtype': 'np.float32'}), '([l, w, h], dtype=np.float32)\n', (6245, 6274), True, 
'import numpy as np\n'), ((7598, 7611), 'numpy.isnan', 'np.isnan', (['phi'], {}), '(phi)\n', (7606, 7611), True, 'import numpy as np\n'), ((10833, 10860), 'util.vector_dist', 'vector_dist', (['[X, Y]', '[x, y]'], {}), '([X, Y], [x, y])\n', (10844, 10860), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((1362, 1383), 'numpy.abs', 'np.abs', (['self.filter.x'], {}), '(self.filter.x)\n', (1368, 1383), True, 'import numpy as np\n'), ((10934, 10958), 'numpy.arctan2', 'np.arctan2', (['(Y - y)', '(X - x)'], {}), '(Y - y, X - x)\n', (10944, 10958), True, 'import numpy as np\n'), ((11751, 11774), 'util.to_aabb', 'to_aabb', (['xb', 'yb', 'wb', 'hb'], {}), '(xb, yb, wb, hb)\n', (11758, 11774), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((11800, 11854), 'position.Position', 'Position', ([], {'aabb': 'aabb_before', 'class_name': 'self.class_name'}), '(aabb=aabb_before, class_name=self.class_name)\n', (11808, 11854), False, 'from position import Position, Position3D\n'), ((11873, 11943), 'activecorners.activecorners', 'activecorners', (['pos_before', 'det', 'self.class_name', 'self.world', 'before_dt'], {}), '(pos_before, det, self.class_name, self.world, before_dt)\n', (11886, 11943), False, 'from activecorners import activecorners\n'), ((12922, 12966), 'numpy.array', 'np.array', (['[xn, yn, wn, hn]'], {'dtype': 'np.float32'}), '([xn, yn, wn, hn], dtype=np.float32)\n', (12930, 12966), True, 'import numpy as np\n'), ((2358, 2385), 'util.vector_dist', 'vector_dist', (['pos', 'first_pos'], {}), '(pos, first_pos)\n', (2369, 2385), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((6536, 6559), 'util.dict_copy', 'dict_copy', (['self.history'], {}), '(self.history)\n', (6545, 6559), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((11121, 11162), 'util.weighted_angle', 'weighted_angle', (['self.old_phi', 'phi', 'factor'], {}), '(self.old_phi, phi, factor)\n', (11135, 11162), False, 'from util import vector_dist, to_aabb, dict_copy, dict_merge, weighted_angle\n'), ((11365, 11432), 'numpy.array', 'np.array', (['[*det.pos3D[0:2], *det.shape[0:2], phi]'], {'dtype': 'np.float32'}), '([*det.pos3D[0:2], *det.shape[0:2], phi], dtype=np.float32)\n', (11373, 11432), True, 'import numpy as np\n'), ((1860, 1873), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (1868, 1873), True, 'import numpy as np\n'), ((12439, 12484), 'numpy.array', 'np.array', (['[X, Y, l, w, phi]'], {'dtype': 'np.float32'}), '([X, Y, l, w, phi], dtype=np.float32)\n', (12447, 12484), True, 'import numpy as np\n')]
|
import numpy.random as rand
import numpy as np
import pandas as pd
import random
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
from Particle import Particle
#Initialization of the plots
fig = plt.figure(figsize=(20,10))
axes = [None, None, None]
def generate_random_particle(_id, input_size, neurons):
"""Function to generate random particle to init PSO algorithm"""
position = []
speed = []
n_neurons = sum(neurons)
n_weights = input_size * neurons[0]
for i in range(len(neurons) - 1):
n_weights = n_weights + neurons[i]*neurons[i+1]
total_n_values = n_weights + (2 * n_neurons) # also let the PSO choose each neuron's bias and activation-function parameter; subtract one if the last neuron does not need an activation function
position = 2 * rand.random_sample(total_n_values) - 1
speed = np.zeros(total_n_values)
return Particle(_id, position, speed, n_weights, n_neurons)
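# A small sketch (not part of the original module) of the particle-vector bookkeeping
# above; the architecture (two inputs, a [3, 1] network) is made up for illustration.
def _example_particle_sizes(input_size=2, neurons=(3, 1)):
    n_neurons = sum(neurons)                      # 4 neurons in total
    n_weights = input_size * neurons[0]           # 2 * 3 = 6 input weights
    for i in range(len(neurons) - 1):
        n_weights += neurons[i] * neurons[i + 1]  # + 3 * 1 = 9 weights overall
    total = n_weights + 2 * n_neurons             # 9 + 8 = 17 position entries
    return n_weights, n_neurons, total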
class PSO:
"""Class that implements the PSO algorithm"""
def __init__(self, swarm_size, n_informants, alpha_max, alpha_min, beta, gamma, delta, epsilon, ann, max_iterations, test_set_path, input_size):
axes[1] = fig.add_subplot(132)
axes[2] = fig.add_subplot(133)
self.swarm_size = swarm_size
self.alpha_max = alpha_max
self.alpha_min = alpha_min
self.beta = beta
self.gamma = gamma
self.delta = delta
self.epsilon = epsilon
self.swarm = [generate_random_particle(id, input_size, ann.neurons) for id in range(swarm_size)] # init swarm
self.best = None
self.best_fitness = 1000 # initialise the error to a high value
self.ann = ann
self.max_iterations = max_iterations
self.input_size = input_size
self.n_informants = n_informants
# Setup the dataset structure to expect and the function plots based on the input size
if input_size == 1:
columns = ['x', 'y']
axes[0] = fig.add_subplot(131)
else:
columns = ['x1', 'x2', 'y']
axes[0] = fig.add_subplot(131, projection='3d')
self.test_set = pd.read_csv(test_set_path, sep=r'\s+|\t+|\s+\t+|\t+\s+', header=None, names=columns, engine='python')
#init arrays used to plot the results during the execution
self.error = []
self.steps = []
self.best_record = []
#assign informants to each particle
for p in self.swarm:
p.select_informants(self.swarm, self.n_informants)
def execute(self):
""" Function to run the PSO algorithm"""
anim = FuncAnimation(fig, self.step, frames=self.max_iterations, repeat=False)
plt.show()
def step(self, i):
""" Wrapper to execute one step of the PSO algorithm and plot the indermediate results"""
self.pso_step(i+1)
self.plot_result()
def pso_step(self, i):
""" Execution of a step of the PSO algorithm as explained in the lectures slides """
for particle in self.swarm:
self.assess_fitness(particle)
if self.best is None or particle.fitness < self.best_fitness:
self.best = particle
self.best_fitness = particle.fitness
self.best_fitness_position = particle.best_fitness_position
x_swarm = self.best_fitness_position
for particle in self.swarm:
new_speed = np.zeros(particle.speed.shape)
x_fit = particle.best_fitness_position
x_inf = particle.get_previous_fittest_of_informants()
for l in range(len(particle.position)):
a = (self.alpha_max - self.alpha_min) * ((self.max_iterations - i) / self.max_iterations) + self.alpha_min
b = random.uniform(0, self.beta)
c = random.uniform(0, self.gamma)
d = random.uniform(0, self.delta)
new_speed[l] = a * particle.speed[l] + b * (x_fit[l] - particle.position[l]) + c * (x_inf[l] - particle.position[l]) + d * (x_swarm[l] - particle.position[l])
particle.speed = new_speed
particle.update_position(self.epsilon)
self.steps.append(i)
self.error.append(self.best_fitness)
self.best_record.append(self.best.id)
print("{} | Best fitness so far: {}".format(i, self.best_fitness))
def assess_fitness(self, particle):
""" Function to assess the fitness of a particle using MSE"""
graph = []
old_fitness = particle.best_fitness
self.ann.set_values(particle.position)
mse = 0
n = len(self.test_set)
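        # The loop below accumulates the mean squared error over the test set:
        #   mse = (1/n) * sum_i (d_i - u_i)^2
        # where d_i is the desired output and u_i the network output for row i.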
for _, row in self.test_set.iterrows():
if self.input_size == 1:
x_i = [row[0]]
d = row[1]
else:
x_i = [row[0], row[1]]
d = row[2]
u = self.ann.process(x_i)
graph.append(u)
mse_i = (d - u) ** 2
mse = mse + mse_i
particle.fitness = mse / n
if (particle.fitness < old_fitness):
particle.best_fitness_graph = graph
particle.best_fitness = particle.fitness
particle.best_fitness_position = particle.position
def plot_result(self):
"Function to plot the intermediate results of the PSO algorithm"
#clear the figure from previous step's results
axes[0].clear()
axes[1].clear()
axes[2].clear()
#Reconstruct the cleared plots
axes[0].title.set_text('Functions')
axes[1].title.set_text('MSE')
axes[1].set_xlabel('Number of iterations')
axes[1].set_ylabel('Mean Squared Error')
axes[2].title.set_text('Best Particle')
axes[2].set_xlabel('Number of iterations')
axes[2].set_ylabel('Best Particle ID')
#plot the results in a different manner depending on the input size
if self.input_size == 1:
x = self.test_set['x']
y = self.test_set['y']
g = self.best.best_fitness_graph
axes[0].plot(x,g, label='Approximated Function')
            axes[0].plot(x,y, label='Desired Function')
axes[0].legend()
else:
x1 = self.test_set['x1']
x2 = self.test_set['x2']
y = self.test_set['y']
g = self.best.best_fitness_graph
            axes[0].scatter(x1, x2, y, label='Desired Function')
axes[0].scatter(x1, x2, g, label='Approximated Function')
axes[0].legend()
#plot error
axes[1].set_ylim([0, 0.1])
axes[1].plot(self.steps, self.error)
#plot the fittest particle
axes[2].plot(self.steps, self.best_record)
axes[2].set_ylim([0, self.swarm_size])
|
[
"random.uniform",
"numpy.random.random_sample",
"pandas.read_csv",
"Particle.Particle",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.show"
] |
[((280, 308), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (290, 308), True, 'import matplotlib.pyplot as plt\n'), ((926, 950), 'numpy.zeros', 'np.zeros', (['total_n_values'], {}), '(total_n_values)\n', (934, 950), True, 'import numpy as np\n'), ((962, 1014), 'Particle.Particle', 'Particle', (['_id', 'position', 'speed', 'n_weights', 'n_neurons'], {}), '(_id, position, speed, n_weights, n_neurons)\n', (970, 1014), False, 'from Particle import Particle\n'), ((2232, 2339), 'pandas.read_csv', 'pd.read_csv', (['test_set_path'], {'sep': '"""\\\\s+|\t+|\\\\s+\t+|\t+\\\\s+"""', 'header': 'None', 'names': 'columns', 'engine': '"""python"""'}), "(test_set_path, sep='\\\\s+|\\t+|\\\\s+\\t+|\\t+\\\\s+', header=None,\n names=columns, engine='python')\n", (2243, 2339), True, 'import pandas as pd\n'), ((2712, 2783), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'self.step'], {'frames': 'self.max_iterations', 'repeat': '(False)'}), '(fig, self.step, frames=self.max_iterations, repeat=False)\n', (2725, 2783), False, 'from matplotlib.animation import FuncAnimation\n'), ((2792, 2802), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2800, 2802), True, 'import matplotlib.pyplot as plt\n'), ((875, 909), 'numpy.random.random_sample', 'rand.random_sample', (['total_n_values'], {}), '(total_n_values)\n', (893, 909), True, 'import numpy.random as rand\n'), ((3544, 3574), 'numpy.zeros', 'np.zeros', (['particle.speed.shape'], {}), '(particle.speed.shape)\n', (3552, 3574), True, 'import numpy as np\n'), ((3887, 3915), 'random.uniform', 'random.uniform', (['(0)', 'self.beta'], {}), '(0, self.beta)\n', (3901, 3915), False, 'import random\n'), ((3936, 3965), 'random.uniform', 'random.uniform', (['(0)', 'self.gamma'], {}), '(0, self.gamma)\n', (3950, 3965), False, 'import random\n'), ((3986, 4015), 'random.uniform', 'random.uniform', (['(0)', 'self.delta'], {}), '(0, self.delta)\n', (4000, 4015), False, 'import random\n')]
|
from railrl.data_management.simple_replay_pool import SimpleReplayPool
from railrl.predictors.dynamics_model import FullyConnectedEncoder, InverseModel, ForwardModel
import tensorflow as tf
import time
import numpy as np
from sandbox.rocky.tf.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer
from railrl.misc.pyhelper_fns.vis_utils import MyAnimationMulti
def planner_info(arm_loss, box_loss, forward_models_outputs):
return {'arm_loss':arm_loss, 'box_loss':box_loss, \
'forward_models_outputs': forward_models_outputs}
def gather_cols(params, indices, name=None):
"""Gather columns of a 2D tensor.
Args:
params: A 2D tensor.
indices: A 1D tensor. Must be one of the following types: ``int32``, ``int64``.
name: A name for the operation (optional).
Returns:
A 2D Tensor. Has the same type as ``params``.
"""
with tf.op_scope([params, indices], name, "gather_cols") as scope:
# Check input
params = tf.convert_to_tensor(params, name="params")
indices = tf.convert_to_tensor(indices, name="indices")
try:
params.get_shape().assert_has_rank(2)
except ValueError:
raise ValueError('\'params\' must be 2D.')
try:
indices.get_shape().assert_has_rank(1)
except ValueError:
            raise ValueError('\'indices\' must be 1D.')
# Define op
p_shape = tf.shape(params)
p_flat = tf.reshape(params, [-1])
i_flat = tf.reshape(tf.reshape(tf.range(0, p_shape[0]) * p_shape[1],
[-1, 1]) + indices, [-1])
return tf.reshape(tf.gather(p_flat, i_flat),
[p_shape[0], -1])
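# Illustrative use of gather_cols (the tensor values are made up for the example):
#   params = tf.constant([[1., 2., 3.], [4., 5., 6.]])
#   cols = gather_cols(params, tf.constant([0, 2]))
#   # evaluating cols in a session gives [[1., 3.], [4., 6.]]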
"""
A Planner takes two states (S_init and S_goal) and outputs an action.
Fine-tuning is out of the scope of the Planner.
"""
class Planner(object):
def __init__(
self,
dynamic_model,
encoder,
sess
):
self.encoder = encoder
self.dynamic_model = dynamic_model
self.sess = sess
##initialize the model.....
    def get_action(self, S_init, S_goal):
return None
"""
The inverse-model planner is straightforward: it simply returns the predicted action.
"""
class InverseModelPlanner(object):
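    # Concretely (a summary of this class, not extra behaviour): two weight-tied copies of
    # the encoder embed S_init and S_goal, the inverse model is rebuilt on top of those
    # features, and get_action evaluates the inverse model's output for the given state pair.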
def __init__(
self,
dynamic_model,
env,
encoder,
sess = None,
):
if sess == None:
sess =tf.get_default_session()
self.sess = sess
#re-construct the dynamic model
self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
encoder1 = encoder.get_weight_tied_copy(observation_input=self.S_init_ph)
encoder2 = encoder.get_weight_tied_copy(observation_input=self.S_goal_ph)
self.inverse_model = dynamic_model.get_weight_tied_copy(feature_input1=encoder1.output,
feature_input2=encoder2.output)
def get_action(self, S_init, S_goal):
action = self.sess.run(self.inverse_model.output, feed_dict = \
{self.S_init_ph:S_init, self.S_goal_ph: S_goal})
return action
"""
ForwardModel planner: optimizes the action according to the objective
min_{a} (S_next - S_goal)^2
"""
class CEMPlanner_arm_coord():
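    # get_action below minimises this objective with the cross-entropy method. Roughly
    # (a sketch of the loop, not extra behaviour), it:
    #   1. samples a large batch of action sequences uniformly in [-1, 1],
    #   2. scores each sequence with the forward-model objective at the requested horizon,
    #   3. keeps the top_k sequences and fits a multivariate Gaussian to them,
    #   4. resamples from that Gaussian and repeats for stop_itr iterations,
    #   5. returns the first action of the best sequence found.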
def __init__(
self,
dynamic_model,
encoder,
env,
sess = None,
max_length = 15,
sample_batch_size = 2000,
top_k = 200,
action_penalty=False,
accumulated_loss = False):
self.sample_batch_size = sample_batch_size
self.top_k = top_k
self.env = env
if sess == None:
sess =tf.get_default_session()
self.sess = sess
self.max_length = max_length
self.action_ph = tf.placeholder(tf.float32, [max_length, None, 4])
self.forward_model_list = []
        #build the recurrent model w.r.t. the max length
self.S_init_ph = tf.placeholder(tf.float32, [None, 24])
self.S_goal_ph = tf.placeholder(tf.float32, [None, 24])
#only two feature encoders
self.encoder1 = encoder.get_weight_tied_copy(observation_input=self.S_init_ph)
self.encoder2 = encoder.get_weight_tied_copy(observation_input=self.S_goal_ph)
forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
action_input=self.action_ph[0])
self.forward_model_list.append(forward_model)
self.forward_model_output_list = [forward_model.output] #for debug purpose only
for i in range(1,max_length):
forward_model = dynamic_model.get_weight_tied_copy(feature_input = forward_model.output,\
action_input = self.action_ph[i])
self.forward_model_list.append(forward_model)
self.forward_model_output_list.append(forward_model.output)
## objective
def transfer_box_global_tf(obs):
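            # Assumed observation layout (inferred from the indexing, not verified): obs[:, 4:6]
            # is the arm-to-box offset scaled by 10 and obs[:, 21:23] the arm position, so their
            # sum recovers the box position in global coordinates.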
arm2box = gather_cols(obs, [4,5])/10.0
return gather_cols(obs, [21,22]) + arm2box
self.objective_list = []
self.arm_loss_list = []
self.box_loss_list = []
self.objective_topk_index_list = []
current_objective = 0
#objective
for forward_model in self.forward_model_list:
if accumulated_loss:
current_objective += tf.reduce_sum(tf.square(transfer_box_global_tf(forward_model.output)-\
transfer_box_global_tf(self.encoder2.output)), axis = 1)
else:
current_objective = tf.reduce_sum(tf.square(transfer_box_global_tf(forward_model.output)-\
transfer_box_global_tf(self.encoder2.output)), axis = 1)
self.objective_list.append(current_objective)
self.arm_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
self.box_loss_list.append(tf.reduce_sum(tf.square(transfer_box_global_tf(forward_model.output)-\
transfer_box_global_tf(self.encoder2.output)))*100)
if action_penalty:
for i in range(len(self.objective_list)):
self.objective_list[i] += tf.reduce_sum(tf.square(self.action_ph),axis = [0,2])*0.5
def get_action(self, S_init, S_goal, steps = 1, plot_loss = False, debug = False, stop_variance = 0.2, stop_itr = 3, init_batch_size = 50000):
assert(steps <= self.max_length)
#fit a multivariable Gaussian
mean_list = None
cov_matrix = None
batch_S_init = np.dot(np.ones([init_batch_size, 1]), S_init.reshape(1,-1))
batch_S_goal = np.dot(np.ones([init_batch_size, 1]), S_goal.reshape(1,-1))
#CEM
actions = np.random.rand(self.max_length, init_batch_size, 4)*2 - 1
objective_list = self.sess.run(self.objective_list[steps-1], feed_dict = {self.action_ph:actions, \
self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
sorted_index = np.argsort(objective_list)[:self.top_k]
# debug
# action_pen, objective_debug = self.sess.run([tf.reduce_sum(tf.square(self.action_ph),axis = [0,2])*0.3, self.objective_list[14]], feed_dict = {self.action_ph:actions, \
# self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
# import pdb; pdb.set_trace()
best_actions = actions[:,sorted_index, :]
trans_best_actions = np.moveaxis(best_actions, 0, 1).reshape(self.top_k, -1)
cov_matrix = np.cov(trans_best_actions.T)
mean_list = np.mean(trans_best_actions.T, axis = 1)
batch_S_init = np.dot(np.ones([self.sample_batch_size, 1]), S_init.reshape(1,-1))
batch_S_goal = np.dot(np.ones([self.sample_batch_size, 1]), S_goal.reshape(1,-1))
for i in range(stop_itr-1):
actions = np.random.multivariate_normal(mean_list, cov_matrix, self.sample_batch_size).reshape(self.sample_batch_size, self.max_length, 4)
actions = np.moveaxis(actions, 0,1)
objective_list = self.sess.run(self.objective_list[steps-1], feed_dict = {self.action_ph:actions, \
self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
sorted_index = np.argsort(objective_list)[:self.top_k]
best_actions = actions[:,sorted_index, :]
trans_best_actions = np.moveaxis(best_actions, 0, 1).reshape(self.top_k, -1)
cov_matrix = np.cov(trans_best_actions.T)
mean_list = np.mean(trans_best_actions.T, axis = 1)
# import pdb; pdb.set_trace()
#if debug, visualize all forward model's output
best_action = best_actions[:,0,:]
arm_loss, box_loss,forward_models_outputs, final_objective = self.sess.run([self.arm_loss_list[0], self.box_loss_list[0], \
self.forward_model_output_list, self.objective_list[steps-1]], \
{self.action_ph: best_action.reshape(15,1,4), \
self.S_init_ph:[S_init], self.S_goal_ph:[S_goal]})
print("final objective")
print(final_objective)
# import pdb; pdb.set_trace()
return best_actions[0,0], {'arm_loss':arm_loss, 'box_loss':box_loss, 'forward_models_outputs':forward_models_outputs[:steps]}
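# Illustrative call (variable names made up for the example):
#   action, info = planner.get_action(S_init, S_goal, steps=5)
# returns the first action of the best sequence plus a dict with 'arm_loss', 'box_loss'
# and the forward-model outputs for the planned steps.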
class CEMPlanner():
def __init__(
self,
dynamic_model,
encoder,
env,
sess = None,
pos_only = True,
max_length = 15,
sample_batch_size = 2000,
top_k = 200,
action_penalty=False,
accumulated_loss = False):
self.sample_batch_size = sample_batch_size
self.top_k = top_k
self.env = env
if sess == None:
sess =tf.get_default_session()
self.sess = sess
self.max_length = max_length
self.action_ph = tf.placeholder(tf.float32, [max_length, None, 4])
self.forward_model_list = []
        #build the recurrent model w.r.t. the max length
self.S_init_ph = tf.placeholder(tf.float32, [None]+list(env.observation_space.shape))
self.S_goal_ph = tf.placeholder(tf.float32, [None]+list(env.observation_space.shape))
#only two feature encoders
self.encoder1 = encoder.get_weight_tied_copy(observation_input=self.S_init_ph)
self.encoder2 = encoder.get_weight_tied_copy(observation_input=self.S_goal_ph)
forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
action_input=self.action_ph[0])
self.forward_model_list.append(forward_model)
self.forward_model_output_list = [forward_model.output] #for debug purpose only
for i in range(1,max_length):
forward_model = dynamic_model.get_weight_tied_copy(feature_input = forward_model.output,\
action_input = self.action_ph[i])
self.forward_model_list.append(forward_model)
self.forward_model_output_list.append(forward_model.output)
## objective
self.objective_list = []
self.arm_loss_list = []
self.box_loss_list = []
self.objective_topk_index_list = []
current_objective = 0
if pos_only:
for forward_model in self.forward_model_list:
if accumulated_loss:
current_objective += tf.reduce_sum(tf.square(gather_cols(forward_model.output, [4,5,6])\
- gather_cols(self.encoder2.output, [4,5,6])), axis = 1)
else:
current_objective = tf.reduce_sum(tf.square(gather_cols(forward_model.output, list(range(4,7)))\
- gather_cols(self.encoder2.output, list(range(4,7)))), axis = 1)
self.objective_list.append(current_objective)
self.arm_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
self.box_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])))
else:
for forward_model in self.forward_model_list:
self.objective_list.append(tf.reduce_sum(tf.square(forward_model.output[0] - self.encoder2.output[0])))
self.arm_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
self.box_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])))
if action_penalty:
for i in range(len(self.objective_list)):
self.objective_list[i] += tf.reduce_sum(tf.square(self.action_ph),axis = [0,2])*0.5
def get_action(self, S_init, S_goal, steps = 1, plot_loss = False, debug = False, stop_variance = 0.2, stop_itr = 3, init_batch_size = 50000):
assert(steps <= self.max_length)
#fit a multivariable Gaussian
mean_list = None
cov_matrix = None
batch_S_init = np.dot(np.ones([init_batch_size, 1]), S_init.reshape(1,-1))
batch_S_goal = np.dot(np.ones([init_batch_size, 1]), S_goal.reshape(1,-1))
#CEM
actions = np.random.rand(self.max_length, init_batch_size, 4)*2 - 1
objective_list = self.sess.run(self.objective_list[steps-1], feed_dict = {self.action_ph:actions, \
self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
sorted_index = np.argsort(objective_list)[:self.top_k]
#debug
# action_pen, objective_debug = self.sess.run([tf.reduce_sum(tf.square(self.action_ph),axis = [0,2])*0.3, self.objective_list[14]], feed_dict = {self.action_ph:actions, \
# self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
# import pdb; pdb.set_trace()
best_actions = actions[:,sorted_index, :]
trans_best_actions = np.moveaxis(best_actions, 0, 1).reshape(self.top_k, -1)
cov_matrix = np.cov(trans_best_actions.T)
mean_list = np.mean(trans_best_actions.T, axis = 1)
batch_S_init = np.dot(np.ones([self.sample_batch_size, 1]), S_init.reshape(1,-1))
batch_S_goal = np.dot(np.ones([self.sample_batch_size, 1]), S_goal.reshape(1,-1))
for i in range(stop_itr-1):
actions = np.random.multivariate_normal(mean_list, cov_matrix, self.sample_batch_size).reshape(self.sample_batch_size, self.max_length, 4)
actions = np.moveaxis(actions, 0,1)
objective_list = self.sess.run(self.objective_list[steps-1], feed_dict = {self.action_ph:actions, \
self.S_init_ph:batch_S_init, self.S_goal_ph:batch_S_goal})
sorted_index = np.argsort(objective_list)[:self.top_k]
best_actions = actions[:,sorted_index, :]
trans_best_actions = np.moveaxis(best_actions, 0, 1).reshape(self.top_k, -1)
cov_matrix = np.cov(trans_best_actions.T)
mean_list = np.mean(trans_best_actions.T, axis = 1)
# import pdb; pdb.set_trace()
#if debug, visualize all forward model's output
best_action = best_actions[:,0,:]
arm_loss, box_loss,forward_models_outputs, final_objective = self.sess.run([self.arm_loss_list[0], self.box_loss_list[0], \
self.forward_model_output_list, self.objective_list[steps-1]], \
{self.action_ph: best_action.reshape(15,1,4), \
self.S_init_ph:[S_init], self.S_goal_ph:[S_goal]})
print("final objective")
print(final_objective)
arm_obj = np.sum(np.square(forward_models_outputs[steps-1][0][:4] - S_goal[:4]))
box_obj = np.sum(np.square(forward_models_outputs[steps-1][0][4:7] - S_goal[4:7]))
print('arm objective is {}, box objective is {}'.format(arm_obj, box_obj))
# import pdb; pdb.set_trace()
return best_actions[0,0], {'arm_loss':arm_loss, 'box_loss':box_loss, 'forward_models_outputs':forward_models_outputs[:steps]}
class FastClippedSgdShootingForwardModelPlanner_cumulated_obj(object):
def __init__(
self,
dynamic_model,
encoder,
env,
init_lr = 0.5,
sess = None,
pos_only = False,
max_length = 15,
):
if sess == None:
sess =tf.get_default_session()
self.sess = sess
self.init_lr = init_lr
self.max_length = max_length
self.action_ph = tf.placeholder(tf.float32, [max_length, 1, 4])
self.forward_model_list = []
        #build the recurrent model w.r.t. the max length
self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
#only two feature encoders
self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
action_input=self.action_ph[0])
self.forward_model_list.append(forward_model)
for i in range(1,max_length):
forward_model = dynamic_model.get_weight_tied_copy(feature_input = forward_model.output,\
action_input = self.action_ph[i])
self.forward_model_list.append(forward_model)
## objective
self.objective_list = []
self.forward_model_loss_list = []
self.arm_loss_list = []
self.box_loss_list = []
objective = 0
factor = 1
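        # In the pos_only branch below each successive prediction step t contributes with
        # weight 0.4**t, so the cumulative objective is dominated by the errors of the
        # earliest predicted states.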
if pos_only:
for forward_model in self.forward_model_list:
factor=factor*0.4
self.forward_model_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:6] - self.encoder2.output[0][:6])))
objective += factor*tf.reduce_sum(tf.square(forward_model.output[0][:6] - self.encoder2.output[0][:6]))
self.objective_list.append(objective)
self.arm_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
self.box_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])))
else:
for forward_model in self.forward_model_list:
objective += tf.reduce_sum(tf.square(forward_model.output[0] - self.encoder2.output[0]))
self.objective_list.append(objective)
self.action_grad_list = []
for obj in self.objective_list:
            #those tail terms in action_ph will receive zero gradient
self.action_grad_list.append(tf.gradients(obj, self.action_ph))
self.vis_tool = MyAnimationMulti(None, numPlots=2, isIm=[0,0], axTitles=['(S1-S_goal)^2', 'sum(S_i-S_goal)^2'])
def get_action(self, S_init, S_goal, steps = None, plot_loss = False):
if steps == None:
steps = 1 #greedy planner
else:
assert(steps <= self.max_length)
action = np.zeros([self.max_length, 1, 4])
action_grad = self.action_grad_list[steps - 1]
# TODO: Find a good stop criteria
now = time.time()
S1_loss_list = []
Sn_loss_list = []
for i in range(0,101):
feed_dict = {self.S_init_ph:S_init, self.S_goal_ph:S_goal, self.action_ph : action}
S1_loss, Sn_loss = self.sess.run([self.objective_list[0], self.objective_list[steps-1]], feed_dict=feed_dict)
S1_loss_list.append(S1_loss)
Sn_loss_list.append(Sn_loss)
if plot_loss and i%20 ==0:
self.vis_tool._display([[range(i+1), S1_loss_list],[range(i+1), Sn_loss_list]])
gradient = np.array(self.sess.run(action_grad, feed_dict = feed_dict)[0])
if np.isnan(gradient).any():
action = np.random.rand(self.max_length, 1, 4)-0.5
print('nan gradient step{}'.format(i))
import pdb; pdb.set_trace()
else:
if np.linalg.norm(gradient) > steps*4:
gradient = gradient/np.linalg.norm(gradient)*4*steps
action -= gradient/1.0*self.init_lr
action = np.clip(action, -1, 1)
# if i %200 == 0:
# print("#########Optimizing action#########")
# action_loss, predicted_next_state = self.sess.run([self.objective_list[steps-1], self.forward_model_list[steps-1].output], feed_dict = feed_dict)
# box_loss = np.sum(np.square(predicted_next_state[0][4:6] - S_goal[4:6]))
# arm_loss = np.sum(np.square(predicted_next_state[0][0:4] - S_goal[0:4]))
# print("action_loss(sum_square_error(S_goal, S_next)) is {}, box_loss is {}, arm_loss is {}".format(action_loss, box_loss, arm_loss))
# print("current_action is {}".format(action[0][0]))
# # print("current s_next is {}".format(self.sess.run(self.forward_model.output, feed_dict = feed_dict)))
# print("{} sec elapsed for 50 gradient steps".format(time.time() - now))
# now = time.time()
return action[0][0], self.sess.run([self.arm_loss_list[0], self.box_loss_list[0], self.forward_model_list[0].output], feed_dict)
class FastClippedSgdShootingForwardModelPlanner(object):
def __init__(
self,
dynamic_model,
encoder,
env,
init_lr = 0.5,
sess = None,
pos_only = False,
max_length = 15,
):
self.env = env
if sess == None:
sess =tf.get_default_session()
self.sess = sess
self.init_lr = init_lr
self.max_length = max_length
self.action_ph = tf.placeholder(tf.float32, [max_length, 1, 4])
self.forward_model_list = []
        #build the recurrent model w.r.t. the max length
self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
#only two feature encoders
self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
action_input=self.action_ph[0])
self.forward_model_list.append(forward_model)
self.forward_model_output_list = [forward_model.output]
for i in range(1,max_length):
forward_model = dynamic_model.get_weight_tied_copy(feature_input = forward_model.output,\
action_input = self.action_ph[i])
self.forward_model_list.append(forward_model)
self.forward_model_output_list.append(forward_model.output)
## objective
self.objective_list = []
self.arm_loss_list = []
self.box_loss_list = []
if pos_only:
for forward_model in self.forward_model_list:
self.objective_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:6] - self.encoder2.output[0][:6])))
self.arm_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][:4] - self.encoder2.output[0][:4])))
self.box_loss_list.append(tf.reduce_sum(tf.square(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])))
else:
for forward_model in self.forward_model_list:
self.objective_list.append(tf.reduce_sum(tf.square(forward_model.output[0] - self.encoder2.output[0])))
self.action_grad_list = []
for obj in self.objective_list:
            #those tail terms in action_ph will receive zero gradient
self.action_grad_list.append(tf.gradients(obj, self.action_ph))
self.vis_tool = MyAnimationMulti(None, numPlots=2, isIm=[0,0], axTitles=['(S1-S_goal)^2', '(S_n-S_goal)^2'])
def get_action(self, S_init, S_goal, steps = None, plot_loss = False):
if steps == None:
steps = 1 #greedy planner
else:
assert(steps <= self.max_length)
action = np.zeros([self.max_length, 1, 4])
action_grad = self.action_grad_list[steps - 1]
# TODO: Find a good stop criteria
now = time.time()
S1_loss_list = []
Sn_loss_list = []
for i in range(0,51):
feed_dict = {self.S_init_ph:S_init, self.S_goal_ph:S_goal, self.action_ph : action}
S1_loss, Sn_loss = self.sess.run([self.box_loss_list[0], self.box_loss_list[steps-1]], feed_dict=feed_dict)
S1_loss_list.append(S1_loss)
Sn_loss_list.append(Sn_loss)
if plot_loss and i %1 == 0:
self.vis_tool._display([[range(i+1), S1_loss_list],[range(i+1), Sn_loss_list]])
gradient = np.array(self.sess.run(action_grad, feed_dict = feed_dict)[0])
if np.isnan(gradient).any():
action = np.random.rand(self.max_length, 1, 4)-0.5
print('nan gradient step{}'.format(i))
import pdb; pdb.set_trace()
else:
if np.linalg.norm(gradient) > steps*4:
gradient = gradient/np.linalg.norm(gradient)*4*steps
action -= gradient/(1.+i*0.05)*self.init_lr
action = np.clip(action, -1, 1)
arm_loss, box_loss, forward_models_outputs = \
self.sess.run([self.arm_loss_list[0], self.box_loss_list[0], \
self.forward_model_output_list], feed_dict)
return action[0][0], planner_info(arm_loss, box_loss, forward_models_outputs[:steps])
class FastClippedSgdForwardModelPlanner(object):
def __init__(
self,
dynamic_model,
encoder,
env,
action_initializer = None,
init_lr = 1,
sess = None,
pos_only = False,
):
if sess == None:
sess =tf.get_default_session()
self.sess = sess
# with tf.variable_scope('action_optimizer'):
# self.action = tf.get_variable('planner_action', [1] + list(env.action_space.shape), initializer=action_initializer)
self.action_ph = tf.placeholder(tf.float32, [None, 4])
self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
self.forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
action_input=self.action_ph)
## objective
if pos_only:
self.objective = tf.reduce_sum(tf.square(self.forward_model.output[0][:6] - self.encoder2.output[0][:6]))
else:
self.objective = tf.reduce_sum(tf.square(self.forward_model.output - self.encoder2.output))
self.arm_loss = tf.reduce_sum(tf.square(self.forward_model.output[0][:4] - self.encoder2.output[0][:4]))
self.box_loss = tf.reduce_sum(tf.square(self.forward_model.output[0][4:6] - self.encoder2.output[0][4:6]))
#Adam optimizer has its own variables. Wrap it by a namescope
self.action_grad = tf.gradients(self.objective, self.action_ph)
# with tf.variable_scope('action_optimizer'):
# self.action_opt = tf.train.AdamOptimizer(init_lr).minimize(self.objective, var_list = [self.clipped_action])
# self.action_gradient = tf.train.AdamOptimizer(init_lr).compute_gradients(self.objective, var_list = [self.action])
def get_action(self, S_init, S_goal):
        #first re-initialize every variable in "action_optimizer"
# variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope='action_optimizer')
# self.sess.run(tf.initialize_variables(variables))
action = np.random.rand(4)-0.5
# TODO: Find a good stop criteria
now = time.time()
for i in range(0,151):
feed_dict = {self.S_init_ph:S_init, self.S_goal_ph:S_goal, self.action_ph : [action]}
gradient = self.sess.run([self.action_grad], feed_dict = feed_dict)[0][0][0]
#raises NotImplementedError: ('Trying to optimize unsupported type ', <tf.Tensor 'clip_by_value:0' shape=(1, 4) dtype=float32>)
#this code does not work....
# import pdb; pdb.set_trace()
action -= gradient/(1.+i*0.2)*0.5
action = np.clip(action, -1, 1)
if i %50 == 0:
print("#########Optimizing action#########")
action_loss = self.sess.run(self.objective, feed_dict = feed_dict)
print("action_loss(sum_square_error(S_goal, S_next)) is {}".format(action_loss))
print("current_action is {}".format(action))
# print("current s_next is {}".format(self.sess.run(self.forward_model.output, feed_dict = feed_dict)))
print("{} sec elapsed for 50 gradient steps".format(time.time() - now))
now = time.time()
return action, self.sess.run([ self.arm_loss, self.box_loss], feed_dict = feed_dict)
class SgdForwardModelPlanner(object):
def __init__(
self,
dynamic_model,
encoder,
env,
action_initializer = None,
init_lr = 1e-1,
sess = None,
pos_only = False,
):
if sess == None:
sess =tf.get_default_session()
self.sess = sess
##re-construct the model
if action_initializer is None:
action_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
with tf.variable_scope('action_optimizer'):
self.action = tf.get_variable('planner_action', [1] + list(env.action_space.shape), initializer=action_initializer)
self.clipped_action = tf.clip_by_value(self.action, -1, 1)
# import pdb; pdb.set_trace()
self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
self.forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
action_input=self.action)
## objective
if pos_only:
self.objective = tf.reduce_sum(tf.square(self.forward_model.output[0][:6] - self.encoder2.output[0][:6]))
else:
self.objective = tf.reduce_sum(tf.square(self.forward_model.output - self.encoder2.output))
#Adam optimizer has its own variables. Wrap it by a namescope
with tf.variable_scope('action_optimizer'):
self.action_opt = tf.train.AdamOptimizer(init_lr).minimize(self.objective, var_list = [self.clipped_action])
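            # Note: minimising w.r.t. self.clipped_action (a clip_by_value tensor, not a
            # tf.Variable) is what triggers the NotImplementedError mentioned in get_action
            # below; ClippedSgdForwardModelPlanner passes var_list=[self.action] instead.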
# self.action_gradient = tf.train.AdamOptimizer(init_lr).compute_gradients(self.objective, var_list = [self.action])
def get_action(self, S_init, S_goal):
        #first re-initialize every variable in "action_optimizer"
variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope='action_optimizer')
self.sess.run(tf.initialize_variables(variables))
feed_dict = {self.S_init_ph:S_init, self.S_goal_ph:S_goal}
# TODO: Find a good stop criteria
now = time.time()
for i in range(0,150):
gradient = self.sess.run([self.action_opt], feed_dict = feed_dict)
#raises NotImplementedError: ('Trying to optimize unsupported type ', <tf.Tensor 'clip_by_value:0' shape=(1, 4) dtype=float32>)
#this code does not work....
if i %50 == 0:
print("#########Optimizing action#########")
action_loss = self.sess.run(self.objective, feed_dict = feed_dict)
print("action_loss(sum_square_error(S_goal, S_next)) is {}".format(action_loss))
print("current_action is {}".format(self.sess.run(self.action)))
# print("current s_next is {}".format(self.sess.run(self.forward_model.output, feed_dict = feed_dict)))
print("{} sec elapsed for 50 gradient steps".format(time.time() - now))
now = time.time()
return self.sess.run([self.action, self.objective], feed_dict = feed_dict)
#debug API
def predict_next_state(self, current_state, action, goal_state):
feed_dict = {self.S_init_ph:current_state, self.S_goal_ph: goal_state}
old_action = self.sess.run(self.action)
#assign new action
self.sess.run(self.action.assign([action]))
next_state, S_init, S_goal, loss = self.sess.run([self.forward_model.output,\
self.encoder1.output,\
self.encoder2.output,\
self.objective], feed_dict = feed_dict)
#assign back the old action
self.sess.run(self.action.assign(old_action))
return next_state, S_init, S_goal, loss
class ClippedSgdForwardModelPlanner(object):
def __init__(
self,
dynamic_model,
encoder,
env,
action_initializer = None,
init_lr = 1e-1,
sess = None,
pos_only = False,
):
if sess == None:
sess =tf.get_default_session()
self.sess = sess
##re-construct the model
if action_initializer is None:
action_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
with tf.variable_scope('action_optimizer'):
self.action = tf.get_variable('planner_action', [1] + list(env.action_space.shape), initializer=action_initializer)
self.clipped_action = tf.clip_by_value(self.action, -1, 1)
self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
self.forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
action_input=self.action)
## objective
if pos_only:
self.objective = tf.reduce_sum(tf.square(self.forward_model.output[0][:6] - self.encoder2.output[0][:6]))
else:
self.objective = tf.reduce_sum(tf.square(self.forward_model.output - self.encoder2.output))
#Adam optimizer has its own variables. Wrap it by a namescope
with tf.variable_scope('action_optimizer'):
self.action_opt = tf.train.AdamOptimizer(init_lr).minimize(self.objective, var_list = [self.action])
self.action_gradient = tf.train.AdamOptimizer(init_lr).compute_gradients(self.objective, var_list = [self.action])
def get_action(self, S_init, S_goal):
        #first re-initialize every variable in "action_optimizer"
variables = tf.get_collection(tf.GraphKeys.VARIABLES, scope='action_optimizer')
self.sess.run(tf.initialize_variables(variables))
feed_dict = {self.S_init_ph:S_init, self.S_goal_ph:S_goal}
# TODO: Find a good stop criteria
now = time.time()
for i in range(0,150):
#normal speed
self.sess.run([self.action_opt], feed_dict = feed_dict)
#slow and will be slower and slower
# self.sess.run([self.clipped_action, self.action.assign(self.clipped_action), self.action_opt], \
# feed_dict = feed_dict)
if i %50 == 0:
print("#########Optimizing action#########")
action_loss = self.sess.run(self.objective, feed_dict = feed_dict)
print("action_loss(sum_square_error(S_goal, S_next)) is {}".format(action_loss))
print("current_action is {}".format(self.sess.run(self.clipped_action)))
# print("current s_next is {}".format(self.sess.run(self.forward_model.output, feed_dict = feed_dict)))
print("{} sec elapsed for 100 gradient steps".format(time.time() - now))
now = time.time()
return self.sess.run([self.action, self.objective], feed_dict = feed_dict)
#debug API
def predict_next_state(self, current_state, action, goal_state):
feed_dict = {self.S_init_ph:current_state, self.S_goal_ph: goal_state}
old_action = self.sess.run(self.action)
#assign new action
self.sess.run(self.action.assign([action]))
next_state, S_init, S_goal, loss = self.sess.run([self.forward_model.output,\
self.encoder1.output,\
self.encoder2.output,\
self.objective], feed_dict = feed_dict)
#assign back the old action
self.sess.run(self.action.assign(old_action))
return next_state, S_init, S_goal, loss
from sandbox.rocky.tf.core.parameterized import Parameterized
class ParameterizedAction(Parameterized):
def __init__(self, env, sess, action_initializer = None):
Parameterized.__init__(self)
if action_initializer is None:
action_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
with tf.variable_scope('action_optimizer'):
self.action = tf.get_variable('planner_action', [1] + list(env.action_space.shape), initializer=action_initializer)
self.sess = sess
self.env = env
def get_action(self):
return self.sess.run(self.action)
def initalize_action(self):
        self.sess.run(tf.initialize_variables([self.action]))
return
class ConstrainedForwardModelPlanner(object):
def __init__(
self,
dynamic_model,
encoder,
env,
sess = None,
pos_only = False,
action_initializer = None,
optimizer = tf.contrib.opt.ScipyOptimizerInterface,
):
if sess == None:
sess =tf.get_default_session()
self.sess = sess
if action_initializer is None:
action_initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
with tf.variable_scope('action_optimizer'):
self.action = tf.get_variable('planner_action', [1,4], initializer=action_initializer)
## rebuild the dynamic model
self.S_init_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
self.S_goal_ph = tf.placeholder(tf.float32, list(env.observation_space.shape))
self.encoder1 = encoder.get_weight_tied_copy(observation_input=[self.S_init_ph])
self.encoder2 = encoder.get_weight_tied_copy(observation_input=[self.S_goal_ph])
self.forward_model = dynamic_model.get_weight_tied_copy(feature_input=self.encoder1.output,
action_input=self.action)
## objective
if pos_only:
self.objective = tf.reduce_sum(tf.square(self.forward_model.output[0][:6] - self.encoder2.output[0][:6]))
else:
self.objective = tf.reduce_sum(tf.square(self.forward_model.output - self.encoder2.output))
self.loss = self.objective
self.inequalities = []
for i in range(4):
self.inequalities.append(1-tf.square(self.action[0][i]))
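        # ScipyOptimizerInterface holds every entry of `inequalities` non-negative, so
        # 1 - action[i]**2 >= 0 enforces the action bound |action[i]| <= 1.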
# Our default SciPy optimization algorithm, L-BFGS-B, does not support
# general constraints. Thus we use SLSQP instead.
def get_action(self, S_init, S_goal):
        #first re-initialize every variable in "action_optimizer"
self.sess.run(tf.initialize_variables([self.action]))
feed_dict = {self.S_init_ph:S_init, self.S_goal_ph:S_goal}
# need to re-initialize optimizer every time want to use it or it will optimize action without enforcing constrains.
optimizer = tf.contrib.opt.ScipyOptimizerInterface(
self.loss, var_list = [self.action], inequalities=self.inequalities, method='SLSQP')
now = time.time()
optimizer.minimize(self.sess, feed_dict = feed_dict)
print("it takes {} to optimize the action".format(time.time() - now))
return self.sess.run([self.action, self.loss], feed_dict = feed_dict)
|
[
"numpy.clip",
"tensorflow.shape",
"numpy.random.rand",
"tensorflow.get_variable",
"tensorflow.get_default_session",
"tensorflow.gradients",
"numpy.argsort",
"tensorflow.contrib.opt.ScipyOptimizerInterface",
"numpy.moveaxis",
"numpy.linalg.norm",
"numpy.cov",
"railrl.misc.pyhelper_fns.vis_utils.MyAnimationMulti",
"numpy.mean",
"tensorflow.placeholder",
"tensorflow.clip_by_value",
"tensorflow.square",
"tensorflow.convert_to_tensor",
"tensorflow.train.AdamOptimizer",
"numpy.ones",
"tensorflow.variable_scope",
"numpy.random.multivariate_normal",
"numpy.square",
"tensorflow.range",
"tensorflow.gather",
"numpy.isnan",
"tensorflow.reshape",
"sandbox.rocky.tf.core.parameterized.Parameterized.__init__",
"time.time",
"tensorflow.op_scope",
"tensorflow.initialize_variables",
"numpy.zeros",
"pdb.set_trace",
"tensorflow.random_uniform_initializer",
"tensorflow.get_collection"
] |
[((886, 937), 'tensorflow.op_scope', 'tf.op_scope', (['[params, indices]', 'name', '"""gather_cols"""'], {}), "([params, indices], name, 'gather_cols')\n", (897, 937), True, 'import tensorflow as tf\n'), ((987, 1030), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['params'], {'name': '"""params"""'}), "(params, name='params')\n", (1007, 1030), True, 'import tensorflow as tf\n'), ((1049, 1094), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['indices'], {'name': '"""indices"""'}), "(indices, name='indices')\n", (1069, 1094), True, 'import tensorflow as tf\n'), ((1425, 1441), 'tensorflow.shape', 'tf.shape', (['params'], {}), '(params)\n', (1433, 1441), True, 'import tensorflow as tf\n'), ((1459, 1483), 'tensorflow.reshape', 'tf.reshape', (['params', '[-1]'], {}), '(params, [-1])\n', (1469, 1483), True, 'import tensorflow as tf\n'), ((3592, 3641), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[max_length, None, 4]'], {}), '(tf.float32, [max_length, None, 4])\n', (3606, 3641), True, 'import tensorflow as tf\n'), ((3741, 3779), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 24]'], {}), '(tf.float32, [None, 24])\n', (3755, 3779), True, 'import tensorflow as tf\n'), ((3799, 3837), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 24]'], {}), '(tf.float32, [None, 24])\n', (3813, 3837), True, 'import tensorflow as tf\n'), ((6940, 6968), 'numpy.cov', 'np.cov', (['trans_best_actions.T'], {}), '(trans_best_actions.T)\n', (6946, 6968), True, 'import numpy as np\n'), ((6983, 7020), 'numpy.mean', 'np.mean', (['trans_best_actions.T'], {'axis': '(1)'}), '(trans_best_actions.T, axis=1)\n', (6990, 7020), True, 'import numpy as np\n'), ((8988, 9037), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[max_length, None, 4]'], {}), '(tf.float32, [max_length, None, 4])\n', (9002, 9037), True, 'import tensorflow as tf\n'), ((12640, 12668), 'numpy.cov', 'np.cov', (['trans_best_actions.T'], {}), '(trans_best_actions.T)\n', (12646, 12668), True, 'import numpy as np\n'), ((12683, 12720), 'numpy.mean', 'np.mean', (['trans_best_actions.T'], {'axis': '(1)'}), '(trans_best_actions.T, axis=1)\n', (12690, 12720), True, 'import numpy as np\n'), ((14843, 14889), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[max_length, 1, 4]'], {}), '(tf.float32, [max_length, 1, 4])\n', (14857, 14889), True, 'import tensorflow as tf\n'), ((16912, 17012), 'railrl.misc.pyhelper_fns.vis_utils.MyAnimationMulti', 'MyAnimationMulti', (['None'], {'numPlots': '(2)', 'isIm': '[0, 0]', 'axTitles': "['(S1-S_goal)^2', 'sum(S_i-S_goal)^2']"}), "(None, numPlots=2, isIm=[0, 0], axTitles=['(S1-S_goal)^2',\n 'sum(S_i-S_goal)^2'])\n", (16928, 17012), False, 'from railrl.misc.pyhelper_fns.vis_utils import MyAnimationMulti\n'), ((17187, 17220), 'numpy.zeros', 'np.zeros', (['[self.max_length, 1, 4]'], {}), '([self.max_length, 1, 4])\n', (17195, 17220), True, 'import numpy as np\n'), ((17314, 17325), 'time.time', 'time.time', ([], {}), '()\n', (17323, 17325), False, 'import time\n'), ((19509, 19555), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[max_length, 1, 4]'], {}), '(tf.float32, [max_length, 1, 4])\n', (19523, 19555), True, 'import tensorflow as tf\n'), ((21420, 21517), 'railrl.misc.pyhelper_fns.vis_utils.MyAnimationMulti', 'MyAnimationMulti', (['None'], {'numPlots': '(2)', 'isIm': '[0, 0]', 'axTitles': "['(S1-S_goal)^2', '(S_n-S_goal)^2']"}), "(None, numPlots=2, isIm=[0, 0], axTitles=['(S1-S_goal)^2',\n '(S_n-S_goal)^2'])\n", (21436, 21517), 
False, 'from railrl.misc.pyhelper_fns.vis_utils import MyAnimationMulti\n'), ((21691, 21724), 'numpy.zeros', 'np.zeros', (['[self.max_length, 1, 4]'], {}), '([self.max_length, 1, 4])\n', (21699, 21724), True, 'import numpy as np\n'), ((21818, 21829), 'time.time', 'time.time', ([], {}), '()\n', (21827, 21829), False, 'import time\n'), ((23449, 23486), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 4]'], {}), '(tf.float32, [None, 4])\n', (23463, 23486), True, 'import tensorflow as tf\n'), ((24520, 24564), 'tensorflow.gradients', 'tf.gradients', (['self.objective', 'self.action_ph'], {}), '(self.objective, self.action_ph)\n', (24532, 24564), True, 'import tensorflow as tf\n'), ((25165, 25176), 'time.time', 'time.time', ([], {}), '()\n', (25174, 25176), False, 'import time\n'), ((26815, 26851), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.action', '(-1)', '(1)'], {}), '(self.action, -1, 1)\n', (26831, 26851), True, 'import tensorflow as tf\n'), ((28070, 28137), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.VARIABLES'], {'scope': '"""action_optimizer"""'}), "(tf.GraphKeys.VARIABLES, scope='action_optimizer')\n", (28087, 28137), True, 'import tensorflow as tf\n'), ((28301, 28312), 'time.time', 'time.time', ([], {}), '()\n', (28310, 28312), False, 'import time\n'), ((30362, 30398), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.action', '(-1)', '(1)'], {}), '(self.action, -1, 1)\n', (30378, 30398), True, 'import tensorflow as tf\n'), ((31582, 31649), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.VARIABLES'], {'scope': '"""action_optimizer"""'}), "(tf.GraphKeys.VARIABLES, scope='action_optimizer')\n", (31599, 31649), True, 'import tensorflow as tf\n'), ((31810, 31821), 'time.time', 'time.time', ([], {}), '()\n', (31819, 31821), False, 'import time\n'), ((33453, 33481), 'sandbox.rocky.tf.core.parameterized.Parameterized.__init__', 'Parameterized.__init__', (['self'], {}), '(self)\n', (33475, 33481), False, 'from sandbox.rocky.tf.core.parameterized import Parameterized\n'), ((35902, 36027), 'tensorflow.contrib.opt.ScipyOptimizerInterface', 'tf.contrib.opt.ScipyOptimizerInterface', (['self.loss'], {'var_list': '[self.action]', 'inequalities': 'self.inequalities', 'method': '"""SLSQP"""'}), "(self.loss, var_list=[self.action],\n inequalities=self.inequalities, method='SLSQP')\n", (35940, 36027), True, 'import tensorflow as tf\n'), ((36038, 36049), 'time.time', 'time.time', ([], {}), '()\n', (36047, 36049), False, 'import time\n'), ((1652, 1677), 'tensorflow.gather', 'tf.gather', (['p_flat', 'i_flat'], {}), '(p_flat, i_flat)\n', (1661, 1677), True, 'import tensorflow as tf\n'), ((2319, 2343), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (2341, 2343), True, 'import tensorflow as tf\n'), ((3495, 3519), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (3517, 3519), True, 'import tensorflow as tf\n'), ((6071, 6100), 'numpy.ones', 'np.ones', (['[init_batch_size, 1]'], {}), '([init_batch_size, 1])\n', (6078, 6100), True, 'import numpy as np\n'), ((6148, 6177), 'numpy.ones', 'np.ones', (['[init_batch_size, 1]'], {}), '([init_batch_size, 1])\n', (6155, 6177), True, 'import numpy as np\n'), ((6468, 6494), 'numpy.argsort', 'np.argsort', (['objective_list'], {}), '(objective_list)\n', (6478, 6494), True, 'import numpy as np\n'), ((7047, 7083), 'numpy.ones', 'np.ones', (['[self.sample_batch_size, 1]'], {}), '([self.sample_batch_size, 1])\n', (7054, 7083), True, 'import 
numpy as np\n'), ((7131, 7167), 'numpy.ones', 'np.ones', (['[self.sample_batch_size, 1]'], {}), '([self.sample_batch_size, 1])\n', (7138, 7167), True, 'import numpy as np\n'), ((7376, 7402), 'numpy.moveaxis', 'np.moveaxis', (['actions', '(0)', '(1)'], {}), '(actions, 0, 1)\n', (7387, 7402), True, 'import numpy as np\n'), ((7779, 7807), 'numpy.cov', 'np.cov', (['trans_best_actions.T'], {}), '(trans_best_actions.T)\n', (7785, 7807), True, 'import numpy as np\n'), ((7823, 7860), 'numpy.mean', 'np.mean', (['trans_best_actions.T'], {'axis': '(1)'}), '(trans_best_actions.T, axis=1)\n', (7830, 7860), True, 'import numpy as np\n'), ((8891, 8915), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (8913, 8915), True, 'import tensorflow as tf\n'), ((11772, 11801), 'numpy.ones', 'np.ones', (['[init_batch_size, 1]'], {}), '([init_batch_size, 1])\n', (11779, 11801), True, 'import numpy as np\n'), ((11849, 11878), 'numpy.ones', 'np.ones', (['[init_batch_size, 1]'], {}), '([init_batch_size, 1])\n', (11856, 11878), True, 'import numpy as np\n'), ((12169, 12195), 'numpy.argsort', 'np.argsort', (['objective_list'], {}), '(objective_list)\n', (12179, 12195), True, 'import numpy as np\n'), ((12747, 12783), 'numpy.ones', 'np.ones', (['[self.sample_batch_size, 1]'], {}), '([self.sample_batch_size, 1])\n', (12754, 12783), True, 'import numpy as np\n'), ((12831, 12867), 'numpy.ones', 'np.ones', (['[self.sample_batch_size, 1]'], {}), '([self.sample_batch_size, 1])\n', (12838, 12867), True, 'import numpy as np\n'), ((13076, 13102), 'numpy.moveaxis', 'np.moveaxis', (['actions', '(0)', '(1)'], {}), '(actions, 0, 1)\n', (13087, 13102), True, 'import numpy as np\n'), ((13479, 13507), 'numpy.cov', 'np.cov', (['trans_best_actions.T'], {}), '(trans_best_actions.T)\n', (13485, 13507), True, 'import numpy as np\n'), ((13523, 13560), 'numpy.mean', 'np.mean', (['trans_best_actions.T'], {'axis': '(1)'}), '(trans_best_actions.T, axis=1)\n', (13530, 13560), True, 'import numpy as np\n'), ((14081, 14145), 'numpy.square', 'np.square', (['(forward_models_outputs[steps - 1][0][:4] - S_goal[:4])'], {}), '(forward_models_outputs[steps - 1][0][:4] - S_goal[:4])\n', (14090, 14145), True, 'import numpy as np\n'), ((14164, 14230), 'numpy.square', 'np.square', (['(forward_models_outputs[steps - 1][0][4:7] - S_goal[4:7])'], {}), '(forward_models_outputs[steps - 1][0][4:7] - S_goal[4:7])\n', (14173, 14230), True, 'import numpy as np\n'), ((14721, 14745), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (14743, 14745), True, 'import tensorflow as tf\n'), ((19387, 19411), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (19409, 19411), True, 'import tensorflow as tf\n'), ((23208, 23232), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (23230, 23232), True, 'import tensorflow as tf\n'), ((24251, 24324), 'tensorflow.square', 'tf.square', (['(self.forward_model.output[0][:4] - self.encoder2.output[0][:4])'], {}), '(self.forward_model.output[0][:4] - self.encoder2.output[0][:4])\n', (24260, 24324), True, 'import tensorflow as tf\n'), ((24358, 24433), 'tensorflow.square', 'tf.square', (['(self.forward_model.output[0][4:6] - self.encoder2.output[0][4:6])'], {}), '(self.forward_model.output[0][4:6] - self.encoder2.output[0][4:6])\n', (24367, 24433), True, 'import tensorflow as tf\n'), ((25096, 25113), 'numpy.random.rand', 'np.random.rand', (['(4)'], {}), '(4)\n', (25110, 25113), True, 'import numpy as np\n'), ((25619, 25641), 
'numpy.clip', 'np.clip', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (25626, 25641), True, 'import numpy as np\n'), ((26437, 26461), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (26459, 26461), True, 'import tensorflow as tf\n'), ((26568, 26622), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-0.1)', 'maxval': '(0.1)'}), '(minval=-0.1, maxval=0.1)\n', (26597, 26622), True, 'import tensorflow as tf\n'), ((26633, 26670), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_optimizer"""'], {}), "('action_optimizer')\n", (26650, 26670), True, 'import tensorflow as tf\n'), ((27685, 27722), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_optimizer"""'], {}), "('action_optimizer')\n", (27702, 27722), True, 'import tensorflow as tf\n'), ((28154, 28188), 'tensorflow.initialize_variables', 'tf.initialize_variables', (['variables'], {}), '(variables)\n', (28177, 28188), True, 'import tensorflow as tf\n'), ((29984, 30008), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (30006, 30008), True, 'import tensorflow as tf\n'), ((30115, 30169), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-0.1)', 'maxval': '(0.1)'}), '(minval=-0.1, maxval=0.1)\n', (30144, 30169), True, 'import tensorflow as tf\n'), ((30180, 30217), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_optimizer"""'], {}), "('action_optimizer')\n", (30197, 30217), True, 'import tensorflow as tf\n'), ((31200, 31237), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_optimizer"""'], {}), "('action_optimizer')\n", (31217, 31237), True, 'import tensorflow as tf\n'), ((31666, 31700), 'tensorflow.initialize_variables', 'tf.initialize_variables', (['variables'], {}), '(variables)\n', (31689, 31700), True, 'import tensorflow as tf\n'), ((33542, 33596), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-0.1)', 'maxval': '(0.1)'}), '(minval=-0.1, maxval=0.1)\n', (33571, 33596), True, 'import tensorflow as tf\n'), ((33608, 33645), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_optimizer"""'], {}), "('action_optimizer')\n", (33625, 33645), True, 'import tensorflow as tf\n'), ((33906, 33942), 'tensorflow.initialize_variables', 'tf.initialize_variables', (['self.action'], {}), '(self.action)\n', (33929, 33942), True, 'import tensorflow as tf\n'), ((34231, 34255), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (34253, 34255), True, 'import tensorflow as tf\n'), ((34332, 34386), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-0.1)', 'maxval': '(0.1)'}), '(minval=-0.1, maxval=0.1)\n', (34361, 34386), True, 'import tensorflow as tf\n'), ((34397, 34434), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_optimizer"""'], {}), "('action_optimizer')\n", (34414, 34434), True, 'import tensorflow as tf\n'), ((34453, 34526), 'tensorflow.get_variable', 'tf.get_variable', (['"""planner_action"""', '[1, 4]'], {'initializer': 'action_initializer'}), "('planner_action', [1, 4], initializer=action_initializer)\n", (34468, 34526), True, 'import tensorflow as tf\n'), ((35665, 35703), 'tensorflow.initialize_variables', 'tf.initialize_variables', (['[self.action]'], {}), '([self.action])\n', (35688, 35703), True, 'import tensorflow as tf\n'), ((6220, 6271), 'numpy.random.rand', 'np.random.rand', (['self.max_length', 
'init_batch_size', '(4)'], {}), '(self.max_length, init_batch_size, 4)\n', (6234, 6271), True, 'import numpy as np\n'), ((6869, 6900), 'numpy.moveaxis', 'np.moveaxis', (['best_actions', '(0)', '(1)'], {}), '(best_actions, 0, 1)\n', (6880, 6900), True, 'import numpy as np\n'), ((7598, 7624), 'numpy.argsort', 'np.argsort', (['objective_list'], {}), '(objective_list)\n', (7608, 7624), True, 'import numpy as np\n'), ((11921, 11972), 'numpy.random.rand', 'np.random.rand', (['self.max_length', 'init_batch_size', '(4)'], {}), '(self.max_length, init_batch_size, 4)\n', (11935, 11972), True, 'import numpy as np\n'), ((12569, 12600), 'numpy.moveaxis', 'np.moveaxis', (['best_actions', '(0)', '(1)'], {}), '(best_actions, 0, 1)\n', (12580, 12600), True, 'import numpy as np\n'), ((13298, 13324), 'numpy.argsort', 'np.argsort', (['objective_list'], {}), '(objective_list)\n', (13308, 13324), True, 'import numpy as np\n'), ((16859, 16892), 'tensorflow.gradients', 'tf.gradients', (['obj', 'self.action_ph'], {}), '(obj, self.action_ph)\n', (16871, 16892), True, 'import tensorflow as tf\n'), ((17997, 18012), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (18010, 18012), False, 'import pdb\n'), ((18182, 18204), 'numpy.clip', 'np.clip', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (18189, 18204), True, 'import numpy as np\n'), ((21367, 21400), 'tensorflow.gradients', 'tf.gradients', (['obj', 'self.action_ph'], {}), '(obj, self.action_ph)\n', (21379, 21400), True, 'import tensorflow as tf\n'), ((22499, 22514), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (22512, 22514), False, 'import pdb\n'), ((22692, 22714), 'numpy.clip', 'np.clip', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (22699, 22714), True, 'import numpy as np\n'), ((24037, 24110), 'tensorflow.square', 'tf.square', (['(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (24046, 24110), True, 'import tensorflow as tf\n'), ((24154, 24213), 'tensorflow.square', 'tf.square', (['(self.forward_model.output - self.encoder2.output)'], {}), '(self.forward_model.output - self.encoder2.output)\n', (24163, 24213), True, 'import tensorflow as tf\n'), ((26108, 26119), 'time.time', 'time.time', ([], {}), '()\n', (26117, 26119), False, 'import time\n'), ((27431, 27504), 'tensorflow.square', 'tf.square', (['(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (27440, 27504), True, 'import tensorflow as tf\n'), ((27548, 27607), 'tensorflow.square', 'tf.square', (['(self.forward_model.output - self.encoder2.output)'], {}), '(self.forward_model.output - self.encoder2.output)\n', (27557, 27607), True, 'import tensorflow as tf\n'), ((29064, 29075), 'time.time', 'time.time', ([], {}), '()\n', (29073, 29075), False, 'import time\n'), ((30946, 31019), 'tensorflow.square', 'tf.square', (['(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (30955, 31019), True, 'import tensorflow as tf\n'), ((31063, 31122), 'tensorflow.square', 'tf.square', (['(self.forward_model.output - self.encoder2.output)'], {}), '(self.forward_model.output - self.encoder2.output)\n', (31072, 31122), True, 'import tensorflow as tf\n'), ((32604, 32615), 'time.time', 'time.time', ([], {}), '()\n', (32613, 32615), False, 'import time\n'), ((35104, 35177), 'tensorflow.square', 'tf.square', 
(['(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(self.forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (35113, 35177), True, 'import tensorflow as tf\n'), ((35221, 35280), 'tensorflow.square', 'tf.square', (['(self.forward_model.output - self.encoder2.output)'], {}), '(self.forward_model.output - self.encoder2.output)\n', (35230, 35280), True, 'import tensorflow as tf\n'), ((5399, 5467), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:4] - self.encoder2.output[0][:4])'], {}), '(forward_model.output[0][:4] - self.encoder2.output[0][:4])\n', (5408, 5467), True, 'import tensorflow as tf\n'), ((7234, 7310), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_list', 'cov_matrix', 'self.sample_batch_size'], {}), '(mean_list, cov_matrix, self.sample_batch_size)\n', (7263, 7310), True, 'import numpy as np\n'), ((7707, 7738), 'numpy.moveaxis', 'np.moveaxis', (['best_actions', '(0)', '(1)'], {}), '(best_actions, 0, 1)\n', (7718, 7738), True, 'import numpy as np\n'), ((12934, 13010), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_list', 'cov_matrix', 'self.sample_batch_size'], {}), '(mean_list, cov_matrix, self.sample_batch_size)\n', (12963, 13010), True, 'import numpy as np\n'), ((13407, 13438), 'numpy.moveaxis', 'np.moveaxis', (['best_actions', '(0)', '(1)'], {}), '(best_actions, 0, 1)\n', (13418, 13438), True, 'import numpy as np\n'), ((16599, 16659), 'tensorflow.square', 'tf.square', (['(forward_model.output[0] - self.encoder2.output[0])'], {}), '(forward_model.output[0] - self.encoder2.output[0])\n', (16608, 16659), True, 'import tensorflow as tf\n'), ((17857, 17875), 'numpy.isnan', 'np.isnan', (['gradient'], {}), '(gradient)\n', (17865, 17875), True, 'import numpy as np\n'), ((17896, 17933), 'numpy.random.rand', 'np.random.rand', (['self.max_length', '(1)', '(4)'], {}), '(self.max_length, 1, 4)\n', (17910, 17933), True, 'import numpy as np\n'), ((18029, 18053), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {}), '(gradient)\n', (18043, 18053), True, 'import numpy as np\n'), ((22359, 22377), 'numpy.isnan', 'np.isnan', (['gradient'], {}), '(gradient)\n', (22367, 22377), True, 'import numpy as np\n'), ((22398, 22435), 'numpy.random.rand', 'np.random.rand', (['self.max_length', '(1)', '(4)'], {}), '(self.max_length, 1, 4)\n', (22412, 22435), True, 'import numpy as np\n'), ((22531, 22555), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {}), '(gradient)\n', (22545, 22555), True, 'import numpy as np\n'), ((27745, 27776), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['init_lr'], {}), '(init_lr)\n', (27767, 27776), True, 'import tensorflow as tf\n'), ((31260, 31291), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['init_lr'], {}), '(init_lr)\n', (31282, 31291), True, 'import tensorflow as tf\n'), ((31369, 31400), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['init_lr'], {}), '(init_lr)\n', (31391, 31400), True, 'import tensorflow as tf\n'), ((35392, 35420), 'tensorflow.square', 'tf.square', (['self.action[0][i]'], {}), '(self.action[0][i])\n', (35401, 35420), True, 'import tensorflow as tf\n'), ((36162, 36173), 'time.time', 'time.time', ([], {}), '()\n', (36171, 36173), False, 'import time\n'), ((1523, 1546), 'tensorflow.range', 'tf.range', (['(0)', 'p_shape[0]'], {}), '(0, p_shape[0])\n', (1531, 1546), True, 'import tensorflow as tf\n'), ((5743, 5768), 'tensorflow.square', 'tf.square', (['self.action_ph'], {}), '(self.action_ph)\n', 
(5752, 5768), True, 'import tensorflow as tf\n'), ((10749, 10817), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:4] - self.encoder2.output[0][:4])'], {}), '(forward_model.output[0][:4] - self.encoder2.output[0][:4])\n', (10758, 10817), True, 'import tensorflow as tf\n'), ((10864, 10934), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])'], {}), '(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])\n', (10873, 10934), True, 'import tensorflow as tf\n'), ((11039, 11099), 'tensorflow.square', 'tf.square', (['(forward_model.output[0] - self.encoder2.output[0])'], {}), '(forward_model.output[0] - self.encoder2.output[0])\n', (11048, 11099), True, 'import tensorflow as tf\n'), ((11146, 11214), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:4] - self.encoder2.output[0][:4])'], {}), '(forward_model.output[0][:4] - self.encoder2.output[0][:4])\n', (11155, 11214), True, 'import tensorflow as tf\n'), ((11261, 11331), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])'], {}), '(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])\n', (11270, 11331), True, 'import tensorflow as tf\n'), ((11444, 11469), 'tensorflow.square', 'tf.square', (['self.action_ph'], {}), '(self.action_ph)\n', (11453, 11469), True, 'import tensorflow as tf\n'), ((16053, 16121), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (16062, 16121), True, 'import tensorflow as tf\n'), ((16167, 16235), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (16176, 16235), True, 'import tensorflow as tf\n'), ((16323, 16391), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:4] - self.encoder2.output[0][:4])'], {}), '(forward_model.output[0][:4] - self.encoder2.output[0][:4])\n', (16332, 16391), True, 'import tensorflow as tf\n'), ((16438, 16508), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])'], {}), '(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])\n', (16447, 16508), True, 'import tensorflow as tf\n'), ((20743, 20811), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:6] - self.encoder2.output[0][:6])'], {}), '(forward_model.output[0][:6] - self.encoder2.output[0][:6])\n', (20752, 20811), True, 'import tensorflow as tf\n'), ((20858, 20926), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][:4] - self.encoder2.output[0][:4])'], {}), '(forward_model.output[0][:4] - self.encoder2.output[0][:4])\n', (20867, 20926), True, 'import tensorflow as tf\n'), ((20973, 21043), 'tensorflow.square', 'tf.square', (['(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])'], {}), '(forward_model.output[0][4:6] - self.encoder2.output[0][4:6])\n', (20982, 21043), True, 'import tensorflow as tf\n'), ((21148, 21208), 'tensorflow.square', 'tf.square', (['(forward_model.output[0] - self.encoder2.output[0])'], {}), '(forward_model.output[0] - self.encoder2.output[0])\n', (21157, 21208), True, 'import tensorflow as tf\n'), ((26078, 26089), 'time.time', 'time.time', ([], {}), '()\n', (26087, 26089), False, 'import time\n'), ((29034, 29045), 'time.time', 'time.time', ([], {}), '()\n', (29043, 29045), False, 'import time\n'), ((32574, 32585), 'time.time', 'time.time', ([], {}), 
'()\n', (32583, 32585), False, 'import time\n'), ((18090, 18114), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {}), '(gradient)\n', (18104, 18114), True, 'import numpy as np\n'), ((22592, 22616), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {}), '(gradient)\n', (22606, 22616), True, 'import numpy as np\n')]
|
import os
import json
import math
from neuralparticles.tensorflow.tools.hyper_parameter import HyperParameter, ValueType, SearchType
from neuralparticles.tensorflow.tools.hyper_search import HyperSearch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import keras
from neuralparticles.tensorflow.models.PUNet import PUNet
from neuralparticles.tools.param_helpers import *
from neuralparticles.tools.data_helpers import load_patches_from_file, PatchExtractor, get_data_pair, extract_particles
from neuralparticles.tensorflow.tools.eval_helpers import EvalCallback, EvalCompleteCallback
import numpy as np
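# Hyper-parameter search driver for the patch-based PUNet model: loads
# source/reference training patches, builds a PUNet from the merged
# data/preprocess/train configs, runs HyperSearch over the parameters passed
# on the command line as "hyper0", "hyper1", ..., and writes the evaluation
# callbacks' output and the model diagram into the temporary backup folder.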
data_path = getParam("data", "data/")
config_path = getParam("config", "config/version_00.txt")
verbose = int(getParam("verbose", 0)) != 0
gpu = getParam("gpu", "")
epochs = int(getParam("epochs", 0))
eval_cnt = int(getParam("eval_cnt", 5))
eval_dataset = getParam("eval_d", []) #'18,18,18,19,19'
eval_t = getParam("eval_t", []) #'5,5,6,6,7'
eval_var = getParam("eval_v", []) #'0,0,0,0,0'
eval_patch_idx = getParam("eval_i", []) #'11,77,16,21,45'
if len(eval_dataset) > 0:
eval_dataset = list(map(int, eval_dataset.split(',')))
if len(eval_t) > 0:
eval_t = list(map(int, eval_t.split(',')))
if len(eval_var) > 0:
eval_var = list(map(int, eval_var.split(',')))
if len(eval_patch_idx) > 0:
eval_patch_idx = list(map(float, eval_patch_idx.split(',')))
i=0
hyper_teams = []
while(True):
hyper_par = getParam("hyper%d"%i, None)
i += 1
if hyper_par is None:
break
else:
hyper_teams.append(HyperParameter.parse(hyper_par))
checkUnusedParams()
src_path = data_path + "patches/source/"
ref_path = data_path + "patches/reference/"
model_path = data_path + "models/"
if not os.path.exists(model_path):
os.mkdir(model_path)
tmp_folder = backupSources(data_path)
tmp_model_path = tmp_folder + "models/"
os.mkdir(tmp_model_path)
tmp_eval_path = tmp_folder + "eval/"
os.mkdir(tmp_eval_path)
if gpu != "":
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
with open(config_path, 'r') as f:
config = json.loads(f.read())
with open(os.path.dirname(config_path) + '/' + config['data'], 'r') as f:
data_config = json.loads(f.read())
with open(os.path.dirname(config_path) + '/' + config['preprocess'], 'r') as f:
pre_config = json.loads(f.read())
with open(os.path.dirname(config_path) + '/' + config['train'], 'r') as f:
train_config = json.loads(f.read())
if verbose:
print("Config Loaded:")
print(config)
print(data_config)
print(pre_config)
print(train_config)
# copy config files into tmp
np.random.seed(data_config['seed'])
#tf.set_random_seed(data_config['seed'])
if epochs == 0:
epochs = train_config['epochs']
config_dict = {**data_config, **pre_config, **train_config}
punet = PUNet(**config_dict)
if len(eval_dataset) < eval_cnt:
eval_dataset.extend(np.random.randint(int(data_config['data_count'] * train_config['train_split']), data_config['data_count'], eval_cnt-len(eval_dataset)))
if len(eval_t) < eval_cnt:
t_start = min(train_config['t_start'], data_config['frame_count']-1)
t_end = min(train_config['t_end'], data_config['frame_count'])
eval_t.extend(np.random.randint(t_start, t_end, eval_cnt-len(eval_t)))
if len(eval_var) < eval_cnt:
eval_var.extend([0]*(eval_cnt-len(eval_var)))
if len(eval_patch_idx) < eval_cnt:
eval_patch_idx.extend(np.random.random(eval_cnt-len(eval_patch_idx)))
tmp_model_path = '%s%s_%s' % (tmp_model_path, data_config['prefix'], config['id'])
fig_path = '%s_loss' % tmp_model_path
src_path = "%s%s_%s-%s" % (src_path, data_config['prefix'], data_config['id'], pre_config['id']) + "_d%03d_var%02d_pvar%02d_%03d"
ref_path = "%s%s_%s-%s" % (ref_path, data_config['prefix'], data_config['id'], pre_config['id']) + "_d%03d_var%02d_pvar%02d_%03d"
print(src_path)
print(ref_path)
print("Load Training Data")
src_data, ref_data = load_patches_from_file(data_path, config_path)
idx = np.arange(src_data[0].shape[0])
np.random.shuffle(idx)
src_data = [s[idx] for s in src_data]
ref_data = ref_data[idx]
print("Load Eval Data")
factor_d = math.pow(pre_config['factor'], 1/data_config['dim'])
patch_size = pre_config['patch_size'] * data_config['res'] / factor_d
patch_size_ref = pre_config['patch_size_ref'] * data_config['res']
eval_patch_extractors = []
eval_ref_datas = []
eval_src_patches = []
eval_ref_patches = []
for i in range(len(eval_dataset)):
(eval_src_data, eval_sdf_data, eval_par_aux), (eval_ref_data, eval_ref_sdf_data) = get_data_pair(data_path, config_path, eval_dataset[i], eval_t[i], eval_var[i])
eval_ref_datas.append(eval_ref_data)
np.random.seed(100)
eval_patch_extractors.append(PatchExtractor(eval_src_data, eval_sdf_data, patch_size, pre_config['par_cnt'], pre_config['surf'], pre_config['stride'], aux_data=eval_par_aux, features=train_config['features'], pad_val=pre_config['pad_val'], bnd=data_config['bnd']/factor_d))
p_idx = int(eval_patch_idx[i] * len(eval_patch_extractors[i].positions))
eval_src_patches.append(eval_patch_extractors[i].get_patch(p_idx,False))
eval_ref_patches.append(extract_particles(eval_ref_data, eval_patch_extractors[i].positions[p_idx] * factor_d, pre_config['par_cnt_ref'], patch_size_ref/2, pre_config['pad_val'])[0])
print("Eval with dataset %d, timestep %d, var %d, patch idx %d" % (eval_dataset[i], eval_t[i], eval_var[i], p_idx))
print("Eval trunc src: %d" % (np.count_nonzero(eval_src_patches[i][0][:,:,:1] != pre_config['pad_val'])))
print("Eval trunc ref: %d" % (np.count_nonzero(eval_ref_patches[i][:,:1] != pre_config['pad_val'])))
config_dict['src'] = src_data
config_dict['ref'] = ref_data
config_dict['callbacks'] = [(EvalCallback(tmp_eval_path + "eval_patch", eval_src_patches, eval_ref_patches,
train_config['features'], multiple_runs=True, z=None if data_config['dim'] == 2 else 0, verbose=1)),
(EvalCompleteCallback(tmp_eval_path + "eval", eval_patch_extractors, eval_ref_datas,
factor_d, data_config['res'], multiple_runs=True, z=None if data_config['dim'] == 2 else data_config['res']//2, verbose=1))]
hs = HyperSearch(punet, hyper_teams, output_folder=tmp_folder)
del config_dict['epochs']
history = hs.search(epochs, **config_dict)
keras.utils.plot_model(punet.model, tmp_model_path + '.pdf')
|
[
"neuralparticles.tools.data_helpers.extract_particles",
"numpy.count_nonzero",
"neuralparticles.tensorflow.tools.eval_helpers.EvalCallback",
"numpy.arange",
"os.path.exists",
"keras.utils.plot_model",
"neuralparticles.tensorflow.models.PUNet.PUNet",
"neuralparticles.tools.data_helpers.load_patches_from_file",
"numpy.random.seed",
"os.mkdir",
"neuralparticles.tensorflow.tools.eval_helpers.EvalCompleteCallback",
"neuralparticles.tools.data_helpers.PatchExtractor",
"neuralparticles.tensorflow.tools.hyper_parameter.HyperParameter.parse",
"matplotlib.use",
"neuralparticles.tensorflow.tools.hyper_search.HyperSearch",
"os.path.dirname",
"math.pow",
"neuralparticles.tools.data_helpers.get_data_pair",
"numpy.random.shuffle"
] |
[((224, 245), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (238, 245), False, 'import matplotlib\n'), ((1881, 1905), 'os.mkdir', 'os.mkdir', (['tmp_model_path'], {}), '(tmp_model_path)\n', (1889, 1905), False, 'import os\n'), ((1943, 1966), 'os.mkdir', 'os.mkdir', (['tmp_eval_path'], {}), '(tmp_eval_path)\n', (1951, 1966), False, 'import os\n'), ((2608, 2643), 'numpy.random.seed', 'np.random.seed', (["data_config['seed']"], {}), "(data_config['seed'])\n", (2622, 2643), True, 'import numpy as np\n'), ((2807, 2827), 'neuralparticles.tensorflow.models.PUNet.PUNet', 'PUNet', ([], {}), '(**config_dict)\n', (2812, 2827), False, 'from neuralparticles.tensorflow.models.PUNet import PUNet\n'), ((3926, 3972), 'neuralparticles.tools.data_helpers.load_patches_from_file', 'load_patches_from_file', (['data_path', 'config_path'], {}), '(data_path, config_path)\n', (3948, 3972), False, 'from neuralparticles.tools.data_helpers import load_patches_from_file, PatchExtractor, get_data_pair, extract_particles\n'), ((3980, 4011), 'numpy.arange', 'np.arange', (['src_data[0].shape[0]'], {}), '(src_data[0].shape[0])\n', (3989, 4011), True, 'import numpy as np\n'), ((4012, 4034), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (4029, 4034), True, 'import numpy as np\n'), ((4135, 4189), 'math.pow', 'math.pow', (["pre_config['factor']", "(1 / data_config['dim'])"], {}), "(pre_config['factor'], 1 / data_config['dim'])\n", (4143, 4189), False, 'import math\n'), ((6245, 6302), 'neuralparticles.tensorflow.tools.hyper_search.HyperSearch', 'HyperSearch', (['punet', 'hyper_teams'], {'output_folder': 'tmp_folder'}), '(punet, hyper_teams, output_folder=tmp_folder)\n', (6256, 6302), False, 'from neuralparticles.tensorflow.tools.hyper_search import HyperSearch\n'), ((6373, 6433), 'keras.utils.plot_model', 'keras.utils.plot_model', (['punet.model', "(tmp_model_path + '.pdf')"], {}), "(punet.model, tmp_model_path + '.pdf')\n", (6395, 6433), False, 'import keras\n'), ((1752, 1778), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (1766, 1778), False, 'import os\n'), ((1781, 1801), 'os.mkdir', 'os.mkdir', (['model_path'], {}), '(model_path)\n', (1789, 1801), False, 'import os\n'), ((4540, 4618), 'neuralparticles.tools.data_helpers.get_data_pair', 'get_data_pair', (['data_path', 'config_path', 'eval_dataset[i]', 'eval_t[i]', 'eval_var[i]'], {}), '(data_path, config_path, eval_dataset[i], eval_t[i], eval_var[i])\n', (4553, 4618), False, 'from neuralparticles.tools.data_helpers import load_patches_from_file, PatchExtractor, get_data_pair, extract_particles\n'), ((4665, 4684), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (4679, 4684), True, 'import numpy as np\n'), ((5730, 5915), 'neuralparticles.tensorflow.tools.eval_helpers.EvalCallback', 'EvalCallback', (["(tmp_eval_path + 'eval_patch')", 'eval_src_patches', 'eval_ref_patches', "train_config['features']"], {'multiple_runs': '(True)', 'z': "(None if data_config['dim'] == 2 else 0)", 'verbose': '(1)'}), "(tmp_eval_path + 'eval_patch', eval_src_patches,\n eval_ref_patches, train_config['features'], multiple_runs=True, z=None if\n data_config['dim'] == 2 else 0, verbose=1)\n", (5742, 5915), False, 'from neuralparticles.tensorflow.tools.eval_helpers import EvalCallback, EvalCompleteCallback\n'), ((5981, 6198), 'neuralparticles.tensorflow.tools.eval_helpers.EvalCompleteCallback', 'EvalCompleteCallback', (["(tmp_eval_path + 'eval')", 'eval_patch_extractors', 'eval_ref_datas', 'factor_d', 
"data_config['res']"], {'multiple_runs': '(True)', 'z': "(None if data_config['dim'] == 2 else data_config['res'] // 2)", 'verbose': '(1)'}), "(tmp_eval_path + 'eval', eval_patch_extractors,\n eval_ref_datas, factor_d, data_config['res'], multiple_runs=True, z=\n None if data_config['dim'] == 2 else data_config['res'] // 2, verbose=1)\n", (6001, 6198), False, 'from neuralparticles.tensorflow.tools.eval_helpers import EvalCallback, EvalCompleteCallback\n'), ((4718, 4978), 'neuralparticles.tools.data_helpers.PatchExtractor', 'PatchExtractor', (['eval_src_data', 'eval_sdf_data', 'patch_size', "pre_config['par_cnt']", "pre_config['surf']", "pre_config['stride']"], {'aux_data': 'eval_par_aux', 'features': "train_config['features']", 'pad_val': "pre_config['pad_val']", 'bnd': "(data_config['bnd'] / factor_d)"}), "(eval_src_data, eval_sdf_data, patch_size, pre_config[\n 'par_cnt'], pre_config['surf'], pre_config['stride'], aux_data=\n eval_par_aux, features=train_config['features'], pad_val=pre_config[\n 'pad_val'], bnd=data_config['bnd'] / factor_d)\n", (4732, 4978), False, 'from neuralparticles.tools.data_helpers import load_patches_from_file, PatchExtractor, get_data_pair, extract_particles\n'), ((1569, 1600), 'neuralparticles.tensorflow.tools.hyper_parameter.HyperParameter.parse', 'HyperParameter.parse', (['hyper_par'], {}), '(hyper_par)\n', (1589, 1600), False, 'from neuralparticles.tensorflow.tools.hyper_parameter import HyperParameter, ValueType, SearchType\n'), ((5145, 5310), 'neuralparticles.tools.data_helpers.extract_particles', 'extract_particles', (['eval_ref_data', '(eval_patch_extractors[i].positions[p_idx] * factor_d)', "pre_config['par_cnt_ref']", '(patch_size_ref / 2)', "pre_config['pad_val']"], {}), "(eval_ref_data, eval_patch_extractors[i].positions[p_idx] *\n factor_d, pre_config['par_cnt_ref'], patch_size_ref / 2, pre_config[\n 'pad_val'])\n", (5162, 5310), False, 'from neuralparticles.tools.data_helpers import load_patches_from_file, PatchExtractor, get_data_pair, extract_particles\n'), ((5459, 5534), 'numpy.count_nonzero', 'np.count_nonzero', (["(eval_src_patches[i][0][:, :, :1] != pre_config['pad_val'])"], {}), "(eval_src_patches[i][0][:, :, :1] != pre_config['pad_val'])\n", (5475, 5534), True, 'import numpy as np\n'), ((5569, 5638), 'numpy.count_nonzero', 'np.count_nonzero', (["(eval_ref_patches[i][:, :1] != pre_config['pad_val'])"], {}), "(eval_ref_patches[i][:, :1] != pre_config['pad_val'])\n", (5585, 5638), True, 'import numpy as np\n'), ((2111, 2139), 'os.path.dirname', 'os.path.dirname', (['config_path'], {}), '(config_path)\n', (2126, 2139), False, 'import os\n'), ((2225, 2253), 'os.path.dirname', 'os.path.dirname', (['config_path'], {}), '(config_path)\n', (2240, 2253), False, 'import os\n'), ((2344, 2372), 'os.path.dirname', 'os.path.dirname', (['config_path'], {}), '(config_path)\n', (2359, 2372), False, 'import os\n')]
|
# see https://www.spinningbytes.com/resources/germansentiment/ and https://github.com/aritter/twitter_download for obtaining the data.
import os
from pathlib import Path
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from conversion import convert_examples_to_features, convert_text_to_examples
def load_datasets(data_dir, num_categories, test_size):
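    """Read the downloaded tweets, map sentiment labels to integers
    (negative=0, positive=1, neutral=2; neutral rows are dropped when
    num_categories == 2) and return a train/test split."""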
data = pd.read_csv(os.path.join(data_dir, "downloaded.tsv"), sep="\t", na_values="Not Available",
names=["id", "sentiment", "tweet_id", "?", "text"], index_col='id')
data = data.dropna(how='any')[['sentiment', 'text']]
    data.loc[data['sentiment'] == 'neutral', 'sentiment'] = 2
    data.loc[data['sentiment'] == 'negative', 'sentiment'] = 0
    data.loc[data['sentiment'] == 'positive', 'sentiment'] = 1
if num_categories == 2:
data = data[np.logical_not(data.sentiment==2)]
X = data['text']
y = data['sentiment']
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_size, random_state=0)
return (X_train, y_train, X_test, y_test)
def get_tweets_data(data_dir, subtask, num_categories, tokenizer, max_seq_length, test_size):
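    """Return BERT-style input features (ids, masks, segment ids, labels) for
    train and test, converting the raw tweets on first use and caching the
    result as an .npz file."""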
fn = os.path.join(data_dir, "data_"+subtask+"_"+str(num_categories)+"cat_"+str(max_seq_length)+".npz")
if Path(fn).is_file():
        f = np.load(fn)
train_input_ids = f['train_input_ids']
train_input_masks = f['train_input_masks']
train_segment_ids = f['train_segment_ids']
train_labels = f['train_labels']
test_input_ids = f['test_input_ids']
test_input_masks = f['test_input_masks']
test_segment_ids = f['test_segment_ids']
test_labels = f['test_labels']
f.close()
else:
X_train, y_train, X_test, y_test = load_datasets(data_dir, num_categories, test_size)
# Create datasets (Only take up to max_seq_length words for memory)
train_text = X_train.to_list()
train_text = [" ".join(t.split()[0:max_seq_length]) for t in train_text]
train_text = np.array(train_text, dtype=object)[:, np.newaxis]
train_label = y_train.tolist()
test_text = X_test.tolist()
test_text = [" ".join(t.split()[0:max_seq_length]) for t in test_text]
test_text = np.array(test_text, dtype=object)[:, np.newaxis]
test_label = y_test.tolist()
# Convert data to InputExample format
train_examples = convert_text_to_examples(train_text, train_label)
test_examples = convert_text_to_examples(test_text, test_label)
# Convert to features
(
train_input_ids,
train_input_masks,
train_segment_ids,
train_labels,
) = convert_examples_to_features(
tokenizer, train_examples, max_seq_length=max_seq_length
)
(
test_input_ids,
test_input_masks,
test_segment_ids,
test_labels,
) = convert_examples_to_features(
tokenizer, test_examples, max_seq_length=max_seq_length
)
np.savez(fn,
train_input_ids=train_input_ids,
train_input_masks=train_input_masks,
train_segment_ids=train_segment_ids,
train_labels=train_labels,
test_input_ids=test_input_ids,
test_input_masks=test_input_masks,
test_segment_ids=test_segment_ids,
test_labels=test_labels
)
return (
train_input_ids,
train_input_masks,
train_segment_ids,
train_labels,
test_input_ids,
test_input_masks,
test_segment_ids,
test_labels
)
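
# Minimal usage sketch (not part of the original module). The tokenizer is an
# assumption -- any BERT-compatible tokenizer accepted by
# conversion.convert_examples_to_features should work; file names and the
# FullTokenizer import are placeholders.
#
#   from bert.tokenization import FullTokenizer  # hypothetical import
#   tokenizer = FullTokenizer(vocab_file="vocab.txt", do_lower_case=False)
#   (train_ids, train_masks, train_segs, train_labels,
#    test_ids, test_masks, test_segs, test_labels) = get_tweets_data(
#       data_dir="data", subtask="A", num_categories=2,
#       tokenizer=tokenizer, max_seq_length=128, test_size=0.2)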
|
[
"numpy.savez",
"pathlib.Path",
"sklearn.model_selection.train_test_split",
"conversion.convert_text_to_examples",
"numpy.logical_not",
"os.path.join",
"numpy.array",
"conversion.convert_examples_to_features",
"numpy.load"
] |
[((990, 1049), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': '(0)'}), '(X, y, test_size=test_size, random_state=0)\n', (1006, 1049), False, 'from sklearn.model_selection import train_test_split\n'), ((421, 461), 'os.path.join', 'os.path.join', (['data_dir', '"""downloaded.tsv"""'], {}), "(data_dir, 'downloaded.tsv')\n", (433, 461), False, 'import os\n'), ((1345, 1356), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (1352, 1356), True, 'import numpy as np\n'), ((2460, 2509), 'conversion.convert_text_to_examples', 'convert_text_to_examples', (['train_text', 'train_label'], {}), '(train_text, train_label)\n', (2484, 2509), False, 'from conversion import convert_examples_to_features, convert_text_to_examples\n'), ((2534, 2581), 'conversion.convert_text_to_examples', 'convert_text_to_examples', (['test_text', 'test_label'], {}), '(test_text, test_label)\n', (2558, 2581), False, 'from conversion import convert_examples_to_features, convert_text_to_examples\n'), ((2756, 2847), 'conversion.convert_examples_to_features', 'convert_examples_to_features', (['tokenizer', 'train_examples'], {'max_seq_length': 'max_seq_length'}), '(tokenizer, train_examples, max_seq_length=\n max_seq_length)\n', (2784, 2847), False, 'from conversion import convert_examples_to_features, convert_text_to_examples\n'), ((3000, 3090), 'conversion.convert_examples_to_features', 'convert_examples_to_features', (['tokenizer', 'test_examples'], {'max_seq_length': 'max_seq_length'}), '(tokenizer, test_examples, max_seq_length=\n max_seq_length)\n', (3028, 3090), False, 'from conversion import convert_examples_to_features, convert_text_to_examples\n'), ((3125, 3417), 'numpy.savez', 'np.savez', (['fn'], {'train_input_ids': 'train_input_ids', 'train_input_masks': 'train_input_masks', 'train_segment_ids': 'train_segment_ids', 'train_labels': 'train_labels', 'test_input_ids': 'test_input_ids', 'test_input_masks': 'test_input_masks', 'test_segment_ids': 'test_segment_ids', 'test_labels': 'test_labels'}), '(fn, train_input_ids=train_input_ids, train_input_masks=\n train_input_masks, train_segment_ids=train_segment_ids, train_labels=\n train_labels, test_input_ids=test_input_ids, test_input_masks=\n test_input_masks, test_segment_ids=test_segment_ids, test_labels=\n test_labels)\n', (3133, 3417), True, 'import numpy as np\n'), ((868, 903), 'numpy.logical_not', 'np.logical_not', (['(data.sentiment == 2)'], {}), '(data.sentiment == 2)\n', (882, 903), True, 'import numpy as np\n'), ((1314, 1322), 'pathlib.Path', 'Path', (['fn'], {}), '(fn)\n', (1318, 1322), False, 'from pathlib import Path\n'), ((2069, 2103), 'numpy.array', 'np.array', (['train_text'], {'dtype': 'object'}), '(train_text, dtype=object)\n', (2077, 2103), True, 'import numpy as np\n'), ((2298, 2331), 'numpy.array', 'np.array', (['test_text'], {'dtype': 'object'}), '(test_text, dtype=object)\n', (2306, 2331), True, 'import numpy as np\n')]
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import numpy as np
from argparse import ArgumentParser
import tensorflow as tf
# from lpot.adaptor.tf_utils.util import write_graph
from nets_factory import TFSlimNetsFactory
import copy
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tf.compat.v1.disable_eager_execution()
from inception_v4 import inception_v4, inception_v4_arg_scope
def save(model, path):
from tensorflow.python.platform import gfile
f = gfile.GFile(path, 'wb')
try:
f.write(model.as_graph_def().SerializeToString())
except AttributeError as no_model:
print("None of the quantized models fits the \
accuracy criteria: {0}".format(no_model))
except Exception as exc:
print("Unexpected error while saving the model: {0}".format(exc))
def main(_):
arg_parser = ArgumentParser(description='Parse args')
arg_parser.add_argument("--input-graph",
help='Specify the slim model',
dest='input_graph')
arg_parser.add_argument("--output-graph",
help='Specify tune result model save dir',
dest='output_graph')
arg_parser.add_argument("--config", default=None, help="tuning config")
arg_parser.add_argument('--benchmark', dest='benchmark', action='store_true', help='run benchmark')
arg_parser.add_argument('--tune', dest='tune', action='store_true', help='use lpot to tune.')
args = arg_parser.parse_args()
factory = TFSlimNetsFactory()
# user specific model can register to slim net factory
input_shape = [None, 299, 299, 3]
factory.register('inception_v4', inception_v4, input_shape, inception_v4_arg_scope)
if args.input_graph.endswith('.ckpt'):
# directly get the topology name from input_graph
topology = args.input_graph.rsplit('/', 1)[-1].split('.', 1)[0]
# get the model func from net factory
assert topology in factory.default_slim_models, \
'only support topology {}'.format(factory.default_slim_models)
net = copy.deepcopy(factory.networks_map[topology])
model_func = net.pop('model')
arg_scope = net.pop('arg_scope')()
inputs_shape = net.pop('input_shape')
kwargs = net
images = tf.compat.v1.placeholder(name='input', dtype=tf.float32, \
shape=inputs_shape)
from lpot.adaptor.tf_utils.util import get_slim_graph
model = get_slim_graph(args.input_graph, model_func, arg_scope, images, **kwargs)
else:
model = args.input_graph
if args.tune:
from lpot import Quantization
quantizer = Quantization(args.config)
q_model = quantizer(model)
save(q_model, args.output_graph)
if args.benchmark:
from lpot import Benchmark
evaluator = Benchmark(args.config)
results = evaluator(model=model)
for mode, result in results.items():
acc, batch_size, result_list = result
latency = np.array(result_list).mean() / batch_size
print('\n{} mode benchmark result:'.format(mode))
print('Accuracy is {:.3f}'.format(acc))
print('Batch size = {}'.format(batch_size))
print('Latency: {:.3f} ms'.format(latency * 1000))
print('Throughput: {:.3f} images/sec'.format(1./ latency))
if __name__ == '__main__':
tf.compat.v1.app.run()
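
# Example invocations (a sketch, not part of the original file; the script name
# and file paths are placeholders -- only the flags defined above are assumed):
#   tune:      python main.py --input-graph ./inception_v4.ckpt \
#                  --output-graph ./lpot_inception_v4.pb --config ./conf.yaml --tune
#   benchmark: python main.py --input-graph ./lpot_inception_v4.pb \
#                  --config ./conf.yaml --benchmark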
|
[
"tensorflow.compat.v1.placeholder",
"lpot.Benchmark",
"lpot.adaptor.tf_utils.util.get_slim_graph",
"argparse.ArgumentParser",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.compat.v1.app.run",
"tensorflow.compat.v1.disable_eager_execution",
"lpot.Quantization",
"numpy.array",
"copy.deepcopy",
"nets_factory.TFSlimNetsFactory",
"tensorflow.python.platform.gfile.GFile"
] |
[((805, 867), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (839, 867), True, 'import tensorflow as tf\n'), ((868, 906), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (904, 906), True, 'import tensorflow as tf\n'), ((1050, 1073), 'tensorflow.python.platform.gfile.GFile', 'gfile.GFile', (['path', '"""wb"""'], {}), "(path, 'wb')\n", (1061, 1073), False, 'from tensorflow.python.platform import gfile\n'), ((1424, 1464), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Parse args"""'}), "(description='Parse args')\n", (1438, 1464), False, 'from argparse import ArgumentParser\n'), ((2095, 2114), 'nets_factory.TFSlimNetsFactory', 'TFSlimNetsFactory', ([], {}), '()\n', (2112, 2114), False, 'from nets_factory import TFSlimNetsFactory\n'), ((3936, 3958), 'tensorflow.compat.v1.app.run', 'tf.compat.v1.app.run', ([], {}), '()\n', (3956, 3958), True, 'import tensorflow as tf\n'), ((2648, 2693), 'copy.deepcopy', 'copy.deepcopy', (['factory.networks_map[topology]'], {}), '(factory.networks_map[topology])\n', (2661, 2693), False, 'import copy\n'), ((2849, 2925), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'name': '"""input"""', 'dtype': 'tf.float32', 'shape': 'inputs_shape'}), "(name='input', dtype=tf.float32, shape=inputs_shape)\n", (2873, 2925), True, 'import tensorflow as tf\n'), ((3038, 3111), 'lpot.adaptor.tf_utils.util.get_slim_graph', 'get_slim_graph', (['args.input_graph', 'model_func', 'arg_scope', 'images'], {}), '(args.input_graph, model_func, arg_scope, images, **kwargs)\n', (3052, 3111), False, 'from lpot.adaptor.tf_utils.util import get_slim_graph\n'), ((3223, 3248), 'lpot.Quantization', 'Quantization', (['args.config'], {}), '(args.config)\n', (3235, 3248), False, 'from lpot import Quantization\n'), ((3394, 3416), 'lpot.Benchmark', 'Benchmark', (['args.config'], {}), '(args.config)\n', (3403, 3416), False, 'from lpot import Benchmark\n'), ((3567, 3588), 'numpy.array', 'np.array', (['result_list'], {}), '(result_list)\n', (3575, 3588), True, 'import numpy as np\n')]
|
import numpy as np
from models import *
from datasets import *
from util import parse_funct_arguments
import pickle
import itertools
def mse(y_true, y_mdl):
return np.mean((y_true - y_mdl)**2)
def train(mdl, dset):
# Get train
u_train, y_train = dset.get_train()
# Fit
X_train, z_train = construct_linear_system(u_train, y_train, dset.nus, dset.nys)
mdl = mdl.fit(X_train, z_train)
return mdl
def evaluate(mdl, dset):
# Get test
u_train, y_train = dset.get_train()
u_test, y_test = dset.get_test()
X_train, z_train = construct_linear_system(u_train, y_train, dset.nus, dset.nys)
X_test, z_test = construct_linear_system(u_test, y_test, dset.nus, dset.nys)
# One-step-ahead prediction
y_pred_train = back_to_original_shape(mdl.predict(X_train), n_seq=y_train.shape[1], n_out=y_train.shape[2])
y_pred_test = back_to_original_shape(mdl.predict(X_test), n_seq=y_test.shape[1], n_out=y_test.shape[2])
# Free run simulation
simulate = DynamicalSystem(dset.nys, dset.nus, mdl.predict, sd_v=0, sd_w=0)
y_sim_train = simulate(u_train)[simulate.order:, ...]
y_sim_test = simulate(u_test)[simulate.order:, ...]
d = {'mdl': repr(mdl), 'dset': repr(dset),
'mse_pred_train': mse(y_train[simulate.order:, ...], y_pred_train),
'mse_pred_test': mse(y_test[simulate.order:, ...], y_pred_test),
'mse_sim_train': mse(y_train[simulate.order:, ...], y_sim_train),
'mse_sim_test': mse(y_test[simulate.order:, ...], y_sim_test)
}
if hasattr(mdl, 'param_norm'):
d['param_norm'] = mdl.param_norm
pred_train = {'z_pred_train': y_pred_train, 'z_sim_train': y_sim_train}
pred_test = {'z_pred_test': y_pred_test, 'z_sim_test': y_sim_test}
return d, pred_train, pred_test
# ---- Main script ----
if __name__ == "__main__":
from tqdm import tqdm
import pandas as pd
import argparse
import os
parser = argparse.ArgumentParser(description='Estimate NARX model for different n features / n samples rate.')
parser.add_argument('-r', '--repetitions', default=1, type=int,
help='number of repetitions')
parser.add_argument('-o', '--output', default='./performance.csv',
help='output csv file.')
    parser.add_argument('-d', '--dset', type=str, default='ChenDSet',
                        help='dataset class to use')
    parser.add_argument('-m', '--nonlinear_model', default='RBFSampler',
                        help='nonlinear model class to use')
parser.add_argument('-n', '--num_points', default=60, type=int,
help='number of points')
parser.add_argument('-l', '--lower_proportion', default=-1, type=float,
help='the lowest value for the proportion (n features / n samples) is 10^l.')
parser.add_argument('-u', '--upper_proportion', default=2, type=float,
help='the upper value for the proportion (n features / n samples) is 10^u.')
parser.add_argument('-s', '--save_models', nargs='?', default='', const='./models',
help='save intermediary models.')
parser.add_argument('-w', '--reuse_weights', action='store_true',
help='use weights from previous model (with less features) when estimate the next one.')
args, unk = parser.parse_known_args()
# Saving models (when needed)
if args.save_models:
if not os.path.isdir(args.save_models):
os.mkdir(args.save_models)
def save_mdl(mdl):
fname = os.path.join(args.save_models, repr(mdl)+'.pkl')
with open(fname, 'wb') as f:
pickle.dump(mdl, f)
else:
def save_mdl(_mdl):
pass
# Get model (from command line)
ModelTmp = eval(args.nonlinear_model)
Model, _, unk = parse_funct_arguments(ModelTmp, unk, free_arguments=['n_features', 'random_state'])
# Get dataset (from the command line)
DatasetTmp = eval(args.dset)
Dataset, _, unk = parse_funct_arguments(DatasetTmp, unk)
dset = Dataset()
tqdm.write("Estimating baseline performance...")
baseline_mdl = Linear()
baseline_list = []
for seed in tqdm(range(args.repetitions)):
np.random.seed(seed)
d, pred_train, pred_test = evaluate(train(baseline_mdl, dset), dset)
d['seed'] = seed
d['proportion'] = 0 # To signal it is the baseline (n features being a constant)
baseline_list.append(d)
# Save model
save_mdl(baseline_mdl)
df = pd.DataFrame(baseline_list)
df.to_csv(args.output, index=False)
tqdm.write("Done")
tqdm.write("Estimating performance as a function of proportion...")
list_dict = []
underp = np.logspace(args.lower_proportion, 0, args.num_points // 2)
overp = np.logspace(0.00001, args.upper_proportion, args.num_points - args.num_points // 2)
proportions = np.concatenate((underp, overp))
run_instances = list(itertools.product(range(args.repetitions), proportions))
prev_mdl = None # used only if reuse_weights is True
num_samples = dset.effective_num_train_samples
for seed, proportion in tqdm(run_instances):
n_features = int(proportion * num_samples)
mdl = Model(n_features=n_features, random_state=seed)
if args.reuse_weights and hasattr(mdl, 'reuse_weights_from_mdl'):
if prev_mdl is not None:
mdl.reuse_weights_from_mdl(prev_mdl)
prev_mdl = mdl
d, pred_train, pred_test = evaluate(train(mdl, dset), dset)
d['proportion'] = proportion
d['seed'] = seed
df = df.append(d, ignore_index=True)
df.to_csv(args.output, index=False)
# Save model
save_mdl(mdl)
tqdm.write("Done")
|
[
"numpy.mean",
"pickle.dump",
"argparse.ArgumentParser",
"tqdm.tqdm.write",
"tqdm.tqdm",
"os.path.isdir",
"util.parse_funct_arguments",
"numpy.concatenate",
"numpy.random.seed",
"pandas.DataFrame",
"os.mkdir",
"numpy.logspace"
] |
[((170, 200), 'numpy.mean', 'np.mean', (['((y_true - y_mdl) ** 2)'], {}), '((y_true - y_mdl) ** 2)\n', (177, 200), True, 'import numpy as np\n'), ((1948, 2054), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Estimate NARX model for different n features / n samples rate."""'}), "(description=\n 'Estimate NARX model for different n features / n samples rate.')\n", (1971, 2054), False, 'import argparse\n'), ((3860, 3947), 'util.parse_funct_arguments', 'parse_funct_arguments', (['ModelTmp', 'unk'], {'free_arguments': "['n_features', 'random_state']"}), "(ModelTmp, unk, free_arguments=['n_features',\n 'random_state'])\n", (3881, 3947), False, 'from util import parse_funct_arguments\n'), ((4042, 4080), 'util.parse_funct_arguments', 'parse_funct_arguments', (['DatasetTmp', 'unk'], {}), '(DatasetTmp, unk)\n', (4063, 4080), False, 'from util import parse_funct_arguments\n'), ((4107, 4155), 'tqdm.tqdm.write', 'tqdm.write', (['"""Estimating baseline performance..."""'], {}), "('Estimating baseline performance...')\n", (4117, 4155), False, 'from tqdm import tqdm\n'), ((4568, 4595), 'pandas.DataFrame', 'pd.DataFrame', (['baseline_list'], {}), '(baseline_list)\n', (4580, 4595), True, 'import pandas as pd\n'), ((4640, 4658), 'tqdm.tqdm.write', 'tqdm.write', (['"""Done"""'], {}), "('Done')\n", (4650, 4658), False, 'from tqdm import tqdm\n'), ((4664, 4731), 'tqdm.tqdm.write', 'tqdm.write', (['"""Estimating performance as a function of proportion..."""'], {}), "('Estimating performance as a function of proportion...')\n", (4674, 4731), False, 'from tqdm import tqdm\n'), ((4764, 4823), 'numpy.logspace', 'np.logspace', (['args.lower_proportion', '(0)', '(args.num_points // 2)'], {}), '(args.lower_proportion, 0, args.num_points // 2)\n', (4775, 4823), True, 'import numpy as np\n'), ((4836, 4921), 'numpy.logspace', 'np.logspace', (['(1e-05)', 'args.upper_proportion', '(args.num_points - args.num_points // 2)'], {}), '(1e-05, args.upper_proportion, args.num_points - args.num_points //\n 2)\n', (4847, 4921), True, 'import numpy as np\n'), ((4938, 4969), 'numpy.concatenate', 'np.concatenate', (['(underp, overp)'], {}), '((underp, overp))\n', (4952, 4969), True, 'import numpy as np\n'), ((5189, 5208), 'tqdm.tqdm', 'tqdm', (['run_instances'], {}), '(run_instances)\n', (5193, 5208), False, 'from tqdm import tqdm\n'), ((5780, 5798), 'tqdm.tqdm.write', 'tqdm.write', (['"""Done"""'], {}), "('Done')\n", (5790, 5798), False, 'from tqdm import tqdm\n'), ((4262, 4282), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4276, 4282), True, 'import numpy as np\n'), ((3460, 3491), 'os.path.isdir', 'os.path.isdir', (['args.save_models'], {}), '(args.save_models)\n', (3473, 3491), False, 'import os\n'), ((3505, 3531), 'os.mkdir', 'os.mkdir', (['args.save_models'], {}), '(args.save_models)\n', (3513, 3531), False, 'import os\n'), ((3686, 3705), 'pickle.dump', 'pickle.dump', (['mdl', 'f'], {}), '(mdl, f)\n', (3697, 3705), False, 'import pickle\n')]
|
""" Plot data split by compartments
Classes:
* :py:class:`CompartmentPlot`: compartment plotting tool
"""
# Standard lib
from typing import Tuple, Optional, Dict
# 3rd party imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Our own imports
from .styling import set_plot_style
from .utils import bootstrap_ci, get_histogram
# Classes
class CompartmentPlot(object):
""" Plot data split by multiple compartments
:param int n_compartments:
How many compartments to split the data into
:param int topk:
How many samples to take from each compartment
"""
def __init__(self,
n_compartments: int,
topk: Optional[int] = None,
figsize: Tuple[int] = (8, 8),
plot_style: str = 'dark',
suffix: str = '.png'):
self.n_compartments = n_compartments
self.topk = topk
self.figsize = figsize
self.plot_style = plot_style
self.suffix = suffix
# Color palettes for the different compartments
self.colors = (['blue', 'orange', 'green', 'red', 'purple', 'grey'])[:n_compartments]
self.palletes = [sns.color_palette(c.capitalize()+'s', n_colors=10)
for c in self.colors]
# Calculated values
self._bin_indices = None
self._bin_values = None
self._xdata = None
self._xcolumn = None
self._ycolumn = None
self._plotdata = None
self._distdata = None
self._total_count = None
def calc_indices(self, values: np.ndarray):
""" Calculate the indicies for each bin
:param ndarray values:
The values to use to generate the bins
"""
if self.topk is None:
self.topk = values.shape[0] // self.n_compartments
if values.shape[0] < self.topk * self.n_compartments:
err = 'Got too few values for {} samples of {} compartments: {}'
err = err.format(self.topk, self.n_compartments, values.shape[0])
raise ValueError(err)
        print(f'Splitting into {self.n_compartments} compartments of {self.topk} samples each')
# Sort all the indices
indices = np.argsort(values)
# Split into even bins of size topk
bin_start = np.floor(np.linspace(0, indices.shape[0]-self.topk, self.n_compartments))
bin_start[bin_start < 0] = 0
bin_end = bin_start + self.topk
bin_end[bin_end > indices.shape[0]] = indices.shape[0]
# Extract the sorted bins for each compartment
self._bin_indices = [indices[int(s):int(e)] for s, e in zip(bin_start, bin_end)]
def calc_bin(self,
bin_value: np.ndarray,
label: str,
total_count: int) -> Dict[str, float]:
""" Calculate all the stats for a single bin
:param ndarray bin_value:
The 2D array of n timepoints x k samples
:param str label:
The label for this category
:param int total_count:
The total number of samples in this bin
:returns:
A dictionary of bin stats for plotting
"""
bin_mean = np.nanmean(bin_value, axis=1)
bin_std = np.nanstd(bin_value, axis=1)
bin5, bin25, bin50, bin75, bin95 = np.nanpercentile(bin_value, [5, 25, 50, 75, 95], axis=1)
bin_mean_ci0, bin_mean_ci1 = bootstrap_ci(bin_value, func=np.nanmean, axis=1)
assert bin_mean_ci0.shape == bin_mean.shape
assert bin_mean_ci1.shape == bin_mean.shape
bin_median_ci0, bin_median_ci1 = bootstrap_ci(bin_value, func=np.nanmedian, axis=1)
        assert bin_median_ci0.shape == bin50.shape
        assert bin_median_ci1.shape == bin50.shape
# Work out how many samples/bin we have in each timepoint
bin_count = np.sum(~np.isnan(bin_value), axis=1)
bin_support = bin_count / total_count
bin_support[~np.isfinite(bin_support)] = 0
# Stash all the values for later
return {
'mean' + label: bin_mean,
'mean ci low' + label: bin_mean_ci0,
'mean ci high' + label: bin_mean_ci1,
'std' + label: bin_std,
'p5' + label: bin5,
'p25' + label: bin25,
'p50' + label: bin50,
'p50 ci low' + label: bin_median_ci0,
'p50 ci high' + label: bin_median_ci1,
'p75' + label: bin75,
'p95' + label: bin95,
'count' + label: bin_count,
'support' + label: bin_support,
}
def split_comparison(self,
data: Dict[str, np.ndarray],
xcolumn: str,
ycolumn: str,
integrate_values: bool = False):
""" Split the comparison by the bins
:param dict[str, Any] data:
A dictionary containing the xcolumn and ycolumn data
:param str xcolumn:
The column containing the shared time vector to plot along
:param str ycolumn:
The column containing the values to bin along
:param bool integrate_values:
If True, integrate the resulting statistics over the xdata range
"""
xdata = data[xcolumn]
plotdata = {
xcolumn: xdata,
}
values = np.stack(data[ycolumn], axis=1)
total_count = np.sum(~np.isnan(values), axis=1)
if values.shape[0] != xdata.shape[0]:
raise ValueError('Expected {} with shape {}, got {}'.format(ycolumn, xdata.shape[0], values.shape[0]))
bin_values = []
# Add a set for all the values
plotdata.update(self.calc_bin(values, f' {ycolumn} all', total_count))
for i, indices in enumerate(self._bin_indices):
bin_value = values[:, indices]
bin_values.append(bin_value)
label = f' {ycolumn} bin{i+1}'
plotdata.update(self.calc_bin(bin_value, label, total_count))
self._plotdata = plotdata
self._xdata = xdata
self._xcolumn = xcolumn
self._ycolumn = ycolumn
self._bin_values = bin_values
self._total_count = total_count
def calc_envelope(self, label: str, envelope: str = 'std') -> Tuple[float]:
""" Calculate the envelope (high/low) stats for a label
:param str label:
The label to calculate the envelope for
:param str envelope:
Which stats to calculate the envelope with
:returns:
A tuple of low, high values
"""
plotdata = self._plotdata
if envelope == 'std':
value_mean = plotdata['mean' + label]
value_std = plotdata['std' + label]
value_st = value_mean - value_std
value_ed = value_mean + value_std
elif envelope == 'mean ci':
value_st = plotdata['mean ci low' + label]
value_ed = plotdata['mean ci high' + label]
elif envelope == 'median ci':
value_st = plotdata['p50 ci low' + label]
value_ed = plotdata['p50 ci high' + label]
elif envelope == 'iqr':
value_st = plotdata['p25' + label]
value_ed = plotdata['p75' + label]
else:
raise ValueError('Unknown envelope function "{}"'.format(envelope))
return value_st, value_ed
def plot_raw_tracks(self, outfile=None, xlabel=None, ylabel=None):
""" Plot individual raw tracks """
with set_plot_style(self.plot_style) as style:
fig, ax = plt.subplots(1, 1, figsize=self.figsize)
for i, bin_value in enumerate(self._bin_values):
ax.set_prop_cycle(color=self.palletes[i])
ax.plot(self._xdata, bin_value, '-')
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
style.show(outfile=outfile, fig=fig)
def plot_mean_tracks(self, outfile=None, xlabel=None, ylabel=None, envelope='std', mode='split'):
""" Mean and deviation envelope
:param Path outfile:
If not None, the file to write out
:param str xlabel:
Label for the x-axis (time)
:param str ylabel:
Label for the y-axis (category)
"""
plotdata = self._plotdata
with set_plot_style(self.plot_style) as style:
fig, ax = plt.subplots(1, 1, figsize=self.figsize)
if mode == 'split':
for i in range(self.n_compartments):
label = ' {} bin{}'.format(self._ycolumn, i+1)
value_mean = plotdata['mean' + label]
value_st, value_ed = self.calc_envelope(label, envelope)
ax.fill_between(self._xdata, value_st, value_ed,
facecolor=self.colors[i], alpha=0.5)
ax.plot(self._xdata, value_mean, '-', color=self.colors[i], linewidth=2)
elif mode == 'all':
label = ' {} all'.format(self._ycolumn)
value_mean = plotdata['mean' + label]
value_st, value_ed = self.calc_envelope(label, envelope)
ax.fill_between(self._xdata, value_st, value_ed,
facecolor='b', alpha=0.5)
ax.plot(self._xdata, value_mean, '-', color='b', linewidth=2)
else:
raise ValueError('Unknown mode {}'.format(mode))
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
style.show(outfile=outfile, fig=fig)
def plot_median_tracks(self, outfile=None, xlabel=None, ylabel=None, envelope='iqr', mode='split'):
""" Median and 25/75% envelope """
plotdata = self._plotdata
with set_plot_style(self.plot_style) as style:
fig, ax = plt.subplots(1, 1, figsize=self.figsize)
if mode == 'split':
for i in range(self.n_compartments):
label = ' {} bin{}'.format(self._ycolumn, i+1)
value_mid = plotdata['p50' + label]
value_st, value_ed = self.calc_envelope(label, envelope)
ax.fill_between(self._xdata, value_st, value_ed,
facecolor=self.colors[i], alpha=0.5)
ax.plot(self._xdata, value_mid, '-', color=self.colors[i], linewidth=2)
elif mode == 'all':
label = ' {} all'.format(self._ycolumn)
value_mean = plotdata['p50' + label]
value_st, value_ed = self.calc_envelope(label, envelope)
ax.fill_between(self._xdata, value_st, value_ed,
facecolor='b', alpha=0.5)
ax.plot(self._xdata, value_mean, '-', color='b', linewidth=2)
else:
raise ValueError('Unknown mode {}'.format(mode))
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
style.show(outfile=outfile, fig=fig)
def plot_track_support(self, outfile=None, xlabel=None, ylabel=None):
""" Plot how many tracks are in a given bin at a given time """
plotdata = self._plotdata
with set_plot_style(self.plot_style) as style:
fig_x, fig_y = self.figsize
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(fig_x*2, fig_y))
ax1.plot(self._xdata, self._total_count, '-k', linewidth=2)
ax2.hlines([100], np.min(self._xdata), np.max(self._xdata), colors=['k'], linewidth=2)
for i in range(self.n_compartments):
label = ' {} bin{}'.format(self._ycolumn, i+1)
count = plotdata['count' + label]
support = plotdata['support' + label]
ax1.plot(self._xdata, count, '-', color=self.colors[i], linewidth=2)
ax2.plot(self._xdata, support*100, '-', color=self.colors[i], linewidth=2)
if xlabel is not None:
ax1.set_xlabel(xlabel)
ax2.set_xlabel(xlabel)
ax1.set_ylabel('Num Tracks')
ax2.set_ylabel('Percent Total Tracks')
ax1.set_ylim([0, np.max(self._total_count)*1.02])
ax2.set_ylim([0, 102])
style.show(outfile=outfile, fig=fig)
def plot_dist_histogram(self, values, outfile=None, xlabel=None, ylabel=None):
""" Plot where on the histogram each value occurs
:param ndarray values:
The values to generate a histogram for
:param Path outfile:
If not None, the path to save the plot to
"""
# Histogram the distribution and which compartments are being labeled
_, _, kernel_x, kernel_y = get_histogram(values, bins=10, kernel_smoothing=True)
compartment_values = [values[indices] for indices in self._bin_indices]
distdata = {
'compartment': [],
'value': [],
'density': [],
}
# Now, plot each compartment on the total histogram
with set_plot_style(self.plot_style) as style:
fig, ax = plt.subplots(1, 1, figsize=self.figsize)
ax.plot(kernel_x, kernel_y, '-', color='gray')
distdata['compartment'].extend(0 for _ in kernel_x)
distdata['value'].extend(kernel_x)
distdata['density'].extend(kernel_y)
for i, compartment_value in enumerate(compartment_values):
compartment_min = np.min(compartment_value)
compartment_max = np.max(compartment_value)
kernel_mask = np.logical_and(kernel_x >= compartment_min,
kernel_x <= compartment_max)
compartment_x = kernel_x[kernel_mask]
compartment_y = kernel_y[kernel_mask]
distdata['compartment'].extend(i+1 for _ in compartment_x)
distdata['value'].extend(compartment_x)
distdata['density'].extend(compartment_y)
ax.fill_between(compartment_x, 0, compartment_y,
facecolor=self.colors[i], alpha=0.5)
ax.plot(compartment_x, compartment_y, '-',
color=self.colors[i], linewidth=2)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
style.show(outfile=outfile, fig=fig)
self._distdata = distdata
def save_plotdata(self, outfile, suffix='.csv'):
""" Save the plot data """
if self._plotdata is None:
raise ValueError('No distribution data, call split_comparison first')
outfile = outfile.parent / (outfile.stem + suffix)
print('Writing distribution data to {}'.format(outfile))
plotdata = pd.DataFrame(self._plotdata)
if suffix == '.csv':
plotdata.to_csv(str(outfile), header=True, index=False)
elif suffix == '.xlsx':
plotdata.to_excel(str(outfile), header=True, index=False)
else:
raise KeyError('Unknown plot data output file type: {}'.format(outfile))
def save_distdata(self, outfile, suffix='.csv'):
""" Save the distribution data """
if self._distdata is None:
raise ValueError('No distribution data, call plot_dist_histogram first')
outfile = outfile.parent / (outfile.stem + suffix)
print('Writing distribution data to {}'.format(outfile))
distdata = pd.DataFrame(self._distdata)
if suffix == '.csv':
distdata.to_csv(str(outfile), header=True, index=False)
elif suffix == '.xlsx':
distdata.to_excel(str(outfile), header=True, index=False)
else:
raise KeyError('Unknown dist data output file type: {}'.format(outfile))
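
# Minimal usage sketch (not part of the original module; data names are
# synthetic placeholders). Tracks are binned by a per-track statistic, then
# plotted per compartment:
#
#   values = np.array([np.nanmean(t) for t in tracks])      # one score per track
#   data = {'time': time_vector, 'speed': tracks}           # tracks: list of 1D arrays
#   plot = CompartmentPlot(n_compartments=3, topk=50)
#   plot.calc_indices(values)                               # pick top-k tracks per bin
#   plot.split_comparison(data, xcolumn='time', ycolumn='speed')
#   plot.plot_mean_tracks(outfile=None, xlabel='Time (min)', ylabel='Speed', envelope='mean ci')
#   plot.plot_dist_histogram(values, outfile=None, xlabel='Mean speed')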
|
[
"numpy.nanstd",
"numpy.nanpercentile",
"numpy.logical_and",
"numpy.max",
"numpy.argsort",
"numpy.nanmean",
"numpy.stack",
"numpy.linspace",
"numpy.isnan",
"numpy.isfinite",
"numpy.min",
"pandas.DataFrame",
"matplotlib.pyplot.subplots"
] |
[((2280, 2298), 'numpy.argsort', 'np.argsort', (['values'], {}), '(values)\n', (2290, 2298), True, 'import numpy as np\n'), ((3264, 3293), 'numpy.nanmean', 'np.nanmean', (['bin_value'], {'axis': '(1)'}), '(bin_value, axis=1)\n', (3274, 3293), True, 'import numpy as np\n'), ((3312, 3340), 'numpy.nanstd', 'np.nanstd', (['bin_value'], {'axis': '(1)'}), '(bin_value, axis=1)\n', (3321, 3340), True, 'import numpy as np\n'), ((3385, 3441), 'numpy.nanpercentile', 'np.nanpercentile', (['bin_value', '[5, 25, 50, 75, 95]'], {'axis': '(1)'}), '(bin_value, [5, 25, 50, 75, 95], axis=1)\n', (3401, 3441), True, 'import numpy as np\n'), ((5430, 5461), 'numpy.stack', 'np.stack', (['data[ycolumn]'], {'axis': '(1)'}), '(data[ycolumn], axis=1)\n', (5438, 5461), True, 'import numpy as np\n'), ((15139, 15167), 'pandas.DataFrame', 'pd.DataFrame', (['self._plotdata'], {}), '(self._plotdata)\n', (15151, 15167), True, 'import pandas as pd\n'), ((15828, 15856), 'pandas.DataFrame', 'pd.DataFrame', (['self._distdata'], {}), '(self._distdata)\n', (15840, 15856), True, 'import pandas as pd\n'), ((2373, 2438), 'numpy.linspace', 'np.linspace', (['(0)', '(indices.shape[0] - self.topk)', 'self.n_compartments'], {}), '(0, indices.shape[0] - self.topk, self.n_compartments)\n', (2384, 2438), True, 'import numpy as np\n'), ((7661, 7701), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'self.figsize'}), '(1, 1, figsize=self.figsize)\n', (7673, 7701), True, 'import matplotlib.pyplot as plt\n'), ((8551, 8591), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'self.figsize'}), '(1, 1, figsize=self.figsize)\n', (8563, 8591), True, 'import matplotlib.pyplot as plt\n'), ((10069, 10109), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'self.figsize'}), '(1, 1, figsize=self.figsize)\n', (10081, 10109), True, 'import matplotlib.pyplot as plt\n'), ((11630, 11676), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(fig_x * 2, fig_y)'}), '(1, 2, figsize=(fig_x * 2, fig_y))\n', (11642, 11676), True, 'import matplotlib.pyplot as plt\n'), ((13410, 13450), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'self.figsize'}), '(1, 1, figsize=self.figsize)\n', (13422, 13450), True, 'import matplotlib.pyplot as plt\n'), ((3922, 3941), 'numpy.isnan', 'np.isnan', (['bin_value'], {}), '(bin_value)\n', (3930, 3941), True, 'import numpy as np\n'), ((4018, 4042), 'numpy.isfinite', 'np.isfinite', (['bin_support'], {}), '(bin_support)\n', (4029, 4042), True, 'import numpy as np\n'), ((5492, 5508), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (5500, 5508), True, 'import numpy as np\n'), ((11777, 11796), 'numpy.min', 'np.min', (['self._xdata'], {}), '(self._xdata)\n', (11783, 11796), True, 'import numpy as np\n'), ((11798, 11817), 'numpy.max', 'np.max', (['self._xdata'], {}), '(self._xdata)\n', (11804, 11817), True, 'import numpy as np\n'), ((13775, 13800), 'numpy.min', 'np.min', (['compartment_value'], {}), '(compartment_value)\n', (13781, 13800), True, 'import numpy as np\n'), ((13835, 13860), 'numpy.max', 'np.max', (['compartment_value'], {}), '(compartment_value)\n', (13841, 13860), True, 'import numpy as np\n'), ((13891, 13963), 'numpy.logical_and', 'np.logical_and', (['(kernel_x >= compartment_min)', '(kernel_x <= compartment_max)'], {}), '(kernel_x >= compartment_min, kernel_x <= compartment_max)\n', (13905, 13963), True, 'import numpy as np\n'), ((12473, 12498), 'numpy.max', 'np.max', (['self._total_count'], 
{}), '(self._total_count)\n', (12479, 12498), True, 'import numpy as np\n')]
|
import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D
from tensorflow.keras.models import Model
import numpy as np
class SelfAttention(Model):
def __init__(self, d_model, spatial_dims, positional_encoding=True, name="self_attention"):
'''
d_model : number of output channels
        spatial_dims : spatial dimensions of the input tensor (y, x)
if positional_encoding: depth must correspond to input channel number
adapted from: https://www.tensorflow.org/tutorials/text/transformer
'''
super().__init__(name=name)
self.d_model = d_model
self.spatial_dims=spatial_dims
self.spatial_dim = np.prod(spatial_dims)
self.wq = Dense(self.d_model, name=name+"_q")
self.wk = Dense(self.d_model, name=name+"_k")
self.wv = Dense(self.d_model, name=name+"_w")
self.positional_encoding=positional_encoding
if positional_encoding:
self.pos_embedding = Embedding(self.spatial_dim, d_model, name=name+"pos_enc") # TODO test other positional encoding. in particular that encodes X and Y
def call(self, x):
'''
x : tensor with shape (batch_size, y, x, channels)
'''
shape = tf.shape(x)
batch_size = shape[0]
#spatial_dims = shape[1:-1]
#spatial_dim = tf.reduce_prod(spatial_dims)
depth_dim = shape[3]
if self.positional_encoding:
x_index = tf.range(self.spatial_dim, dtype=tf.int32)
pos_emb = self.pos_embedding(x_index) # (spa_dim, d_model)
pos_emb = tf.reshape(pos_emb, (self.spatial_dims[0], self.spatial_dims[1], self.d_model)) #for broadcasting purpose
x = x + pos_emb # broadcast
q = self.wq(x) # (batch_size, *spa_dims, d_model)
k = self.wk(x) # (batch_size, *spa_dims, d_model)
v = self.wv(x) # (batch_size, *spa_dims, d_model)
q = tf.reshape(q, (batch_size, -1, depth_dim)) # (batch_size, spa_dim, d_model)
k = tf.reshape(k, (batch_size, -1, depth_dim))
v = tf.reshape(v, (batch_size, -1, depth_dim))
# scaled_attention.shape == (batch_size, spa_dims, depth)
# attention_weights.shape == (batch_size, spa_dims, spa_dims)
scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v)
output = tf.reshape(scaled_attention, (batch_size, self.spatial_dims[0], self.spatial_dims[1], self.d_model))
tf.identity(attention_weights, name=self.name+"_attention_weights")
return output, attention_weights
def compute_output_shape(self, input_shape):
return input_shape[:-1]+(self.d_model,), (input_shape[0],self.spatial_dim,self.spatial_dim)
def scaled_dot_product_attention(q, k, v):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
Returns:
output, attention_weights
from : https://www.tensorflow.org/tutorials/text/transformer
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
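# ---------- usage sketch ----------
# A minimal sketch of how the layer above can be exercised, assuming an arbitrary
# batch size, an 8x8 spatial grid and d_model=64; with positional_encoding=True the
# input channel count must equal d_model (see the constructor docstring).
if __name__ == "__main__":
    demo_x = tf.random.uniform((2, 8, 8, 64))  # (batch, y, x, channels)
    demo_layer = SelfAttention(d_model=64, spatial_dims=(8, 8))
    demo_out, demo_weights = demo_layer(demo_x)
    print(demo_out.shape)      # expected: (2, 8, 8, 64)
    print(demo_weights.shape)  # expected: (2, 64, 64)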
|
[
"numpy.prod",
"tensorflow.shape",
"tensorflow.math.sqrt",
"tensorflow.keras.layers.Embedding",
"tensorflow.range",
"tensorflow.keras.layers.Dense",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow.identity"
] |
[((3406, 3439), 'tensorflow.matmul', 'tf.matmul', (['q', 'k'], {'transpose_b': '(True)'}), '(q, k, transpose_b=True)\n', (3415, 3439), True, 'import tensorflow as tf\n'), ((3719, 3766), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['scaled_attention_logits'], {'axis': '(-1)'}), '(scaled_attention_logits, axis=-1)\n', (3732, 3766), True, 'import tensorflow as tf\n'), ((3812, 3843), 'tensorflow.matmul', 'tf.matmul', (['attention_weights', 'v'], {}), '(attention_weights, v)\n', (3821, 3843), True, 'import tensorflow as tf\n'), ((736, 757), 'numpy.prod', 'np.prod', (['spatial_dims'], {}), '(spatial_dims)\n', (743, 757), True, 'import numpy as np\n'), ((776, 813), 'tensorflow.keras.layers.Dense', 'Dense', (['self.d_model'], {'name': "(name + '_q')"}), "(self.d_model, name=name + '_q')\n", (781, 813), False, 'from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D\n'), ((830, 867), 'tensorflow.keras.layers.Dense', 'Dense', (['self.d_model'], {'name': "(name + '_k')"}), "(self.d_model, name=name + '_k')\n", (835, 867), False, 'from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D\n'), ((884, 921), 'tensorflow.keras.layers.Dense', 'Dense', (['self.d_model'], {'name': "(name + '_w')"}), "(self.d_model, name=name + '_w')\n", (889, 921), False, 'from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D\n'), ((1297, 1308), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1305, 1308), True, 'import tensorflow as tf\n'), ((1989, 2031), 'tensorflow.reshape', 'tf.reshape', (['q', '(batch_size, -1, depth_dim)'], {}), '(q, (batch_size, -1, depth_dim))\n', (1999, 2031), True, 'import tensorflow as tf\n'), ((2077, 2119), 'tensorflow.reshape', 'tf.reshape', (['k', '(batch_size, -1, depth_dim)'], {}), '(k, (batch_size, -1, depth_dim))\n', (2087, 2119), True, 'import tensorflow as tf\n'), ((2132, 2174), 'tensorflow.reshape', 'tf.reshape', (['v', '(batch_size, -1, depth_dim)'], {}), '(v, (batch_size, -1, depth_dim))\n', (2142, 2174), True, 'import tensorflow as tf\n'), ((2413, 2518), 'tensorflow.reshape', 'tf.reshape', (['scaled_attention', '(batch_size, self.spatial_dims[0], self.spatial_dims[1], self.d_model)'], {}), '(scaled_attention, (batch_size, self.spatial_dims[0], self.\n spatial_dims[1], self.d_model))\n', (2423, 2518), True, 'import tensorflow as tf\n'), ((2522, 2591), 'tensorflow.identity', 'tf.identity', (['attention_weights'], {'name': "(self.name + '_attention_weights')"}), "(attention_weights, name=self.name + '_attention_weights')\n", (2533, 2591), True, 'import tensorflow as tf\n'), ((3582, 3598), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['dk'], {}), '(dk)\n', (3594, 3598), True, 'import tensorflow as tf\n'), ((1038, 1097), 'tensorflow.keras.layers.Embedding', 'Embedding', (['self.spatial_dim', 'd_model'], {'name': "(name + 'pos_enc')"}), "(self.spatial_dim, d_model, name=name + 'pos_enc')\n", (1047, 1097), False, 'from tensorflow.keras.layers import Layer, Dense, Reshape, Embedding, Concatenate, Conv2D\n'), ((1516, 1558), 'tensorflow.range', 'tf.range', (['self.spatial_dim'], {'dtype': 'tf.int32'}), '(self.spatial_dim, dtype=tf.int32)\n', (1524, 1558), True, 'import tensorflow as tf\n'), ((1652, 1731), 'tensorflow.reshape', 'tf.reshape', (['pos_emb', '(self.spatial_dims[0], self.spatial_dims[1], self.d_model)'], {}), '(pos_emb, (self.spatial_dims[0], self.spatial_dims[1], self.d_model))\n', (1662, 1731), True, 'import tensorflow as tf\n'), ((3511, 3522), 'tensorflow.shape', 'tf.shape', 
(['k'], {}), '(k)\n', (3519, 3522), True, 'import tensorflow as tf\n')]
|
'''
Collect results in Quantum ESPRESSO
'''
import sys
import numpy as np
from pymatgen.core import Structure
from . import structure as qe_structure
from ... import utility
from ...IO import pkl_data
from ...IO import read_input as rin
def collect_qe(current_id, work_path):
# ---------- check optimization in previous stage
try:
with open(work_path+rin.qe_outfile, 'r') as fpout:
lines = fpout.readlines()
check_opt = 'not_yet'
for line in lines:
if 'End final coordinates' in line:
check_opt = 'done'
except Exception as e:
print(e)
check_opt = 'no_file'
# ---------- obtain energy and magmom
try:
with open(work_path+rin.qe_outfile, 'r') as fpout:
lines = fpout.readlines()
energy = np.nan
for line in reversed(lines):
if line.startswith('!'):
energy = float(line.split()[-2]) # in Ry
energy = energy * utility.ry2ev / float(rin.natot) # Ry/cell --> eV/atom
break
magmom = np.nan # implemented by <NAME> 2020/10/04
for line in reversed(lines):
if line.find("total magnetization") >= 0:
muB = line.split()
magmom = float(muB[3])
break
except Exception as e:
energy = np.nan # error
magmom = np.nan # error
print(e)
print(' Structure ID {0}, could not obtain energy from {1}'.format(
current_id, rin.qe_outfile))
# ---------- collect the last structure
try:
lines_cell = qe_structure.extract_cell_parameters(
work_path+rin.qe_outfile)
if lines_cell is None:
lines_cell = qe_structure.extract_cell_parameters(
work_path+rin.qe_infile)
lines_atom = qe_structure.extract_atomic_positions(
work_path+rin.qe_outfile)
if lines_atom is None:
lines_atom = qe_structure.extract_atomic_positions(
work_path+rin.qe_infile)
opt_struc = qe_structure.from_lines(lines_cell, lines_atom)
# ------ opt_qe-structure
with open('./data/opt_qe-structure', 'a') as fstruc:
fstruc.write('# ID {0:d}\n'.format(current_id))
qe_structure.write(opt_struc, './data/opt_qe-structure', mode='a')
except Exception as e:
print(e)
opt_struc = None
# ---------- check
if np.isnan(energy):
opt_struc = None
if opt_struc is None:
energy = np.nan
magmom = np.nan
# ---------- return
return opt_struc, energy, magmom, check_opt
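# ---------- usage sketch ----------
# A minimal sketch of a call to collect_qe, assuming a hypothetical structure ID and
# work directory (neither comes from this file):
#     opt_struc, energy, magmom, check_opt = collect_qe(5, './work/ID_5/')
# energy is returned in eV/atom, magmom is the total magnetization parsed from the
# QE output, and check_opt is one of 'done', 'not_yet', or 'no_file'.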
def get_energy_step_qe(energy_step_data, current_id, work_path):
'''
get energy step data in eV/atom
energy_step_data[ID][stage][step]
energy_step_data[ID][0] <-- stage 1
energy_step_data[ID][1] <-- stage 2
'''
try:
# ---------- read output file
with open(work_path+rin.qe_outfile, 'r') as f:
lines = f.readlines()
# ---------- get energy step
energy_step = []
final_flag = False # End final coordinates
vc_flag = False # vc-relax
for line in lines:
if line.startswith('!'):
energy_step.append(line.split()[4])
# ------ check opt and vc-relax
if 'End final coordinates' in line:
final_flag = True
if 'CELL_PARAMETERS' in line:
vc_flag = True
# ------ delete last energy (after End final coordinates)
if final_flag and vc_flag:
energy_step.pop(-1)
# ------ list --> array, Ry/cell --> eV/atom
if not energy_step:
energy_step = None # if empty
print('#### ID: {0}: failed to parse energy_step\n'.format(
current_id), file=sys.stderr)
else:
energy_step = utility.ry2ev / rin.natot * np.array(energy_step,
dtype='float')
except Exception as e:
energy_step = None
print(e, '#### ID: {0}: failed to parse energy_step\n'.format(
current_id), file=sys.stderr)
# ---------- append energy_step
if energy_step_data.get(current_id) is None:
energy_step_data[current_id] = [] # initialize
energy_step_data[current_id].append(energy_step)
# ---------- save energy_step_data
pkl_data.save_energy_step(energy_step_data)
# ---------- return
return energy_step_data
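# ---------- illustrative access pattern ----------
# Assuming a hypothetical structure ID 7 with two stages already collected, the nested
# structure documented above is indexed as:
#     energy_step_data[7][0]      # stage 1: array of energies in eV/atom, one per relaxation step
#     energy_step_data[7][1][-1]  # stage 2: last energy in eV/atom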
def get_struc_step_qe(struc_step_data, current_id, work_path):
'''
get structure step data
# ---------- args
struc_step_data: (dict) the key is structure ID
struc_step_data[ID][stage][step]
struc_step_data[ID][0] <-- stage 1
struc_step_data[ID][1] <-- stage 2
'''
try:
struc_step = []
# ------ init struc from pwscf.in
_extract_struc_qe(work_path+rin.qe_infile, struc_step)
# ------ struc step from pwscf.out
_extract_struc_qe(work_path+rin.qe_outfile, struc_step)
# ------ delete last structure due to duplication
struc_step.pop(-1)
except Exception as e:
struc_step = None
        print(e, '#### ID: {0}: failed to parse in struc_step\n'.format(
current_id), file=sys.stderr)
# ---------- append struc_step_data
if struc_step_data.get(current_id) is None:
struc_step_data[current_id] = [] # initialize
struc_step_data[current_id].append(struc_step)
# ---------- save struc_step_data
pkl_data.save_struc_step(struc_step_data)
# ---------- return
return struc_step_data
def _extract_struc_qe(filename, struc_step):
# ---------- read a file
with open(filename, 'r') as f:
lines = f.readlines()
# ---------- extract struc
read_cell = False
read_coords = False
vc_flag = False # in case of vc-relax
for line in lines:
# ------ cell part
if read_cell:
lattice.append(line.split())
if len(lattice) == 3:
read_cell = False
lattice = np.array(lattice, dtype='float')
if 'CELL_PARAMETERS' in line:
read_cell = True
vc_flag = True
lattice = []
# ------ coords part
if read_coords:
lsplit = line.split()
species.append(lsplit[0])
coords.append(lsplit[1:])
if len(coords) == rin.natot:
read_coords = False
coords = np.array(coords, dtype='float')
# ---- gen struc
if not vc_flag: # empty lattice, use init lattice
lattice = struc_step[0].lattice
struc = Structure(lattice, species, coords)
struc_step.append(struc)
if 'ATOMIC_POSITIONS' in line:
read_coords = True
species = []
coords = []
def get_force_step_qe(force_step_data, current_id, work_path):
'''
get force step data in eV/angstrom
# ---------- args
force_step_data: (dict) the key is structure ID
force_step_data[ID][stage][step]
force_step_data[ID][0] <-- stage 1
force_step_data[ID][1] <-- stage 2
'''
try:
# ---------- read output file
with open(work_path+rin.qe_outfile, 'r') as f:
lines = f.readlines()
# ---------- get force step
force_step = []
read_force = False
final_flag = False # End final coordinates
vc_flag = False # in case of vc-relax
for line in lines:
if 'atom 1 type 1 force' in line:
read_force = True
force = []
if read_force:
force.append(line.split()[6:])
if len(force) == rin.natot:
read_force = False
force_step.append(utility.ry2ev / utility.bohr2ang * np.array(
force, dtype='float'))
# ------ check opt and vc-relax
if 'End final coordinates' in line:
final_flag = True
if 'CELL_PARAMETERS' in line:
vc_flag = True
# ------ delete last energy (after End final coordinates)
if final_flag and vc_flag:
force_step.pop(-1)
# ------ if empty
if len(force_step) == 0:
force_step = None
print('#### ID: {0}: failed to parse force_step\n'.format(
current_id), file=sys.stderr)
except Exception as e:
force_step = None
print(e, '#### ID: {0}: failed to parse in force_step\n'.format(
current_id), file=sys.stderr)
# ---------- append force_step
if force_step_data.get(current_id) is None:
force_step_data[current_id] = [] # initialize
force_step_data[current_id].append(force_step)
# ---------- save force_step_data
pkl_data.save_force_step(force_step_data)
# ---------- return
return force_step_data
def get_stress_step_qe(stress_step_data, current_id, work_path):
'''
get stress step data in eV/ang**3
# ---------- args
stress_step_data: (dict) the key is structure ID
stress_step_data[ID][stage][step]
stress_step_data[ID][0] <-- stage 1
stress_step_data[ID][1] <-- stage 2
'''
try:
# ---------- read output file
with open(work_path+rin.qe_outfile, 'r') as f:
lines = f.readlines()
# ---------- get stress step
stress_step = []
read_stress = False
final_flag = False # End final coordinates
vc_flag = False # in case of vc-relax
for line in lines:
if read_stress:
stress.append(line.split()[3:])
if len(stress) == 3:
read_stress = False
stress_step.append(utility.kbar2ev_ang3 * np.array(
stress, dtype='float'))
if 'total stress (Ry/bohr**3)' in line:
read_stress = True
stress = []
# ------ check opt and vc-relax
if 'End final coordinates' in line:
final_flag = True
if 'CELL_PARAMETERS' in line:
vc_flag = True
# ------ delete last energy (after End final coordinates)
if final_flag and vc_flag:
stress_step.pop(-1)
# ------ if empty
if len(stress_step) == 0:
stress_step = None
print('#### ID: {0}: failed to parse stress_step\n'.format(
current_id), file=sys.stderr)
except Exception as e:
stress_step = None
print(e, '#### ID: {0}: failed to parse in stress_step\n'.format(
current_id), file=sys.stderr)
# ---------- append stress_step
if stress_step_data.get(current_id) is None:
stress_step_data[current_id] = [] # initialize
stress_step_data[current_id].append(stress_step)
# ---------- save stress_step_data
pkl_data.save_stress_step(stress_step_data)
# ---------- return
return stress_step_data
|
[
"pymatgen.core.Structure",
"numpy.array",
"numpy.isnan"
] |
[((2478, 2494), 'numpy.isnan', 'np.isnan', (['energy'], {}), '(energy)\n', (2486, 2494), True, 'import numpy as np\n'), ((3957, 3993), 'numpy.array', 'np.array', (['energy_step'], {'dtype': '"""float"""'}), "(energy_step, dtype='float')\n", (3965, 3993), True, 'import numpy as np\n'), ((6166, 6198), 'numpy.array', 'np.array', (['lattice'], {'dtype': '"""float"""'}), "(lattice, dtype='float')\n", (6174, 6198), True, 'import numpy as np\n'), ((6583, 6614), 'numpy.array', 'np.array', (['coords'], {'dtype': '"""float"""'}), "(coords, dtype='float')\n", (6591, 6614), True, 'import numpy as np\n'), ((6793, 6828), 'pymatgen.core.Structure', 'Structure', (['lattice', 'species', 'coords'], {}), '(lattice, species, coords)\n', (6802, 6828), False, 'from pymatgen.core import Structure\n'), ((7997, 8027), 'numpy.array', 'np.array', (['force'], {'dtype': '"""float"""'}), "(force, dtype='float')\n", (8005, 8027), True, 'import numpy as np\n'), ((9976, 10007), 'numpy.array', 'np.array', (['stress'], {'dtype': '"""float"""'}), "(stress, dtype='float')\n", (9984, 10007), True, 'import numpy as np\n')]
|
import numpy as np
def projective(coords):
""" Convert 2D cartesian coordinates to homogeneus/projective. """
num = np.shape(coords)[0]
w = np.array([[1], ]*num)
return np.append(coords, w, axis=1)
def cartesian(coords):
""" Convert 2D homogeneus/projective coordinates to cartesian. """
return coords[:, :2]
def translate(x, y):
""" Return translation matrix. """
return np.array([
[1, 0, x],
[0, 1, y],
[0, 0, 1],
])
def rotate(a):
""" Return rotation matrix. """
return np.array([
[np.cos(a), -np.sin(a), 0],
[np.sin(a), np.cos(a), 0],
[0, 0, 1]
])
def transform_list(coords, matrix):
""" Apply transformation to a list of coordinates. """
return matrix.dot(coords.T).T
def transform_apply(coords, transforms):
""" Apply list of transformations to a list of coordinates. """
out = projective(coords)
for transform in transforms:
out = transform_list(out, transform)
return cartesian(out)
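# ---------- usage sketch ----------
# A minimal sketch of the helpers above; the points and angle are arbitrary values
# chosen only for illustration.
if __name__ == "__main__":
    pts = np.array([[1.0, 0.0], [0.0, 2.0]])
    # rotate 90 degrees counter-clockwise about the origin, then shift by (1, 0)
    moved = transform_apply(pts, [rotate(np.pi / 2), translate(1.0, 0.0)])
    print(moved)  # approximately [[1., 1.], [-1., 0.]]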
|
[
"numpy.append",
"numpy.array",
"numpy.cos",
"numpy.sin",
"numpy.shape"
] |
[((154, 175), 'numpy.array', 'np.array', (['([[1]] * num)'], {}), '([[1]] * num)\n', (162, 175), True, 'import numpy as np\n'), ((188, 216), 'numpy.append', 'np.append', (['coords', 'w'], {'axis': '(1)'}), '(coords, w, axis=1)\n', (197, 216), True, 'import numpy as np\n'), ((411, 454), 'numpy.array', 'np.array', (['[[1, 0, x], [0, 1, y], [0, 0, 1]]'], {}), '([[1, 0, x], [0, 1, y], [0, 0, 1]])\n', (419, 454), True, 'import numpy as np\n'), ((126, 142), 'numpy.shape', 'np.shape', (['coords'], {}), '(coords)\n', (134, 142), True, 'import numpy as np\n'), ((571, 580), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (577, 580), True, 'import numpy as np\n'), ((607, 616), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (613, 616), True, 'import numpy as np\n'), ((618, 627), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (624, 627), True, 'import numpy as np\n'), ((583, 592), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (589, 592), True, 'import numpy as np\n')]
|
import argparse
import os
import math
# import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
import progressbar
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import test_utils
from waymo_open_dataset.utils import box_utils
from waymo_open_dataset import dataset_pb2 as open_dataset
from adapter_lib import *
import pdb
############################Config###########################################
# path to waymo dataset "folder" (all .tfrecord files in that folder will
# be converted)
DATA_PATH = '/media/trail/harddrive/datasets/Waymo/original/validation'
# path to save kitti dataset
KITTI_PATH = '/media/trail/harddrive/datasets/Waymo/waymo/validation'
# location filter, use this to convert your preferred location
LOCATION_FILTER = False
LOCATION_NAME = ['location_sf']
# max indexing length
INDEX_LENGTH = 15
# as name
IMAGE_FORMAT = 'png'
# do not change
LABEL_PATH = KITTI_PATH + '/label_0'
LABEL_ALL_PATH = KITTI_PATH + '/label_all'
IMAGE_PATH = KITTI_PATH + '/image_0'
CALIB_PATH = KITTI_PATH + '/calib'
LIDAR_PATH = KITTI_PATH + '/velodyne'
IMG_CALIB_PATH = KITTI_PATH + '/img_calib'
###############################################################################
class Adapter:
def __init__(self):
self.__lidar_list = ['_FRONT', '_FRONT_RIGHT',
'_FRONT_LEFT', '_SIDE_RIGHT', '_SIDE_LEFT']
self.__type_list = ['UNKNOWN', 'VEHICLE',
'PEDESTRIAN', 'SIGN', 'CYCLIST']
self.__file_names = []
self.T_front_cam_to_ref = []
self.T_vehicle_to_front_cam = []
def cvt(self, args, folder, start_ind):
""" convert dataset from Waymo to KITTI
Args:
return:
"""
self.start_ind = start_ind
self.get_file_names(DATA_PATH + '/' + folder)
print("Converting ..." + folder)
self.create_folder(args.camera_type)
bar = progressbar.ProgressBar(maxval=len(self.__file_names) + 1,
widgets=[progressbar.Percentage(), ' ',
progressbar.Bar(
marker='>', left='[', right=']'), ' ',
progressbar.ETA()])
tf.enable_eager_execution()
file_num = 1
frame_num = 0
frame_name = self.start_ind
label_exists = False
print("start converting ...")
bar.start()
for file_idx, file_name in enumerate(self.__file_names):
print('File {}/{}'.format(file_idx, len(self.__file_names)))
dataset = tf.data.TFRecordDataset(file_name, compression_type='')
for data in dataset:
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
if (frame_num % args.keyframe) == 0:
if LOCATION_FILTER == True and frame.context.stats.location not in LOCATION_NAME:
continue
if args.test == False:
label_exists = self.save_label(frame, frame_name, args.camera_type, False, True)
if args.test == label_exists:
frame_num += 1
continue
self.save_calib(frame, frame_name)
self.save_label(
frame, frame_name, args.camera_type)
self.save_image(frame, frame_name, args.camera_type)
self.save_lidar(frame, frame_name)
self.save_image_calib(frame, frame_name)
# print("image:{}\ncalib:{}\nlidar:{}\nlabel:{}\n".format(str(s1-e1),str(s2-e2),str(s3-e3),str(s4-e4)))
frame_name += 1
frame_num += 1
bar.update(file_num)
file_num += 1
bar.finish()
print("\nfinished ...")
return frame_name
def save_image(self, frame, frame_num, cam_type):
""" parse and save the images in png format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
for img in frame.images:
if cam_type == 'all' or cam_type == str(img.name - 1):
img_path = IMAGE_PATH + '/' + \
str(frame_num).zfill(INDEX_LENGTH) + '.' + IMAGE_FORMAT
img = cv2.imdecode(np.frombuffer(
img.image, np.uint8), cv2.IMREAD_COLOR)
rgb_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
plt.imsave(img_path, rgb_img, format=IMAGE_FORMAT)
def save_calib(self, frame, frame_num, kitti_format=True):
""" parse and save the calibration data
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
fp_calib = open(CALIB_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
self.T_front_cam_to_ref = np.array([
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0],
[1.0, 0.0, 0.0]
])
camera_calib = []
R0_rect = ["%e" % i for i in np.eye(3).flatten()]
Tr_velo_to_cam = []
calib_context = ''
for camera in frame.context.camera_calibrations:
tmp = np.array(camera.extrinsic.transform).reshape(4, 4)
tmp = self.cart_to_homo(self.T_front_cam_to_ref) @ np.linalg.inv(tmp)
Tr_velo_to_cam.append(["%e" % i for i in tmp[:3,:].reshape(12)])
for cam in frame.context.camera_calibrations:
tmp = np.zeros((3, 4))
tmp[0, 0] = cam.intrinsic[0]
tmp[1, 1] = cam.intrinsic[1]
tmp[0, 2] = cam.intrinsic[2]
tmp[1, 2] = cam.intrinsic[3]
tmp[2, 2] = 1
tmp = list(tmp.reshape(12))
tmp = ["%e" % i for i in tmp]
camera_calib.append(tmp)
T_front_cam_to_vehicle = np.array(frame.context.camera_calibrations[0].extrinsic.transform).reshape(4, 4)
self.T_vehicle_to_front_cam = np.linalg.inv(T_front_cam_to_vehicle)
for i in range(5):
calib_context += "P" + str(i) + ": " + \
" ".join(camera_calib[i]) + '\n'
calib_context += "R0_rect" + ": " + " ".join(R0_rect) + '\n'
for i in range(5):
calib_context += "Tr_velo_to_cam_" + \
str(i) + ": " + " ".join(Tr_velo_to_cam[i]) + '\n'
calib_context += "timestamp_micros: " + \
str(frame.timestamp_micros) + '\n'
calib_context += "context_name: " + str(frame.context.name) + '\n'
fp_calib.write(calib_context)
fp_calib.close()
def save_lidar(self, frame, frame_num):
""" parse and save the lidar data in psd format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
range_images, range_image_top_pose = self.parse_range_image_and_camera_projection(
frame)
points, intensity = self.convert_range_image_to_point_cloud(
frame,
range_images,
range_image_top_pose)
points_all = np.concatenate(points, axis=0)
intensity_all = np.concatenate(intensity, axis=0)
point_cloud = np.column_stack((points_all, intensity_all))
pc_path = LIDAR_PATH + '/' + \
str(frame_num).zfill(INDEX_LENGTH) + '.bin'
point_cloud.tofile(pc_path)
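        # Note: the .bin written above can be read back with
        # np.fromfile(pc_path, dtype=np.float32).reshape(-1, 4), one (x, y, z, intensity)
        # row per point (dtype assumed from the float32 tensors produced upstream).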
def save_label(self, frame, frame_num, cam_type, kitti_format=False, check_label_exists = False):
""" parse and save the label data in .txt format
:param frame: open dataset frame proto
:param frame_num: the current frame number
:return:
"""
# get point cloud in the frame
range_images, range_image_top_pose = self.parse_range_image_and_camera_projection(
frame)
points, intensity = self.convert_range_image_to_point_cloud(
frame,
range_images,
range_image_top_pose)
points_all = tf.convert_to_tensor(
np.concatenate(points, axis=0), dtype=np.float32)
# preprocess bounding box data
id_to_bbox = dict()
id_to_name = dict()
for labels in frame.projected_lidar_labels:
name = labels.name
for label in labels.labels:
bbox = [label.box.center_x - label.box.length / 2, label.box.center_y - label.box.width / 2,
label.box.center_x + label.box.length / 2, label.box.center_y + label.box.width / 2]
id_to_bbox[label.id] = bbox
id_to_name[label.id] = name - 1
Tr_velo_to_cam = []
recorded_label = []
label_lines = ''
label_all_lines = ''
"""
if kitti_format:
for camera in frame.context.camera_calibrations:
tmp = np.array(camera.extrinsic.transform).reshape(4, 4)
tmp = np.linalg.inv(tmp)
axes_transformation = np.array([[0, -1, 0, 0],
[0, 0, -1, 0],
[1, 0, 0, 0],
[0, 0, 0, 1]])
tmp = np.matmul(axes_transformation, tmp)
Tr_velo_to_cam.append(tmp)
"""
for obj in frame.laser_labels:
            # calculate bounding box
bounding_box = None
name = None
id = obj.id
for lidar in self.__lidar_list:
if id + lidar in id_to_bbox:
bounding_box = id_to_bbox.get(id + lidar)
name = str(id_to_name.get(id + lidar))
break
if bounding_box == None or name == None:
continue
box = tf.convert_to_tensor(
[obj.box.center_x, obj.box.center_y, obj.box.center_z, obj.box.length, obj.box.width, obj.box.height, obj.box.heading], dtype=np.float32)
box = tf.reshape(box, (1, 7))
num_points = box_utils.compute_num_points_in_box_3d(
points_all, box)
num_points = num_points.numpy()[0]
detection_difficulty = obj.detection_difficulty_level
my_type = self.__type_list[obj.type]
truncated = 0
occluded = 0
height = obj.box.height
width = obj.box.width
length = obj.box.length
x = obj.box.center_x
y = obj.box.center_y
z = obj.box.center_z - height/2
if check_label_exists == False:
pt_ref = self.cart_to_homo(self.T_front_cam_to_ref) @ self.T_vehicle_to_front_cam @ np.array([x,y,z,1]).reshape((4,1))
x, y, z, _ = pt_ref.flatten().tolist()
rotation_y = -obj.box.heading - np.pi/2
beta = math.atan2(x, z)
alpha = (rotation_y + beta - math.pi / 2) % (2 * math.pi)
# save the labels
line = my_type + ' {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format(round(truncated, 2),
occluded,
round(
alpha, 2),
round(
bounding_box[0], 2),
round(
bounding_box[1], 2),
round(
bounding_box[2], 2),
round(
bounding_box[3], 2),
round(
height, 2),
round(
width, 2),
round(
length, 2),
round(
x, 2),
round(
y, 2),
round(
z, 2),
round(
rotation_y, 2),
num_points,
detection_difficulty)
line_all = line[:-1] + ' ' + name + '\n'
# store the label
label_all_lines += line_all
if (name == cam_type):
label_lines += line
recorded_label.append(line)
if len(recorded_label) == 0:
return False
else:
fp_label_all = open(LABEL_ALL_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
fp_label = open(LABEL_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
fp_label.write(label_lines)
fp_label.close()
fp_label_all.write(label_all_lines)
fp_label_all.close()
return True
def save_image_calib(self, frame, frame_num):
fp_image_calib = open(IMG_CALIB_PATH + '/' +
str(frame_num).zfill(INDEX_LENGTH) + '.txt', 'w+')
camera_calib = []
pose = []
velocity = []
timestamp = []
shutter = []
trigger_time = []
readout_done_time = []
calib_context = ''
for camera in frame.images:
tmp = np.array(camera.pose.transform).reshape((16,))
pose.append(["%e" % i for i in tmp])
tmp = np.zeros(6)
tmp[0] = camera.velocity.v_x
tmp[1] = camera.velocity.v_y
tmp[2] = camera.velocity.v_z
tmp[3] = camera.velocity.w_x
tmp[4] = camera.velocity.w_y
tmp[5] = camera.velocity.w_z
velocity.append(["%e" % i for i in tmp])
timestamp.append(camera.pose_timestamp)
shutter.append(camera.shutter)
trigger_time.append(camera.camera_trigger_time)
readout_done_time.append(camera.camera_readout_done_time)
for i in range(5):
calib_context += "Pose_" + str(i) + ": " + \
" ".join(pose[i]) + '\n'
for i in range(5):
calib_context += "Velocity_" + str(i) + ": " + \
" ".join(velocity[i]) + '\n'
        # write the per-camera timestamp, shutter, trigger and readout values collected above
        for i in range(5):
            calib_context += "Timestamp_" + str(i) + ": " + \
                str(timestamp[i]) + '\n'
        for i in range(5):
            calib_context += "Shutter_" + str(i) + ": " + \
                str(shutter[i]) + '\n'
        for i in range(5):
            calib_context += "Trigger_" + str(i) + ": " + \
                str(trigger_time[i]) + '\n'
        for i in range(5):
            calib_context += "Readout_" + str(i) + ": " + \
                str(readout_done_time[i]) + '\n'
fp_image_calib.write(calib_context)
fp_image_calib.close()
def get_file_names(self, folder):
for i in os.listdir(folder):
if i.split('.')[-1] == 'tfrecord':
self.__file_names.append(folder + '/' + i)
def cart_to_homo(self, mat):
ret = np.eye(4)
if mat.shape == (3, 3):
ret[:3, :3] = mat
elif mat.shape == (3, 4):
ret[:3, :] = mat
else:
raise ValueError(mat.shape)
return ret
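    # A small illustrative check of cart_to_homo (values assumed, not from the dataset):
    # it pads a 3x3 rotation or a 3x4 [R|t] matrix into a 4x4 homogeneous transform,
    # so cart_to_homo(np.eye(3)) returns np.eye(4).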
def create_folder(self, cam_type):
if not os.path.exists(KITTI_PATH):
os.mkdir(KITTI_PATH)
if not os.path.exists(CALIB_PATH):
os.mkdir(CALIB_PATH)
if not os.path.exists(LIDAR_PATH):
os.mkdir(LIDAR_PATH)
if not os.path.exists(LABEL_ALL_PATH):
os.mkdir(LABEL_ALL_PATH)
if not os.path.exists(IMG_CALIB_PATH):
os.mkdir(IMG_CALIB_PATH)
if not os.path.exists(IMAGE_PATH):
os.mkdir(IMAGE_PATH)
if not os.path.exists(LABEL_PATH):
os.mkdir(LABEL_PATH)
def extract_intensity(self, frame, range_images, lidar_num):
""" extract the intensity from the original range image
:param frame: open dataset frame proto
:param frame_num: the current frame number
:param lidar_num: the number of current lidar
:return:
"""
intensity_0 = np.array(range_images[lidar_num][0].data).reshape(-1, 4)
intensity_0 = intensity_0[:, 1]
intensity_1 = np.array(range_images[lidar_num][
1].data).reshape(-1, 4)[:, 1]
return intensity_0, intensity_1
def image_show(self, data, name, layout, cmap=None):
"""Show an image."""
plt.subplot(*layout)
plt.imshow(tf.image.decode_jpeg(data), cmap=cmap)
plt.title(name)
plt.grid(False)
plt.axis('off')
def parse_range_image_and_camera_projection(self, frame):
"""Parse range images and camera projections given a frame.
Args:
frame: open dataset frame proto
Returns:
range_images: A dict of {laser_name,
[range_image_first_return, range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
"""
self.__range_images = {}
# camera_projections = {}
# range_image_top_pose = None
for laser in frame.lasers:
if len(laser.ri_return1.range_image_compressed) > 0:
range_image_str_tensor = tf.decode_compressed(
laser.ri_return1.range_image_compressed, 'ZLIB')
ri = open_dataset.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
self.__range_images[laser.name] = [ri]
if laser.name == open_dataset.LaserName.TOP:
range_image_top_pose_str_tensor = tf.decode_compressed(
laser.ri_return1.range_image_pose_compressed, 'ZLIB')
range_image_top_pose = open_dataset.MatrixFloat()
range_image_top_pose.ParseFromString(
bytearray(range_image_top_pose_str_tensor.numpy()))
# camera_projection_str_tensor = tf.decode_compressed(
# laser.ri_return1.camera_projection_compressed, 'ZLIB')
# cp = open_dataset.MatrixInt32()
# cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
# camera_projections[laser.name] = [cp]
if len(laser.ri_return2.range_image_compressed) > 0:
range_image_str_tensor = tf.decode_compressed(
laser.ri_return2.range_image_compressed, 'ZLIB')
ri = open_dataset.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
self.__range_images[laser.name].append(ri)
#
# camera_projection_str_tensor = tf.decode_compressed(
# laser.ri_return2.camera_projection_compressed, 'ZLIB')
# cp = open_dataset.MatrixInt32()
# cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
# camera_projections[laser.name].append(cp)
return self.__range_images, range_image_top_pose
def plot_range_image_helper(self, data, name, layout, vmin=0, vmax=1, cmap='gray'):
"""Plots range image.
Args:
data: range image data
name: the image title
layout: plt layout
vmin: minimum value of the passed data
vmax: maximum value of the passed data
cmap: color map
"""
plt.subplot(*layout)
plt.imshow(data, cmap=cmap, vmin=vmin, vmax=vmax)
plt.title(name)
plt.grid(False)
plt.axis('off')
def get_range_image(self, laser_name, return_index):
"""Returns range image given a laser name and its return index."""
return self.__range_images[laser_name][return_index]
def show_range_image(self, range_image, layout_index_start=1):
"""Shows range image.
Args:
range_image: the range image data from a given lidar of type MatrixFloat.
layout_index_start: layout offset
"""
range_image_tensor = tf.convert_to_tensor(range_image.data)
range_image_tensor = tf.reshape(
range_image_tensor, range_image.shape.dims)
lidar_image_mask = tf.greater_equal(range_image_tensor, 0)
range_image_tensor = tf.where(lidar_image_mask, range_image_tensor,
tf.ones_like(range_image_tensor) * 1e10)
range_image_range = range_image_tensor[..., 0]
range_image_intensity = range_image_tensor[..., 1]
range_image_elongation = range_image_tensor[..., 2]
self.plot_range_image_helper(range_image_range.numpy(), 'range',
[8, 1, layout_index_start], vmax=75, cmap='gray')
self.plot_range_image_helper(range_image_intensity.numpy(), 'intensity',
[8, 1, layout_index_start + 1], vmax=1.5, cmap='gray')
self.plot_range_image_helper(range_image_elongation.numpy(), 'elongation',
[8, 1, layout_index_start + 2], vmax=1.5, cmap='gray')
def convert_range_image_to_point_cloud(self, frame, range_images, range_image_top_pose, ri_index=0):
"""Convert range images to point cloud.
Args:
frame: open dataset frame
range_images: A dict of {laser_name,
[range_image_first_return, range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
ri_index: 0 for the first return, 1 for the second return.
Returns:
points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
cp_points: {[N, 6]} list of camera projections of length 5
(number of lidars).
intensity: {[N, 1]} list of intensity of length 5 (number of lidars).
"""
calibrations = sorted(
frame.context.laser_calibrations, key=lambda c: c.name)
# lasers = sorted(frame.lasers, key=lambda laser: laser.name)
points = []
# cp_points = []
intensity = []
frame_pose = tf.convert_to_tensor(
np.reshape(np.array(frame.pose.transform), [4, 4]))
# [H, W, 6]
range_image_top_pose_tensor = tf.reshape(
tf.convert_to_tensor(range_image_top_pose.data),
range_image_top_pose.shape.dims)
# [H, W, 3, 3]
range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
range_image_top_pose_tensor[...,
0], range_image_top_pose_tensor[..., 1],
range_image_top_pose_tensor[..., 2])
range_image_top_pose_tensor_translation = range_image_top_pose_tensor[
..., 3:]
range_image_top_pose_tensor = transform_utils.get_transform(
range_image_top_pose_tensor_rotation,
range_image_top_pose_tensor_translation)
for c in calibrations:
range_image = range_images[c.name][ri_index]
if len(c.beam_inclinations) == 0:
beam_inclinations = range_image_utils.compute_inclination(
tf.constant([c.beam_inclination_min,
c.beam_inclination_max]),
height=range_image.shape.dims[0])
else:
beam_inclinations = tf.constant(c.beam_inclinations)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
range_image_tensor = tf.reshape(
tf.convert_to_tensor(range_image.data), range_image.shape.dims)
pixel_pose_local = None
frame_pose_local = None
if c.name == open_dataset.LaserName.TOP:
pixel_pose_local = range_image_top_pose_tensor
pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
frame_pose_local = tf.expand_dims(frame_pose, axis=0)
range_image_mask = range_image_tensor[..., 0] > 0
range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(range_image_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(tf.convert_to_tensor(
beam_inclinations), axis=0),
pixel_pose=pixel_pose_local,
frame_pose=frame_pose_local)
range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
points_tensor = tf.gather_nd(range_image_cartesian,
tf.where(range_image_mask))
intensity_tensor = tf.gather_nd(range_image_tensor,
tf.where(range_image_mask))
# cp = camera_projections[c.name][0]
# cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims)
# cp_points_tensor = tf.gather_nd(cp_tensor, tf.where(range_image_mask))
points.append(points_tensor.numpy())
# cp_points.append(cp_points_tensor.numpy())
intensity.append(intensity_tensor.numpy()[:, 1])
return points, intensity
def rgba(self, r):
"""Generates a color based on range.
Args:
r: the range value of a given point.
Returns:
The color for a given range
"""
c = plt.get_cmap('jet')((r % 20.0) / 20.0)
c = list(c)
c[-1] = 0.5 # alpha
return c
def plot_image(self, camera_image):
"""Plot a cmaera image."""
plt.figure(figsize=(20, 12))
plt.imshow(tf.image.decode_jpeg(camera_image.image))
plt.grid("off")
def plot_points_on_image(self, projected_points, camera_image, rgba_func, point_size=5.0):
"""Plots points on a camera image.
Args:
projected_points: [N, 3] numpy array. The inner dims are
[camera_x, camera_y, range].
camera_image: jpeg encoded camera image.
rgba_func: a function that generates a color from a range value.
point_size: the point size.
"""
self.plot_image(camera_image)
xs = []
ys = []
colors = []
for point in projected_points:
xs.append(point[0]) # width, col
ys.append(point[1]) # height, row
colors.append(rgba_func(point[2]))
plt.scatter(xs, ys, c=colors, s=point_size, edgecolors="none")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Save Waymo dataset into Kitti format')
parser.add_argument('--keyframe',
type=int,
default=10,
                        help='Save every Nth scene, where N is this argument (default: 10)')
parser.add_argument('--camera_type',
type=str,
default="0",
help='Select camera views to save. Input argument from 0 to 4 or all')
parser.add_argument('--start_ind',
type=int,
default=0,
help='File number starts counting from this index')
parser.add_argument('--test',
type=bool,
default=False,
help='if true, does not save any ground truth data')
args = parser.parse_args()
start_ind = args.start_ind
path, dirs, files = next(os.walk(DATA_PATH))
dirs.sort()
for directory in dirs:
adapter = Adapter()
last_ind = adapter.cvt(args, directory, start_ind)
start_ind = last_ind
|
[
"matplotlib.pyplot.grid",
"tensorflow.enable_eager_execution",
"numpy.column_stack",
"numpy.array",
"progressbar.Percentage",
"tensorflow.ones_like",
"os.walk",
"matplotlib.pyplot.imshow",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"os.mkdir",
"numpy.concatenate",
"matplotlib.pyplot.scatter",
"tensorflow.convert_to_tensor",
"waymo_open_dataset.utils.transform_utils.get_transform",
"matplotlib.pyplot.axis",
"numpy.frombuffer",
"waymo_open_dataset.utils.box_utils.compute_num_points_in_box_3d",
"numpy.eye",
"waymo_open_dataset.utils.transform_utils.get_rotation_matrix",
"waymo_open_dataset.dataset_pb2.Frame",
"tensorflow.reverse",
"waymo_open_dataset.dataset_pb2.MatrixFloat",
"tensorflow.where",
"math.atan2",
"cv2.cvtColor",
"tensorflow.reshape",
"progressbar.ETA",
"matplotlib.pyplot.title",
"tensorflow.expand_dims",
"matplotlib.pyplot.get_cmap",
"tensorflow.data.TFRecordDataset",
"progressbar.Bar",
"tensorflow.decode_compressed",
"matplotlib.pyplot.imsave",
"matplotlib.pyplot.figure",
"numpy.linalg.inv",
"numpy.zeros",
"tensorflow.constant",
"matplotlib.pyplot.subplot",
"tensorflow.squeeze",
"tensorflow.greater_equal",
"tensorflow.image.decode_jpeg"
] |
[((27285, 27360), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Save Waymo dataset into Kitti format"""'}), "(description='Save Waymo dataset into Kitti format')\n", (27308, 27360), False, 'import argparse\n'), ((2400, 2427), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (2425, 2427), True, 'import tensorflow as tf\n'), ((5226, 5289), 'numpy.array', 'np.array', (['[[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]]'], {}), '([[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]])\n', (5234, 5289), True, 'import numpy as np\n'), ((6314, 6351), 'numpy.linalg.inv', 'np.linalg.inv', (['T_front_cam_to_vehicle'], {}), '(T_front_cam_to_vehicle)\n', (6327, 6351), True, 'import numpy as np\n'), ((7471, 7501), 'numpy.concatenate', 'np.concatenate', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (7485, 7501), True, 'import numpy as np\n'), ((7526, 7559), 'numpy.concatenate', 'np.concatenate', (['intensity'], {'axis': '(0)'}), '(intensity, axis=0)\n', (7540, 7559), True, 'import numpy as np\n'), ((7582, 7626), 'numpy.column_stack', 'np.column_stack', (['(points_all, intensity_all)'], {}), '((points_all, intensity_all))\n', (7597, 7626), True, 'import numpy as np\n'), ((15130, 15148), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (15140, 15148), False, 'import os\n'), ((15304, 15313), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (15310, 15313), True, 'import numpy as np\n'), ((16826, 16846), 'matplotlib.pyplot.subplot', 'plt.subplot', (['*layout'], {}), '(*layout)\n', (16837, 16846), True, 'import matplotlib.pyplot as plt\n'), ((16913, 16928), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (16922, 16928), True, 'import matplotlib.pyplot as plt\n'), ((16937, 16952), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (16945, 16952), True, 'import matplotlib.pyplot as plt\n'), ((16961, 16976), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (16969, 16976), True, 'import matplotlib.pyplot as plt\n'), ((19991, 20011), 'matplotlib.pyplot.subplot', 'plt.subplot', (['*layout'], {}), '(*layout)\n', (20002, 20011), True, 'import matplotlib.pyplot as plt\n'), ((20020, 20069), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data'], {'cmap': 'cmap', 'vmin': 'vmin', 'vmax': 'vmax'}), '(data, cmap=cmap, vmin=vmin, vmax=vmax)\n', (20030, 20069), True, 'import matplotlib.pyplot as plt\n'), ((20078, 20093), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (20087, 20093), True, 'import matplotlib.pyplot as plt\n'), ((20102, 20117), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (20110, 20117), True, 'import matplotlib.pyplot as plt\n'), ((20126, 20141), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (20134, 20141), True, 'import matplotlib.pyplot as plt\n'), ((20617, 20655), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['range_image.data'], {}), '(range_image.data)\n', (20637, 20655), True, 'import tensorflow as tf\n'), ((20685, 20739), 'tensorflow.reshape', 'tf.reshape', (['range_image_tensor', 'range_image.shape.dims'], {}), '(range_image_tensor, range_image.shape.dims)\n', (20695, 20739), True, 'import tensorflow as tf\n'), ((20780, 20819), 'tensorflow.greater_equal', 'tf.greater_equal', (['range_image_tensor', '(0)'], {}), '(range_image_tensor, 0)\n', (20796, 20819), True, 'import tensorflow as tf\n'), ((23160, 23310), 'waymo_open_dataset.utils.transform_utils.get_rotation_matrix', 
'transform_utils.get_rotation_matrix', (['range_image_top_pose_tensor[..., 0]', 'range_image_top_pose_tensor[..., 1]', 'range_image_top_pose_tensor[..., 2]'], {}), '(range_image_top_pose_tensor[..., 0],\n range_image_top_pose_tensor[..., 1], range_image_top_pose_tensor[..., 2])\n', (23195, 23310), False, 'from waymo_open_dataset.utils import transform_utils\n'), ((23510, 23622), 'waymo_open_dataset.utils.transform_utils.get_transform', 'transform_utils.get_transform', (['range_image_top_pose_tensor_rotation', 'range_image_top_pose_tensor_translation'], {}), '(range_image_top_pose_tensor_rotation,\n range_image_top_pose_tensor_translation)\n', (23539, 23622), False, 'from waymo_open_dataset.utils import transform_utils\n'), ((26349, 26377), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 12)'}), '(figsize=(20, 12))\n', (26359, 26377), True, 'import matplotlib.pyplot as plt\n'), ((26447, 26462), 'matplotlib.pyplot.grid', 'plt.grid', (['"""off"""'], {}), "('off')\n", (26455, 26462), True, 'import matplotlib.pyplot as plt\n'), ((27180, 27242), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'c': 'colors', 's': 'point_size', 'edgecolors': '"""none"""'}), "(xs, ys, c=colors, s=point_size, edgecolors='none')\n", (27191, 27242), True, 'import matplotlib.pyplot as plt\n'), ((28259, 28277), 'os.walk', 'os.walk', (['DATA_PATH'], {}), '(DATA_PATH)\n', (28266, 28277), False, 'import os\n'), ((2754, 2809), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['file_name'], {'compression_type': '""""""'}), "(file_name, compression_type='')\n", (2777, 2809), True, 'import tensorflow as tf\n'), ((5835, 5851), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (5843, 5851), True, 'import numpy as np\n'), ((8436, 8466), 'numpy.concatenate', 'np.concatenate', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (8450, 8466), True, 'import numpy as np\n'), ((10205, 10372), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[obj.box.center_x, obj.box.center_y, obj.box.center_z, obj.box.length, obj.\n box.width, obj.box.height, obj.box.heading]'], {'dtype': 'np.float32'}), '([obj.box.center_x, obj.box.center_y, obj.box.center_z,\n obj.box.length, obj.box.width, obj.box.height, obj.box.heading], dtype=\n np.float32)\n', (10225, 10372), True, 'import tensorflow as tf\n'), ((10399, 10422), 'tensorflow.reshape', 'tf.reshape', (['box', '(1, 7)'], {}), '(box, (1, 7))\n', (10409, 10422), True, 'import tensorflow as tf\n'), ((10448, 10503), 'waymo_open_dataset.utils.box_utils.compute_num_points_in_box_3d', 'box_utils.compute_num_points_in_box_3d', (['points_all', 'box'], {}), '(points_all, box)\n', (10486, 10503), False, 'from waymo_open_dataset.utils import box_utils\n'), ((11271, 11287), 'math.atan2', 'math.atan2', (['x', 'z'], {}), '(x, z)\n', (11281, 11287), False, 'import math\n'), ((13674, 13685), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (13682, 13685), True, 'import numpy as np\n'), ((15571, 15597), 'os.path.exists', 'os.path.exists', (['KITTI_PATH'], {}), '(KITTI_PATH)\n', (15585, 15597), False, 'import os\n'), ((15611, 15631), 'os.mkdir', 'os.mkdir', (['KITTI_PATH'], {}), '(KITTI_PATH)\n', (15619, 15631), False, 'import os\n'), ((15647, 15673), 'os.path.exists', 'os.path.exists', (['CALIB_PATH'], {}), '(CALIB_PATH)\n', (15661, 15673), False, 'import os\n'), ((15687, 15707), 'os.mkdir', 'os.mkdir', (['CALIB_PATH'], {}), '(CALIB_PATH)\n', (15695, 15707), False, 'import os\n'), ((15723, 15749), 'os.path.exists', 'os.path.exists', (['LIDAR_PATH'], 
{}), '(LIDAR_PATH)\n', (15737, 15749), False, 'import os\n'), ((15763, 15783), 'os.mkdir', 'os.mkdir', (['LIDAR_PATH'], {}), '(LIDAR_PATH)\n', (15771, 15783), False, 'import os\n'), ((15799, 15829), 'os.path.exists', 'os.path.exists', (['LABEL_ALL_PATH'], {}), '(LABEL_ALL_PATH)\n', (15813, 15829), False, 'import os\n'), ((15843, 15867), 'os.mkdir', 'os.mkdir', (['LABEL_ALL_PATH'], {}), '(LABEL_ALL_PATH)\n', (15851, 15867), False, 'import os\n'), ((15883, 15913), 'os.path.exists', 'os.path.exists', (['IMG_CALIB_PATH'], {}), '(IMG_CALIB_PATH)\n', (15897, 15913), False, 'import os\n'), ((15927, 15951), 'os.mkdir', 'os.mkdir', (['IMG_CALIB_PATH'], {}), '(IMG_CALIB_PATH)\n', (15935, 15951), False, 'import os\n'), ((15967, 15993), 'os.path.exists', 'os.path.exists', (['IMAGE_PATH'], {}), '(IMAGE_PATH)\n', (15981, 15993), False, 'import os\n'), ((16007, 16027), 'os.mkdir', 'os.mkdir', (['IMAGE_PATH'], {}), '(IMAGE_PATH)\n', (16015, 16027), False, 'import os\n'), ((16043, 16069), 'os.path.exists', 'os.path.exists', (['LABEL_PATH'], {}), '(LABEL_PATH)\n', (16057, 16069), False, 'import os\n'), ((16083, 16103), 'os.mkdir', 'os.mkdir', (['LABEL_PATH'], {}), '(LABEL_PATH)\n', (16091, 16103), False, 'import os\n'), ((16866, 16892), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['data'], {}), '(data)\n', (16886, 16892), True, 'import tensorflow as tf\n'), ((22996, 23043), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['range_image_top_pose.data'], {}), '(range_image_top_pose.data)\n', (23016, 23043), True, 'import tensorflow as tf\n'), ((24143, 24183), 'tensorflow.reverse', 'tf.reverse', (['beam_inclinations'], {'axis': '[-1]'}), '(beam_inclinations, axis=[-1])\n', (24153, 24183), True, 'import tensorflow as tf\n'), ((25222, 25263), 'tensorflow.squeeze', 'tf.squeeze', (['range_image_cartesian'], {'axis': '(0)'}), '(range_image_cartesian, axis=0)\n', (25232, 25263), True, 'import tensorflow as tf\n'), ((26160, 26179), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (26172, 26179), True, 'import matplotlib.pyplot as plt\n'), ((26397, 26437), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['camera_image.image'], {}), '(camera_image.image)\n', (26417, 26437), True, 'import tensorflow as tf\n'), ((2867, 2887), 'waymo_open_dataset.dataset_pb2.Frame', 'open_dataset.Frame', ([], {}), '()\n', (2885, 2887), True, 'from waymo_open_dataset import dataset_pb2 as open_dataset\n'), ((4706, 4742), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (4718, 4742), False, 'import cv2\n'), ((4759, 4809), 'matplotlib.pyplot.imsave', 'plt.imsave', (['img_path', 'rgb_img'], {'format': 'IMAGE_FORMAT'}), '(img_path, rgb_img, format=IMAGE_FORMAT)\n', (4769, 4809), True, 'import matplotlib.pyplot as plt\n'), ((5666, 5684), 'numpy.linalg.inv', 'np.linalg.inv', (['tmp'], {}), '(tmp)\n', (5679, 5684), True, 'import numpy as np\n'), ((6195, 6261), 'numpy.array', 'np.array', (['frame.context.camera_calibrations[0].extrinsic.transform'], {}), '(frame.context.camera_calibrations[0].extrinsic.transform)\n', (6203, 6261), True, 'import numpy as np\n'), ((16477, 16518), 'numpy.array', 'np.array', (['range_images[lidar_num][0].data'], {}), '(range_images[lidar_num][0].data)\n', (16485, 16518), True, 'import numpy as np\n'), ((17786, 17855), 'tensorflow.decode_compressed', 'tf.decode_compressed', (['laser.ri_return1.range_image_compressed', '"""ZLIB"""'], {}), "(laser.ri_return1.range_image_compressed, 'ZLIB')\n", (17806, 17855), 
True, 'import tensorflow as tf\n'), ((17898, 17924), 'waymo_open_dataset.dataset_pb2.MatrixFloat', 'open_dataset.MatrixFloat', ([], {}), '()\n', (17922, 17924), True, 'from waymo_open_dataset import dataset_pb2 as open_dataset\n'), ((18925, 18994), 'tensorflow.decode_compressed', 'tf.decode_compressed', (['laser.ri_return2.range_image_compressed', '"""ZLIB"""'], {}), "(laser.ri_return2.range_image_compressed, 'ZLIB')\n", (18945, 18994), True, 'import tensorflow as tf\n'), ((19037, 19063), 'waymo_open_dataset.dataset_pb2.MatrixFloat', 'open_dataset.MatrixFloat', ([], {}), '()\n', (19061, 19063), True, 'from waymo_open_dataset import dataset_pb2 as open_dataset\n'), ((20934, 20966), 'tensorflow.ones_like', 'tf.ones_like', (['range_image_tensor'], {}), '(range_image_tensor)\n', (20946, 20966), True, 'import tensorflow as tf\n'), ((22873, 22903), 'numpy.array', 'np.array', (['frame.pose.transform'], {}), '(frame.pose.transform)\n', (22881, 22903), True, 'import numpy as np\n'), ((24077, 24109), 'tensorflow.constant', 'tf.constant', (['c.beam_inclinations'], {}), '(c.beam_inclinations)\n', (24088, 24109), True, 'import tensorflow as tf\n'), ((24219, 24250), 'numpy.array', 'np.array', (['c.extrinsic.transform'], {}), '(c.extrinsic.transform)\n', (24227, 24250), True, 'import numpy as np\n'), ((24322, 24360), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['range_image.data'], {}), '(range_image.data)\n', (24342, 24360), True, 'import tensorflow as tf\n'), ((24609, 24649), 'tensorflow.expand_dims', 'tf.expand_dims', (['pixel_pose_local'], {'axis': '(0)'}), '(pixel_pose_local, axis=0)\n', (24623, 24649), True, 'import tensorflow as tf\n'), ((24685, 24719), 'tensorflow.expand_dims', 'tf.expand_dims', (['frame_pose'], {'axis': '(0)'}), '(frame_pose, axis=0)\n', (24699, 24719), True, 'import tensorflow as tf\n'), ((24890, 24940), 'tensorflow.expand_dims', 'tf.expand_dims', (['range_image_tensor[..., 0]'], {'axis': '(0)'}), '(range_image_tensor[..., 0], axis=0)\n', (24904, 24940), True, 'import tensorflow as tf\n'), ((24958, 24991), 'tensorflow.expand_dims', 'tf.expand_dims', (['extrinsic'], {'axis': '(0)'}), '(extrinsic, axis=0)\n', (24972, 24991), True, 'import tensorflow as tf\n'), ((25369, 25395), 'tensorflow.where', 'tf.where', (['range_image_mask'], {}), '(range_image_mask)\n', (25377, 25395), True, 'import tensorflow as tf\n'), ((25505, 25531), 'tensorflow.where', 'tf.where', (['range_image_mask'], {}), '(range_image_mask)\n', (25513, 25531), True, 'import tensorflow as tf\n'), ((2139, 2163), 'progressbar.Percentage', 'progressbar.Percentage', ([], {}), '()\n', (2161, 2163), False, 'import progressbar\n'), ((2217, 2265), 'progressbar.Bar', 'progressbar.Bar', ([], {'marker': '""">"""', 'left': '"""["""', 'right': '"""]"""'}), "(marker='>', left='[', right=']')\n", (2232, 2265), False, 'import progressbar\n'), ((2371, 2388), 'progressbar.ETA', 'progressbar.ETA', ([], {}), '()\n', (2386, 2388), False, 'import progressbar\n'), ((4605, 4639), 'numpy.frombuffer', 'np.frombuffer', (['img.image', 'np.uint8'], {}), '(img.image, np.uint8)\n', (4618, 4639), True, 'import numpy as np\n'), ((5552, 5588), 'numpy.array', 'np.array', (['camera.extrinsic.transform'], {}), '(camera.extrinsic.transform)\n', (5560, 5588), True, 'import numpy as np\n'), ((13560, 13591), 'numpy.array', 'np.array', (['camera.pose.transform'], {}), '(camera.pose.transform)\n', (13568, 13591), True, 'import numpy as np\n'), ((16596, 16637), 'numpy.array', 'np.array', (['range_images[lidar_num][1].data'], {}), 
'(range_images[lidar_num][1].data)\n', (16604, 16637), True, 'import numpy as np\n'), ((18174, 18248), 'tensorflow.decode_compressed', 'tf.decode_compressed', (['laser.ri_return1.range_image_pose_compressed', '"""ZLIB"""'], {}), "(laser.ri_return1.range_image_pose_compressed, 'ZLIB')\n", (18194, 18248), True, 'import tensorflow as tf\n'), ((18317, 18343), 'waymo_open_dataset.dataset_pb2.MatrixFloat', 'open_dataset.MatrixFloat', ([], {}), '()\n', (18341, 18343), True, 'from waymo_open_dataset import dataset_pb2 as open_dataset\n'), ((23873, 23934), 'tensorflow.constant', 'tf.constant', (['[c.beam_inclination_min, c.beam_inclination_max]'], {}), '([c.beam_inclination_min, c.beam_inclination_max])\n', (23884, 23934), True, 'import tensorflow as tf\n'), ((25024, 25063), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['beam_inclinations'], {}), '(beam_inclinations)\n', (25044, 25063), True, 'import tensorflow as tf\n'), ((5400, 5409), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5406, 5409), True, 'import numpy as np\n'), ((11107, 11129), 'numpy.array', 'np.array', (['[x, y, z, 1]'], {}), '([x, y, z, 1])\n', (11115, 11129), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 13:52:52 2022
@author: sarangbhagwat
"""
from biorefineries.TAL.system_TAL_adsorption_glucose import *
from matplotlib import pyplot as plt
import numpy as np
column = AC401
#%% Across regeneration fluid velocity and cycle time
def MPSP_at_adsorption_design(v, t):
column.regeneration_velocity = v
column.cycle_time = t
return get_SA_MPSP(), AC401.installed_cost/1e6
regen_vels = np.linspace(3., 20., 40)
cycle_times = np.linspace(1., 4., 40)
MPSPs_ads_ds = []
column_costs_ads_r_t = []
#%%
for i in regen_vels:
MPSPs_ads_ds.append([])
column_costs_ads_r_t.append([])
for j in cycle_times:
MPSP, cost = None, None
try:
MPSP, cost = MPSP_at_adsorption_design(i, j)
except:
print(i, j)
MPSP, cost = np.nan, np.nan
MPSPs_ads_ds[-1].append(MPSP)
column_costs_ads_r_t[-1].append(cost)
#%% Set parameters to optimal
min_MPSP = np.min(MPSPs_ads_ds)
opt_indices = np.where(MPSPs_ads_ds==min_MPSP)
column.regeneration_velocity = regen_vels[opt_indices[0][0]]
column.cycle_time = cycle_times[opt_indices[1][0]]
print(min_MPSP, get_SA_MPSP())
#%% Plot MPSP
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(cycle_times, regen_vels, MPSPs_ads_ds, levels=[4., 4.5, 5, 5.5, 6., 6.5, 7.])
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# ax2.set_title('Nonsense (3 masked regions)')
ax2.set_ylabel('Regeneration solvent velocity [m/s]')
ax2.set_xlabel('Cycle time [h]')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('MPSP [$/kg]')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
#%% Plot column cost
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(cycle_times, regen_vels, column_costs_ads_r_t,
levels=[0, 0.25, 0.5, 0.75, 1., 1.25, 1.5, 1.75, 2., 2.25, 2.5],
)
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# ax2.set_title('Nonsense (3 masked regions)')
ax2.set_ylabel('Regeneration solvent velocity [m/s]')
ax2.set_xlabel('Cycle time [h]')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('Column installed cost [10^6 USD]')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
#%%
AC401.regeneration_velocity = 14.4
AC401.target_recovery = None
superficial_velocities = np.linspace(4., 15., 9)
cycle_times = np.linspace(1., 4., 10)
MPSPs = []
column_costs = []
for m in superficial_velocities:
AC401.superficial_velocity = m
MPSPs.append([])
column_costs.append([])
for t in cycle_times:
AC401.cycle_time = t
MPSPs[-1].append(get_SA_MPSP())
column_costs[-1].append(AC401.installed_cost/1e6)
#%% Plot column cost
# plt.contourf(superficial_velocities, cycle_times, MPSPs)
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(cycle_times, superficial_velocities, column_costs)
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# ax2.set_title('Nonsense (3 masked regions)')
ax2.set_ylabel('Superficial feed velocity [m/s]')
ax2.set_xlabel('Cycle time [h]')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('Column installed cost [10^6 USD]')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
#%% Plot MPSP
# plt.contourf(superficial_velocities, cycle_times, MPSPs)
fig1, ax2 = plt.subplots(constrained_layout=True)
CS = ax2.contourf(cycle_times, superficial_velocities, MPSPs)
CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# ax2.set_title('Nonsense (3 masked regions)')
ax2.set_ylabel('Superficial feed velocity [m/s]')
ax2.set_xlabel('Cycle time [h]')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig1.colorbar(CS)
cbar.ax.set_ylabel('MPSP [$/kg]')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
#%% Across titer
AC401.regeneration_velocity = 14.4
AC401.target_recovery = 0.99
AC401.cycle_time = 2.
titers = np.linspace(2., 25., 10)
MPSPs_titer_only = []
costs_titer_only = []
for t in titers:
spec.load_specifications(spec_1=spec.baseline_yield, spec_2=t, spec_3=spec.baseline_productivity)
    MPSPs_titer_only.append(get_SA_MPSP())
costs_titer_only.append(AC401.installed_cost)
spec.load_specifications(spec_1=spec.baseline_yield, spec_2=spec.baseline_titer, spec_3=spec.baseline_productivity)
#%% Plot MPSP
plt.plot(titers, MPSPs_titer_only)
#%% Plot column cost
plt.plot(titers, costs_titer_only)
#%% Across titer and target recovery
# AC401.regeneration_velocity = 14.4
# AC401.target_recovery = 0.99
# # def MPSP_at_titer(t):
# # spec.load_specifications(spec_1=spec.spec_1, spec_2=t, spec_3=spec.spec_3)
# # column.regeneration_velocity = 3. + (17./25.)*t
# # return get_SA_MPSP()
# titers = np.linspace(2., 25., 10)
# recoveries = np.linspace(0.5, 0.99, 10)
# # MPSPs_titer = []
# #%%
# MPSPs_titer = []
# costs_titer = []
# for t in titers:
# MPSPs_titer.append([])
# costs_titer.append([])
# for r in recoveries:
# spec.load_specifications(spec_1=spec.spec_1, spec_2=t, spec_3=spec.spec_3)
# AC401.target_recovery = r
# MPSPs_titer[-1].append(get_SA_MPSP())
# costs_titer[-1].append(AC401.installed_cost)
# spec.load_specifications(spec.baseline_yield, spec.baseline_titer, spec.baseline_productivity)
# #%% Plot MPSP
# fig1, ax2 = plt.subplots(constrained_layout=True)
# CS = ax2.contourf(recoveries, titers, MPSPs_titer,
# # levels=[0., 2.5, 5., 7.5, 10, 12.5, 15, 17.5, 20],
# )
# CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# # ax2.set_title('Nonsense (3 masked regions)')
# ax2.set_ylabel('Fermentation titer [g/L]')
# ax2.set_xlabel('Target adsorbate recovery [% of influent]')
# # Make a colorbar for the ContourSet returned by the contourf call.
# cbar = fig1.colorbar(CS)
# cbar.ax.set_ylabel('MPSP [$/kg]')
# # Add the contour line levels to the colorbar
# cbar.add_lines(CS2)
# #%% Plot column cost
# fig1, ax2 = plt.subplots(constrained_layout=True)
# CS = ax2.contourf(recoveries, titers, costs_titer,
# # levels=[0, 2, 4, 6, 8, 10, 12, 14, 16, 18 ,20],
# )
# CS2 = ax2.contour(CS, levels=CS.levels[::2], colors='black', origin='lower')
# # ax2.set_title('Nonsense (3 masked regions)')
# ax2.set_ylabel('Regeneration solvent velocity [m/s]')
# ax2.set_xlabel('Cycle time [h]')
# # Make a colorbar for the ContourSet returned by the contourf call.
# cbar = fig1.colorbar(CS)
# cbar.ax.set_ylabel('Column installed cost [10^6 USD]')
# # Add the contour line levels to the colorbar
# cbar.add_lines(CS2)
#%% Across titer with rigorous adsorption design optimization
AC401.regeneration_velocity = 14.4
AC401.target_recovery = 0.99
AC401.cycle_time = 2.
regen_vels = np.linspace(1., 14.4, 20)
# cycle_times = np.linspace(0.5, 4., 20)
opt_regen_vels = []
opt_cycle_times = []
def MPSP_and_cost_at_regen_vel(v):
column.regeneration_velocity = v
return get_SA_MPSP(), AC401.installed_cost/1e6
def MPSP_at_titer(t):
spec.load_specifications(spec_1=spec.spec_1, spec_2=t, spec_3=spec.spec_3)
MPSPs_ads_ds = []
costs_ads_ds = []
for i in regen_vels:
m, c = MPSP_and_cost_at_regen_vel(i)
MPSPs_ads_ds.append(m)
costs_ads_ds.append(c)
min_MPSP = np.min(MPSPs_ads_ds)
opt_indices = np.where(MPSPs_ads_ds==min_MPSP)
opt_regen_vels.append(regen_vels[opt_indices[0][0]])
# opt_cycle_times.append(cycle_times[opt_indices[1][0]])
column.regeneration_velocity = opt_regen_vels[-1]
# column.cycle_time = opt_cycle_times[-1]
print('titer =', t)
print(min_MPSP, column.ins[1].F_mass, column.regeneration_velocity, column.cycle_time)
print('\n')
return min_MPSP
titers = np.linspace(3., 30, 20)
#%%
MPSPs_titer = []
for i in titers:
MPSPs_titer.append(MPSP_at_titer(i))
spec.load_specifications(spec.baseline_yield, spec.baseline_titer, spec.baseline_productivity)
#%% Plot MPSP
plt.plot(titers, MPSPs_titer)
#%% Plot optimum regeneration velocity
plt.plot(titers, opt_regen_vels)
#%% Plot
|
[
"numpy.where",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.min",
"matplotlib.pyplot.subplots"
] |
[((448, 474), 'numpy.linspace', 'np.linspace', (['(3.0)', '(20.0)', '(40)'], {}), '(3.0, 20.0, 40)\n', (459, 474), True, 'import numpy as np\n'), ((487, 512), 'numpy.linspace', 'np.linspace', (['(1.0)', '(4.0)', '(40)'], {}), '(1.0, 4.0, 40)\n', (498, 512), True, 'import numpy as np\n'), ((977, 997), 'numpy.min', 'np.min', (['MPSPs_ads_ds'], {}), '(MPSPs_ads_ds)\n', (983, 997), True, 'import numpy as np\n'), ((1012, 1046), 'numpy.where', 'np.where', (['(MPSPs_ads_ds == min_MPSP)'], {}), '(MPSPs_ads_ds == min_MPSP)\n', (1020, 1046), True, 'import numpy as np\n'), ((1214, 1251), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (1226, 1251), True, 'from matplotlib import pyplot as plt\n'), ((1789, 1826), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (1801, 1826), True, 'from matplotlib import pyplot as plt\n'), ((2517, 2542), 'numpy.linspace', 'np.linspace', (['(4.0)', '(15.0)', '(9)'], {}), '(4.0, 15.0, 9)\n', (2528, 2542), True, 'import numpy as np\n'), ((2555, 2580), 'numpy.linspace', 'np.linspace', (['(1.0)', '(4.0)', '(10)'], {}), '(1.0, 4.0, 10)\n', (2566, 2580), True, 'import numpy as np\n'), ((2972, 3009), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (2984, 3009), True, 'from matplotlib import pyplot as plt\n'), ((3590, 3627), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (3602, 3627), True, 'from matplotlib import pyplot as plt\n'), ((4208, 4234), 'numpy.linspace', 'np.linspace', (['(2.0)', '(25.0)', '(10)'], {}), '(2.0, 25.0, 10)\n', (4219, 4234), True, 'import numpy as np\n'), ((4611, 4645), 'matplotlib.pyplot.plot', 'plt.plot', (['titers', 'MPSPs_titer_only'], {}), '(titers, MPSPs_titer_only)\n', (4619, 4645), True, 'from matplotlib import pyplot as plt\n'), ((4668, 4702), 'matplotlib.pyplot.plot', 'plt.plot', (['titers', 'costs_titer_only'], {}), '(titers, costs_titer_only)\n', (4676, 4702), True, 'from matplotlib import pyplot as plt\n'), ((7068, 7094), 'numpy.linspace', 'np.linspace', (['(1.0)', '(14.4)', '(20)'], {}), '(1.0, 14.4, 20)\n', (7079, 7094), True, 'import numpy as np\n'), ((8065, 8089), 'numpy.linspace', 'np.linspace', (['(3.0)', '(30)', '(20)'], {}), '(3.0, 30, 20)\n', (8076, 8089), True, 'import numpy as np\n'), ((8285, 8314), 'matplotlib.pyplot.plot', 'plt.plot', (['titers', 'MPSPs_titer'], {}), '(titers, MPSPs_titer)\n', (8293, 8314), True, 'from matplotlib import pyplot as plt\n'), ((8355, 8387), 'matplotlib.pyplot.plot', 'plt.plot', (['titers', 'opt_regen_vels'], {}), '(titers, opt_regen_vels)\n', (8363, 8387), True, 'from matplotlib import pyplot as plt\n'), ((7604, 7624), 'numpy.min', 'np.min', (['MPSPs_ads_ds'], {}), '(MPSPs_ads_ds)\n', (7610, 7624), True, 'import numpy as np\n'), ((7643, 7677), 'numpy.where', 'np.where', (['(MPSPs_ads_ds == min_MPSP)'], {}), '(MPSPs_ads_ds == min_MPSP)\n', (7651, 7677), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import shutil
import argparse
import subprocess
import numpy as np
import contextlib
import onnx
from cvi_toolkit.utils.mlir_shell import *
from cvi_toolkit.utils.intermediate_file import IntermediateFile
@contextlib.contextmanager
def pushd(new_dir):
previous_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(previous_dir)
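# A minimal usage sketch of the pushd helper above (hypothetical directory name):
#   with pushd("workdir"):
#       ...  # runs with "workdir" as the current directory
#   # the previous working directory is restored on exit, even after an exception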
class ModelTest(object):
def __init__(self, chip, model_path, batch_size):
self.chip = chip
self.model_path = model_path
self.batch_size = batch_size
self.model_name = os.path.split(model_path)[-1].split(".")[0]
self.fp32_mlir = self.model_name + ".mlir"
self.cvimodel = self.model_name + ".cvimodel"
self.input_path = "./input.npz"
def __make_test_calibration_table__(self, table_name):
blobs_interp_npz = IntermediateFile(self.model_name, 'full_precision_interp.npz', False)
ret = mlir_inference(self.fp32_mlir, self.input_path, None, str(blobs_interp_npz))
if ret != 0:
raise RuntimeError("{} mlir inference failed".format(self.model_path))
tensors = np.load(str(blobs_interp_npz))
with open(table_name, "w") as f:
for name in tensors:
threshold = np.abs(np.max(tensors[name]))
if np.isnan(threshold):
threshold = 10.0
elif threshold >= 127.0:
threshold = 127.0
elif threshold <= 0.001:
threshold = 1.0
else:
pass
f.write("{} {}\n".format(name, threshold))
def run(self, quant_mode, input=None):
if self.model_path.endswith(".onnx"):
onnx_model = onnx.load(self.model_path)
input_nodes = onnx_model.graph.input
self.__gen_onnx_input__(input_nodes)
transform_cmd = [
'model_transform.py', '--model_type', 'onnx', '--model_name', self.model_name, '--model_def', self.model_path,
'--image', self.input_path, '--net_input_dims', '1,100', '--tolerance', '0.99,0.99,0.99', '--mlir',
self.fp32_mlir
]
subprocess.run(transform_cmd)
elif self.model_path.endswith(".mlir"):
tmp_mlir_file = IntermediateFile(self.model_name, 'fp32.mlir.tmp', False)
op_info_csv = IntermediateFile(self.model_name, 'op_info.csv', True)
ret = mlir_pseudo_weight(self.model_path, str(tmp_mlir_file))
ret = mlir_opt(str(tmp_mlir_file), self.fp32_mlir, str(op_info_csv))
if ret != 0:
raise RuntimeError("{} opt failed".format(self.model_path))
if quant_mode in ['bf16', 'mix_bf16']:
deploy_cmd = [
'model_deploy.py', '--model_name', self.model_name, '--mlir', self.fp32_mlir, '--quantize',
quant_mode.upper(), '--chip', self.chip, '--image', self.input_path, '--inputs_type', 'SAME',
'--outputs_type', 'SAME', '--tolerance', '0.99,0.99,0.87', '--correctness', '0.99,0.99,0.95', '--debug',
'--cvimodel', self.cvimodel
]
elif "int8" == quant_mode:
# simple cali and convert to cvimodel
table_file = IntermediateFile(self.model_name, 'calibration_table', True)
self.__make_test_calibration_table__(str(table_file))
deploy_cmd = [
'model_deploy.py', '--model_name', self.model_name, '--mlir', self.fp32_mlir, '--calibration_table',
str(table_file), '--chip', self.chip, '--image', self.input_path, '--inputs_type', 'SAME',
'--outputs_type', 'SAME', '--tolerance', '0.10,0.10,0.1', '--correctness', '0.99,0.99,0.93', '--debug',
'--cvimodel', self.cvimodel
]
else:
raise ValueError("Now just support bf16/int8")
subprocess.run(deploy_cmd)
def __gen_onnx_input__(self, input_nodes):
self.input_data = {}
for input in input_nodes:
input_shape = []
for i, dim in enumerate(input.type.tensor_type.shape.dim):
if i == 0 and dim.dim_value <= 0 and self.batch_size != 0:
input_shape.append(self.batch_size)
else:
input_shape.append(dim.dim_value)
if 1 == input.type.tensor_type.elem_type: # 1 for np.float32
self.input_data[input.name] = np.random.randn(*input_shape).astype(np.float32)
# self.input_data[input.name] = np.random.uniform(1, 6, input_shape).astype(np.float32)
elif 7 == input.type.tensor_type.elem_type: # 7 for np.int64 / torch.long
self.input_data[input.name] = np.random.randint(0, 3, input_shape).astype(np.int64)
elif 9 == input.type.tensor_type.elem_type: # 9 for boolean
self.input_data[input.name] = np.random.randint(0, 2, input_shape).astype(np.float32)
else:
raise ValueError("Not support now, add here")
np.savez("input.npz", **self.input_data)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model", help="model definition file, mlir or onnx")
parser.add_argument("--quantize", choices=['bf16', 'int8', 'mix_bf16'], default="bf16", help="quant mode")
parser.add_argument("--batch_size", type=int, default=1, help="batch size")
parser.add_argument("--chip", type=str, default="cv182x", help="chip type")
parser.add_argument("--out_dir", type=str, default="tmp", help="out folder")
# parser.add_argument("--excepts", default='-', help="excepts")
# parser.add_argument("--graph", action='store_true', help="generate graph to pb file")
args = parser.parse_args()
if os.path.exists(args.out_dir):
shutil.rmtree(args.out_dir)
os.makedirs(args.out_dir)
tmp_model_file = os.path.split(args.model)[-1]
shutil.copy(args.model, os.path.join(args.out_dir, tmp_model_file))
with pushd(args.out_dir):
tool = ModelTest(args.chip, tmp_model_file, args.batch_size)
tool.run(args.quantize)
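# Example invocation (hypothetical script and model file names):
#   python model_test.py --model my_model.onnx --quantize int8 --chip cv182x --out_dir tmp
# The model is copied into out_dir, a random input.npz is generated for onnx inputs, and
# model_transform.py / model_deploy.py are run there to produce <model_name>.cvimodel.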
|
[
"os.path.exists",
"numpy.savez",
"os.makedirs",
"argparse.ArgumentParser",
"subprocess.run",
"os.path.join",
"os.getcwd",
"os.chdir",
"os.path.split",
"numpy.max",
"numpy.random.randint",
"numpy.isnan",
"onnx.load",
"shutil.rmtree",
"cvi_toolkit.utils.intermediate_file.IntermediateFile",
"numpy.random.randn"
] |
[((327, 338), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (336, 338), False, 'import os\n'), ((343, 360), 'os.chdir', 'os.chdir', (['new_dir'], {}), '(new_dir)\n', (351, 360), False, 'import os\n'), ((5242, 5267), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5265, 5267), False, 'import argparse\n'), ((5898, 5926), 'os.path.exists', 'os.path.exists', (['args.out_dir'], {}), '(args.out_dir)\n', (5912, 5926), False, 'import os\n'), ((5968, 5993), 'os.makedirs', 'os.makedirs', (['args.out_dir'], {}), '(args.out_dir)\n', (5979, 5993), False, 'import os\n'), ((405, 427), 'os.chdir', 'os.chdir', (['previous_dir'], {}), '(previous_dir)\n', (413, 427), False, 'import os\n'), ((909, 978), 'cvi_toolkit.utils.intermediate_file.IntermediateFile', 'IntermediateFile', (['self.model_name', '"""full_precision_interp.npz"""', '(False)'], {}), "(self.model_name, 'full_precision_interp.npz', False)\n", (925, 978), False, 'from cvi_toolkit.utils.intermediate_file import IntermediateFile\n'), ((3985, 4011), 'subprocess.run', 'subprocess.run', (['deploy_cmd'], {}), '(deploy_cmd)\n', (3999, 4011), False, 'import subprocess\n'), ((5936, 5963), 'shutil.rmtree', 'shutil.rmtree', (['args.out_dir'], {}), '(args.out_dir)\n', (5949, 5963), False, 'import shutil\n'), ((6016, 6041), 'os.path.split', 'os.path.split', (['args.model'], {}), '(args.model)\n', (6029, 6041), False, 'import os\n'), ((6074, 6116), 'os.path.join', 'os.path.join', (['args.out_dir', 'tmp_model_file'], {}), '(args.out_dir, tmp_model_file)\n', (6086, 6116), False, 'import os\n'), ((1810, 1836), 'onnx.load', 'onnx.load', (['self.model_path'], {}), '(self.model_path)\n', (1819, 1836), False, 'import onnx\n'), ((2265, 2294), 'subprocess.run', 'subprocess.run', (['transform_cmd'], {}), '(transform_cmd)\n', (2279, 2294), False, 'import subprocess\n'), ((5159, 5199), 'numpy.savez', 'np.savez', (['"""input.npz"""'], {}), "('input.npz', **self.input_data)\n", (5167, 5199), True, 'import numpy as np\n'), ((1375, 1394), 'numpy.isnan', 'np.isnan', (['threshold'], {}), '(threshold)\n', (1383, 1394), True, 'import numpy as np\n'), ((2371, 2428), 'cvi_toolkit.utils.intermediate_file.IntermediateFile', 'IntermediateFile', (['self.model_name', '"""fp32.mlir.tmp"""', '(False)'], {}), "(self.model_name, 'fp32.mlir.tmp', False)\n", (2387, 2428), False, 'from cvi_toolkit.utils.intermediate_file import IntermediateFile\n'), ((2455, 2509), 'cvi_toolkit.utils.intermediate_file.IntermediateFile', 'IntermediateFile', (['self.model_name', '"""op_info.csv"""', '(True)'], {}), "(self.model_name, 'op_info.csv', True)\n", (2471, 2509), False, 'from cvi_toolkit.utils.intermediate_file import IntermediateFile\n'), ((3348, 3408), 'cvi_toolkit.utils.intermediate_file.IntermediateFile', 'IntermediateFile', (['self.model_name', '"""calibration_table"""', '(True)'], {}), "(self.model_name, 'calibration_table', True)\n", (3364, 3408), False, 'from cvi_toolkit.utils.intermediate_file import IntermediateFile\n'), ((1333, 1354), 'numpy.max', 'np.max', (['tensors[name]'], {}), '(tensors[name])\n', (1339, 1354), True, 'import numpy as np\n'), ((633, 658), 'os.path.split', 'os.path.split', (['model_path'], {}), '(model_path)\n', (646, 658), False, 'import os\n'), ((4552, 4581), 'numpy.random.randn', 'np.random.randn', (['*input_shape'], {}), '(*input_shape)\n', (4567, 4581), True, 'import numpy as np\n'), ((4838, 4874), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)', 'input_shape'], {}), '(0, 3, input_shape)\n', (4855, 4874), True, 'import numpy as 
np\n'), ((5011, 5047), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'input_shape'], {}), '(0, 2, input_shape)\n', (5028, 5047), True, 'import numpy as np\n')]
|
import numpy as np
def validate_1d_array(x, size=None):
'''Validate type and dimensions of an object x.'''
assert isinstance(x, np.ndarray), 'Expecting a numpy array.'
assert x.ndim == 1, 'Expecting a one-dimensional array.'
if size is not None:
assert x.size == size, 'Array size is different from expected.'
def validate_2d_array(x, n_cols=None, n_rows=None):
'''Validate type and dimensions of an object x.'''
assert isinstance(x, np.ndarray), 'Expecting a numpy array.'
assert x.ndim == 2, 'Expecting a two-dimensional array.'
if n_rows is not None:
assert x.shape[0] == n_rows, 'Array size is different from expected.'
if n_cols is not None:
assert x.shape[1] == n_cols, 'Number of columns is different from expected.'
def validate_integer_array(x):
'''Validate array elements are integers.'''
assert (np.round(x) == x).all(), 'Expecting an array of integers.'
def validate_positive_array(x):
'''Validate array elements are positive.'''
assert (x > 0).all(), 'Expecting array of positive elements.'
def validate_non_negative_array(x):
'''Validate array elements are non-negative.'''
assert (x >= 0).all(), 'Expecting array of non-negative elements.'
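# A small self-contained usage sketch of the validators above (illustrative values only):
if __name__ == '__main__':
    x = np.array([1.0, 2.0, 3.0])
    validate_1d_array(x, size=3)
    validate_positive_array(x)
    validate_non_negative_array(x)
    validate_integer_array(np.array([1.0, 2.0]))
    validate_2d_array(np.zeros((2, 4)), n_rows=2, n_cols=4)
    print('All validations passed.')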
|
[
"numpy.round"
] |
[((885, 896), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (893, 896), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import heapq
import numpy as np
from math import sqrt
from .spline_segment import SplineSegment
class SegmentList(object):
def __init__(self, closest_point_search_accuracy=0.001, closest_point_search_max_iterations=5000, segments=None):
self.segments = segments
self.closest_point_search_accuracy = closest_point_search_accuracy
self.closest_point_search_max_iterations = closest_point_search_max_iterations
def construct_from_spline(self, spline, min_arc_length=0, max_arc_length=-1, granularity=1000):
""" Constructs line segments out of the evualated points
with the given granularity
Returns
-------
* segments : list of tuples
Each entry defines a line segment and contains
start,center and end points
Returns
-------
True if successful else if not
"""
points = []
step_size = 1.0 / granularity
if max_arc_length <= 0:
max_arc_length = spline.full_arc_length
if abs(min_arc_length-max_arc_length) > step_size:
u = 0
while u <= 1.0:
arc_length = spline.get_absolute_arc_length(u)
# TODO make more efficient by looking up min_u
if arc_length >= min_arc_length and arc_length <= max_arc_length:
point = spline.query_point_by_parameter(u)
points.append(point)
u += step_size
self.segments = []
index = 0
while index < len(points) - 1:
start = np.array(points[index])
end = np.array(points[index + 1])
center = 0.5 * (end - start) + start
segment = SplineSegment(start, center, end)
self.segments.append(segment)
index += 1
return index > 0
else:
return False
def find_closest_point(self, point):
if self.segments is None or len(self.segments) == 0:
return None, -1
candidates = self.find_two_closest_segments(point)
if len(candidates) >= 2:
closest_point_1, distance_1 = self._find_closest_point_on_segment(candidates[0][1], point)
closest_point_2, distance_2 = self._find_closest_point_on_segment(candidates[1][1], point)
if distance_1 < distance_2:
return closest_point_1, distance_1
else:
return closest_point_2, distance_2
elif len(candidates) == 1:
closest_point, distance = self._find_closest_point_on_segment(candidates[0][1], point)
return closest_point, distance
def find_closest_segment(self, point):
"""
Returns
-------
        * closest_segment : SplineSegment
            The segment whose center is closest to point; contains start, center and end points
        * min_distance : float
            distance to this segment's center
"""
closest_segment = None
min_distance = np.inf
for s in self.segments:
distance = np.linalg.norm(s.center-point)
if distance < min_distance:
closest_segment = s
min_distance = distance
return closest_segment, min_distance
def find_two_closest_segments(self, point):
""" Ueses a heap queue to find the two closest segments
Returns
-------
* closest_segments : List of Tuples
distance to the segment center
Defineiation of a line segment. Contains start,center and end points
"""
heap = [] # heap queue
idx = 0
while idx < len(self.segments):
distance = np.linalg.norm(self.segments[idx].center-point)
# print point,distance,segments[index]
# #Push the value item onto the heap, maintaining the heap invariant.
heapq.heappush(heap, (distance, idx))
idx += 1
closest_segments = []
count = 0
while idx-count > 0 and count < 2:
distance, index = heapq.heappop(heap)
segment = (distance, self.segments[index])
closest_segments.append(segment)
count += 1
return closest_segments
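    # For example (hypothetical distances): with segment centers at distances
    # [5.0, 1.0, 3.0] from the query point, heappop yields (1.0, 1) first and (3.0, 2)
    # second, so the two nearest segments are returned in order of increasing distance.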
def _find_closest_point_on_segment(self, segment, point):
""" Find closest point by dividing the segment until the
difference in the distance gets smaller than the accuracy
Returns
-------
* closest_point : np.ndarray
point on the spline
* distance : float
distance to input point
"""
segment_length = np.inf
distance = np.inf
segment_list = SegmentList(self.closest_point_search_accuracy, self.closest_point_search_max_iterations, segment.divide())
iteration = 0
while segment_length > self.closest_point_search_accuracy and distance > self.closest_point_search_accuracy and iteration < self.closest_point_search_max_iterations:
closest_segment, distance = segment_list.find_closest_segment(point)
segment_length = np.linalg.norm(closest_segment.end-closest_segment.start)
segment_list = SegmentList(self.closest_point_search_accuracy, self.closest_point_search_max_iterations, closest_segment.divide())
iteration += 1
closest_point = closest_segment.center # extract center of closest segment
return closest_point, distance
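# A minimal usage sketch (assumes `spline` exposes full_arc_length, get_absolute_arc_length(u)
# and query_point_by_parameter(u), as required by construct_from_spline above):
#   segment_list = SegmentList()
#   if segment_list.construct_from_spline(spline, granularity=1000):
#       closest_point, distance = segment_list.find_closest_point(np.array([1.0, 2.0, 0.0]))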
|
[
"numpy.array",
"heapq.heappush",
"heapq.heappop",
"numpy.linalg.norm"
] |
[((4192, 4224), 'numpy.linalg.norm', 'np.linalg.norm', (['(s.center - point)'], {}), '(s.center - point)\n', (4206, 4224), True, 'import numpy as np\n'), ((4819, 4868), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.segments[idx].center - point)'], {}), '(self.segments[idx].center - point)\n', (4833, 4868), True, 'import numpy as np\n'), ((5010, 5047), 'heapq.heappush', 'heapq.heappush', (['heap', '(distance, idx)'], {}), '(heap, (distance, idx))\n', (5024, 5047), False, 'import heapq\n'), ((5191, 5210), 'heapq.heappop', 'heapq.heappop', (['heap'], {}), '(heap)\n', (5204, 5210), False, 'import heapq\n'), ((6300, 6359), 'numpy.linalg.norm', 'np.linalg.norm', (['(closest_segment.end - closest_segment.start)'], {}), '(closest_segment.end - closest_segment.start)\n', (6314, 6359), True, 'import numpy as np\n'), ((2711, 2734), 'numpy.array', 'np.array', (['points[index]'], {}), '(points[index])\n', (2719, 2734), True, 'import numpy as np\n'), ((2757, 2784), 'numpy.array', 'np.array', (['points[index + 1]'], {}), '(points[index + 1])\n', (2765, 2784), True, 'import numpy as np\n')]
|
from sklearn.metrics import mean_squared_error, log_loss
from keras.models import Model
from keras.models import load_model
from keras.layers import Input, Dense
from keras.layers.recurrent import SimpleRNN
from keras.layers.merge import multiply, concatenate, add
from keras import backend as K
from keras import initializers
from keras.callbacks import EarlyStopping
from keras.layers.wrappers import TimeDistributed
from keras.callbacks import Callback
from keras import optimizers
import pandas as pd
import numpy as np
from keras.constraints import max_norm, non_neg, unit_norm
np.random.seed(42)
from math import sqrt
import os
import sys
from collections import defaultdict
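# Note: BinaryDense, referenced below when binary=="True", is not defined or imported in
# this file; it is assumed to be supplied by an external module.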
class DeepAFM:
def __init__(self):
pass
def custom_bce(self, y_true, y_pred):
b = K.not_equal(y_true, -K.ones_like(y_true))
b = K.cast(b, dtype='float32')
ans = K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1) * K.mean(b, axis=-1)
ans = K.cast(ans, dtype='float32')
return K.sum(ans)
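    # custom_bce masks padded responses: timesteps labelled -1 in y_true are zeroed out by
    # the mask b before the final sum, so e.g. (hypothetical) y_true = [[1], [0], [-1]]
    # contributes only its first two entries to the loss.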
def custom_activation(self, x):
if self.activation.split('-')[0] == "custom":
a = float(self.activation.split('-')[1])
return 1.0 / ( 1 + K.exp(-a*x) )
        elif self.activation.split('-')[0] == "rounded":
            # clamp the rounded sigmoid output to [0, 1] and return it
            return K.minimum(K.maximum(K.round(K.sigmoid(x)), 0), 1)
def custom_init(self, shape, dtype=None):
return K.cast_to_floatx(self.Q_jk_initialize)
def custom_random(self, shape, dtype=None):
if self.random_init == "normal":
return K.random_normal(shape, 0.5, 0.05, dtype=dtype, seed=22)
else:
return K.random_uniform(shape, 0, 1, dtype=dtype, seed=22)
def f(self, x):
def custom_init(shape, dtype=None):
return K.cast_to_floatx(np.reshape(x, shape))
return custom_init
def build(self, dafm_type="dafm-afm", optimizer="rmsprop", learning_rate=0.01, activation="linear", Q_jk_initialize=0, section="", section_count=0, model1="", stateful=False, theta_student="False", student_count=0, binary="False"):
skills = np.shape(Q_jk_initialize)[1]
steps = np.shape(Q_jk_initialize)[0]
self.activation = activation
if '-' in self.activation:
activation = self.custom_activation
if dafm_type.split("_")[-1] == "different":
skills = int( float(dafm_type.split("_")[-2])*skills )
dafm_type = dafm_type.split('_')[0]
if dafm_type.split("_")[0] == "round-fine-tuned":
try:
self.round_threshold = float(dafm_type.split("_")[-1])
dafm_type = dafm_type.split("_")[0]
except:
pass
q_jk_size = skills
if '^' in dafm_type:
q_jk_size = skills
skills = int (float(dafm_type.split('^')[-1]) * skills)
dafm_type = dafm_type.split('^')[0]
self.dafm_type = dafm_type
if dafm_type == "random-uniform" or dafm_type == "random-normal":
qtrainable, finetuning, randomize = True, False, True
self.random_init = dafm_type.split('-')[-1]
elif dafm_type == "dafm-afm":
qtrainable, finetuning, randomize = False, False, False
elif dafm_type == "fine-tuned":
qtrainable, finetuning, randomize = True, True, False
elif dafm_type == "kcinitialize":
qtrainable, finetuning, randomize = True, False, False
elif dafm_type== "round-fine-tuned":
# if not self.round_threshold == -1:
# rounded_Qjk = np.abs(Q_jk1 - Q_jk_initialize)
# Q_jk1[rounded_Qjk <= self.round_threshold] = Q_jk_initialize[rounded_Qjk <= self.round_threshold]
# Q_jk1[rounded_Qjk > self.round_threshold] = np.ones(np.shape(Q_jk_initialize[rounded_Qjk > self.round_threshold])) - Q_jk_initialize[rounded_Qjk > self.round_threshold]
# else:
Q_jk1 = model1.get_layer("Q_jk").get_weights()[0]
Q_jk1 = np.minimum(np.ones(np.shape(Q_jk1)), np.maximum(np.round(Q_jk1), np.zeros(np.shape(Q_jk1))))
model1.get_layer("Q_jk").set_weights([Q_jk1])
return model1
elif dafm_type == "qjk-dense":
qtrainable, finetuning, randomize = False, False, False
activation_dense = activation
elif dafm_type == "random-qjk-dense-normal" or dafm_type == "random-qjk-dense-uniform":
qtrainable, finetuning, randomize = False, False, True
self.random_init = dafm_type.split('-')[-1]
activation_dense = activation
else:
print ("No Valid Model Found")
sys.exit()
if section == "onehot":
section_input = Input(batch_shape=(None, None, section_count), name='section_input')
if not theta_student=="False":
student_input = Input(batch_shape=(None, None, student_count), name='student_input')
virtual_input1 = Input(batch_shape=(None, None, 1), name='virtual_input1')
if finetuning:
B_k = TimeDistributed(Dense(skills, activation='linear', kernel_initializer=self.f(model1.get_layer("B_k").get_weights()[0]), use_bias=False), name="B_k")(virtual_input1)
T_k = TimeDistributed(Dense(skills, activation='linear', kernel_initializer=self.f(model1.get_layer("T_k").get_weights()[0]), use_bias=False), name="T_k")(virtual_input1)
bias_layer = TimeDistributed(Dense(1, activation='linear', use_bias=False, kernel_initializer=self.f(model1.get_layer("bias").get_weights()[0]), trainable=True), name="bias")(virtual_input1)
else:
B_k = TimeDistributed(Dense(skills, activation='linear', use_bias=False, trainable=True), name="B_k")(virtual_input1)
T_k = TimeDistributed(Dense(skills, activation='linear', use_bias=False, trainable=True), name="T_k")(virtual_input1)
bias_layer = TimeDistributed(Dense(1, activation='linear', use_bias=False, kernel_initializer=initializers.Zeros(), trainable=True), name="bias")(virtual_input1)
step_input = Input(batch_shape=(None, None, steps), name='step_input')
if randomize:
if binary=="False":
Q_jk = TimeDistributed(Dense(q_jk_size, use_bias=False, activation=activation, kernel_initializer=self.custom_random), trainable=qtrainable ,name="Q_jk")(step_input)
else:
Q_jk = TimeDistributed(BinaryDense(q_jk_size, use_bias=False, activation=activation, kernel_initializer=self.custom_random),trainable=qtrainable, name="Q_jk")(step_input)
else:
if binary=="False":
Q_jk = TimeDistributed(Dense(skills, activation=activation, kernel_initializer=self.f(Q_jk_initialize), use_bias=False,trainable=qtrainable), trainable=qtrainable, name="Q_jk")(step_input)
else:
Q_jk = TimeDistributed(BinaryDense(skills, activation=activation, kernel_initializer=self.f(Q_jk_initialize),trainable=qtrainable,
use_bias=False), name="Q_jk", trainable=qtrainable)(step_input)
if dafm_type == "random-qjk-dense-normal" or dafm_type == "random-qjk-dense-uniform":
if binary =="False":
Q_jk = TimeDistributed(Dense(skills, activation=activation_dense, use_bias=False, kernel_initializer=self.custom_random, trainable=True), name="Q_jk_dense")(Q_jk)
else:
Q_jk = TimeDistributed(BinaryDense(skills, activation=activation_dense, use_bias=False, kernel_initializer=self.custom_random, trainable=True), name="Q_jk_dense")(Q_jk)
elif dafm_type == "qjk-dense":
if binary =='False':
Q_jk = TimeDistributed(Dense(skills, activation=activation_dense, use_bias=False, kernel_initializer=initializers.Identity(), trainable=True), name="Q_jk_dense")(Q_jk)
else:
Q_jk = TimeDistributed(BinaryDense(skills, activation=activation_dense, use_bias=False, kernel_initializer=initializers.Identity(), trainable=True), name="Q_jk_dense")(Q_jk)
else:
pass
Qjk_mul_Bk = multiply([Q_jk, B_k])
sum_Qjk_Bk = TimeDistributed(Dense(1, activation='linear', trainable=False, kernel_initializer=initializers.Ones(), use_bias=False), trainable=False,name="sum_Qjk_Bk")(Qjk_mul_Bk)
P_k = SimpleRNN(skills, kernel_initializer=initializers.Identity(), recurrent_initializer=initializers.Identity() , use_bias=False, trainable=False, activation='linear', return_sequences=True, name="P_k")(Q_jk)
Qjk_mul_Pk_mul_Tk = multiply([Q_jk, P_k, T_k])
sum_Qjk_Pk_Tk = TimeDistributed(Dense(1, activation='linear', trainable=False, kernel_initializer=initializers.Ones(), use_bias=False),trainable=False, name="sum_Qjk_Pk_Tk")(Qjk_mul_Pk_mul_Tk)
Concatenate = concatenate([bias_layer, sum_Qjk_Bk, sum_Qjk_Pk_Tk])
if not (theta_student=="False"):
if finetuning:
theta = TimeDistributed(Dense(1, activation="linear", use_bias=False, kernel_initializer=self.f(model1.get_layer("theta").get_weights()[0])), name='theta')(student_input)
else:
theta = TimeDistributed(Dense(1, activation="linear", use_bias=False), name='theta')(student_input)
Concatenate = concatenate([Concatenate, theta])
if section == "onehot":
if finetuning:
S_k = TimeDistributed(Dense(1, activation="linear", use_bias=False, kernel_initializer=self.f(model1.get_layer("S_k").get_weights()[0])), name='S_k')(section_input)
else:
S_k = TimeDistributed(Dense(1, activation="linear", use_bias=False), name='S_k')(section_input)
Concatenate = concatenate([Concatenate, S_k])
output = TimeDistributed(Dense(1, activation="sigmoid", trainable=False, kernel_initializer=initializers.Ones(), use_bias=False), trainable=False, name="output")(Concatenate)
if section == "onehot" and not (theta_student=="False"):
model = Model(inputs=[virtual_input1, step_input, section_input, student_input], outputs=output)
elif section == "onehot" and theta_student=="False":
model = Model(inputs=[virtual_input1, step_input, section_input], outputs=output)
elif not (section == "onehot") and not (theta_student=="False"):
model = Model(inputs=[virtual_input1, step_input, student_input], outputs=output)
else:
model = Model(inputs=[virtual_input1, step_input], outputs=output)
d_optimizer = {"rmsprop":optimizers.RMSprop(lr=learning_rate), "adam":optimizers.Adam(lr=learning_rate), "adagrad":optimizers.Adagrad(lr=learning_rate) }
model.compile( optimizer = d_optimizer[optimizer],
loss = self.custom_bce)
return model
def fit(self, x_train, y_train, x_train_section, x_train_student, x_test, y_test, x_test_section, x_test_student, model, epochs=5, batch_size=32, loaded=False, validation=True):
loss_epoch = {"epoch":[], "loss":[], "val_loss":[], 'patience':[]}
print ("Max Epochs", epochs)
if self.dafm_type == "round-fine-tuned" or loaded:
best_model = model
patience, epoch = 0 , 1
prev_best_val_loss = np.inf
counter = 0
virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
virtual_input1_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
if not validation:
earlyStopping = EarlyStopping(monitor='loss', patience=2)
if len(x_train_student) == 0:
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
else:
history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
else:
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=epochs , callbacks=[earlyStopping], verbose=1, shuffle=True)
else:
history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=epochs, callbacks=[earlyStopping], verbose=1, shuffle=True)
# print ("Epoch Number:", counter, "Patience:", 0, "val loss:", current_val_loss)
loss_epoch["loss"].extend(history_callback.history["loss"])
loss_epoch["val_loss"].extend(history_callback.history["loss"])
loss_epoch["epoch"].extend(list(range(epochs)))
loss_epoch["patience"].extend(list(range(epochs)))
best_model = model
epoch = epochs
else:
while (patience <=5 and epoch <= epochs and (not self.dafm_type == "round-fine-tuned") and (loaded == False)):
permutation = np.random.permutation(x_train.shape[0])
x_train = x_train[permutation]
y_train = y_train[permutation]
counter += 1
if len(x_train_student) == 0:
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test], y_test), verbose=0, shuffle=True)
else:
x_train_section = x_train_section[permutation]
history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_section], y_test), verbose=0, shuffle=True)
else:
x_train_student = x_train_student[permutation]
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_student], y_test), verbose=0, shuffle=True)
else:
x_train_section = x_train_section[permutation]
history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=1, validation_data=([virtual_input1_test, x_test, x_test_section, x_test_student], y_test), verbose=0, shuffle=True)
current_val_loss = history_callback.history["val_loss"][0]
print ("Epoch Number:", counter, "Patience:", patience, "val loss:", current_val_loss)
loss_epoch["val_loss"].append(history_callback.history["val_loss"][0])
loss_epoch["loss"].append(history_callback.history["loss"][0])
loss_epoch["epoch"].append(counter)
loss_epoch["patience"].append(patience)
if (prev_best_val_loss - current_val_loss) > 0:
best_model = model
epoch += patience + 1
patience = 0
prev_best_val_loss = current_val_loss
else:
patience += 1
if len(x_train_student)==0:
if len(x_train_section)==0:
x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train]), x_train)
else:
x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_section]), x_train)
else:
if len(x_train_section)==0:
x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_student]), x_train)
else:
x = self.bce_loss(y_train, best_model.predict([virtual_input1, x_train, x_train_section, x_train_student]), x_train)
L, N = -np.sum(x), len(x)
model_param = best_model.count_params()
print ("PARAM", model_param)
AIC = 2 * model_param - 2 * L
BIC = model_param * np.log(N) - 2 * L
B_k = best_model.get_layer("B_k").get_weights()[0]
T_k = best_model.get_layer("T_k").get_weights()[0]
return best_model, AIC, BIC, epoch, loss_epoch
def fit_batches(self, dafmdata_obj, model, max_epochs=30, earlyStop="val_loss", loaded=False):
print ("Max Epochs", max_epochs)
loss_epoch = {"epoch":[], "loss":[], earlyStop:[], 'patience':[]}
patience, epoch = 0, 1
prev_best_val_loss = np.inf
counter = 0
if self.dafm_type == "round-fine-tuned" or loaded:
best_model = model
while (patience <= 2 and epoch <= max_epochs and loaded==False and (not self.dafm_type == "round-fine-tuned")):
counter += 1
current_val_loss = 0
total_loss, total_train_samples = 0, 0
train = dafmdata_obj.data_generator1("train")
test = dafmdata_obj.data_generator1("test")
bc = 0
for x_train, y_train, x_train_section, x_train_student, batch_size in train:
permutation = np.random.permutation(x_train.shape[0])
x_train = x_train[permutation]
y_train = y_train[permutation]
virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
print ("Batch Number:", bc, np.shape(x_train))
if len(x_train_student)==0:
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train], y_train, batch_size=batch_size, epochs=1, verbose=0)
else:
x_train_section = x_train_section[permutation]
history_callback = model.fit([virtual_input1, x_train, x_train_section], y_train, batch_size=batch_size, epochs=1, verbose=1)
else:
x_train_student = x_train_student[permutation]
if len(x_train_section) == 0:
history_callback = model.fit([virtual_input1, x_train, x_train_student], y_train, batch_size=batch_size, epochs=1, verbose=0)
else:
x_train_section = x_train_section[permutation]
history_callback = model.fit([virtual_input1, x_train, x_train_section, x_train_student], y_train, batch_size=batch_size, epochs=1, verbose=1)
total_loss += history_callback.history["loss"][0] * len(x_train)
total_train_samples += len(x_train)
bc += 1
if earlyStop == "rmse":
current_avg_rmse = self.predict_batches(dafmdata_obj, model)
loss_epoch["rmse"].append(current_avg_rmse)
else:
current_avg_rmse = np.mean(self.bce_loss_batches(dafmdata_obj, model, utype="test"))
loss_epoch["val_loss"].append(current_avg_rmse)
loss_epoch["loss"].append(float(total_loss)/float(total_train_samples))
loss_epoch["epoch"].append(counter)
loss_epoch["patience"].append(patience)
print ("Epoch Number:", counter, "Patience:", patience, earlyStop, current_avg_rmse, "Loss:", loss_epoch["loss"][-1])
if (prev_best_val_loss - current_avg_rmse) > 0:
best_model = model
epoch += patience + 1
patience = 0
prev_best_val_loss = current_avg_rmse
else:
patience += 1
x = self.bce_loss_batches(dafmdata_obj, best_model, utype="train")
L, N = -np.sum(x), len(x)
model_param = best_model.count_params()
AIC = 2 * model_param - 2 * L
BIC = model_param * np.log(N) - 2 * L
return best_model, AIC, BIC, epoch, loss_epoch
def L(self, y_true, y_pred, x_train):
mask_matrix = np.sum(x_train, axis=2).flatten()
num_users, max_responses = np.shape(x_train)[0], np.shape(x_train)[1]
y_pred = y_pred.flatten()
y_true = y_true.flatten()
rmse = []
SSR = 0
response = 0
L = 0
N = 0
c = 0
for user in range(num_users):
for i in range(user * max_responses, (user + 1) * max_responses):
if mask_matrix[i] == 0 or y_true[i] == -1:
break
if y_pred[i] < 1 and y_pred[i] > 0:
L += ( y_true[i] * np.log(y_pred[i]) + (1 - y_true[i]) * np.log(1 - y_pred[i]) )
else:
c += 1
eps = 1e-4
if y_pred[i] == y_true[i]:
pass
else:
y_pred[i] = max(eps, min(1 - eps, y_pred[i]))
L += ( y_true[i] * np.log(y_pred[i]) + (1 - y_true[i]) * np.log(1 - y_pred[i]) )
response += 1
N += 1
return L, N
def L_batches(self, dafmdata_obj, model):
L = 0
N = 0
train_generator = dafmdata_obj.data_generator1("train")
for x_train, y_train, x_train_section, x_train_student, batch_size in train_generator:
virtual_input1 = np.ones([np.shape(x_train)[0], np.shape(x_train)[1], 1])
if len(x_train_student)==0:
if len(x_train_section) == 0:
l, x= self.L(y_train, model.predict([virtual_input1, x_train]), x_train)
L += l
else:
l, x= self.L(y_train, model.predict([virtual_input1, x_train, x_train_section]), x_train)
L += l
else:
if len(x_train_section) == 0:
l, x= self.L(y_train, model.predict([virtual_input1, x_train, x_train_student]), x_train)
L += l
else:
l, x= self.L(y_train, model.predict([virtual_input1, x_train, x_train_section, x_train_student]), x_train)
L += l
N += len(x_train)
return L, N
def predict(self, x_test, y_test, x_test_section, x_test_student, model, batch_size=32):
virtual_input_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
if len(x_test_student)==0:
if len(x_test_section)==0:
y_pred = model.predict([virtual_input_test, x_test], batch_size=batch_size)
else:
y_pred = model.predict([virtual_input_test, x_test, x_test_section] , batch_size=batch_size)
else:
if len(x_test_section)==0:
y_pred = model.predict([virtual_input_test, x_test, x_test_student], batch_size=batch_size)
else:
y_pred = model.predict([virtual_input_test, x_test, x_test_section, x_test_student] , batch_size=batch_size)
rmse = self.rmse_masking(y_test, y_pred, x_test)
return rmse
def prediction(self, x_test, x_test_section, x_test_student, model, batch_size=32):
virtual_input_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
if len(x_test_student)==0:
if len(x_test_section)==0:
y_pred = model.predict([virtual_input_test, x_test], batch_size=batch_size)
else:
y_pred = model.predict([virtual_input_test, x_test, x_test_section] , batch_size=batch_size)
else:
if len(x_test_section)==0:
y_pred = model.predict([virtual_input_test, x_test, x_test_student], batch_size=batch_size)
else:
y_pred = model.predict([virtual_input_test, x_test, x_test_section, x_test_student], batch_size=batch_size)
return y_pred
def predict_batches(self, dafmdata_obj, model):
test_generator = dafmdata_obj.data_generator1("test")
avg_rmse = 0
t_users = 0
for x_test, y_test, x_test_section, x_test_student, batch_size in test_generator:
avg_rmse = avg_rmse + len(x_test)*self.predict(x_test, y_test, x_test_section, x_test_student, model, batch_size)
t_users = t_users + len(x_test)
return avg_rmse/float(t_users)
def bce_loss_batches(self, dafmdata_obj, model, utype="train"):
ll = []
test_generator = dafmdata_obj.data_generator1(utype)
for x_test, y_test, x_test_section, x_test_student, batch_size in test_generator:
virtual_input_test = np.ones([np.shape(x_test)[0], np.shape(x_test)[1], 1])
if len(x_test_student) == 0:
if len(x_test_section) == 0:
ll.extend(self.bce_loss(y_test, model.predict([virtual_input_test, x_test], batch_size=batch_size), x_test))
else:
ll.extend(self.bce_loss(y_test, model.predict([virtual_input_test, x_test, x_test_section], batch_size=batch_size), x_test))
else:
if len(x_test_section) == 0:
ll.extend(self.bce_loss(y_test, model.predict([virtual_input_test, x_test, x_test_student], batch_size=batch_size), x_test))
else:
ll.extend(self.bce_loss(y_test, model.predict([virtual_input_test, x_test, x_test_section, x_test_student], batch_size=batch_size), x_test))
return ll
def bce_loss(self, y_true, y_pred, x_test):
mask_matrix = np.sum(x_test, axis=2).flatten()
num_users, max_responses = np.shape(x_test)[0], np.shape(x_test)[1]
y_pred = y_pred.flatten()
y_true = y_true.flatten()
ll = []
response = 0
for user in range(num_users):
log_loss = []
for i in range(user * max_responses, (user + 1) * max_responses):
if mask_matrix[i] == 0 or y_true[i] == -1:
break
response += 1
eps = 1e-7
y_pred[i] = max(eps, min(1 - eps, y_pred[i]))
log_loss.append( -( y_true[i] * np.log(y_pred[i]) + (1 - y_true[i]) * np.log(1 - y_pred[i]) ) )
ll.extend(log_loss)
return ll
def rmse_masking(self, y_true, y_pred, x_test):
mask_matrix = np.sum(x_test, axis=2).flatten()
num_users, max_responses = np.shape(x_test)[0], np.shape(x_test)[1]
y_pred = y_pred.flatten()
y_true = y_true.flatten()
rmse = []
for user in range(num_users):
diff_sq, response = 0, 0
for i in range(user * max_responses, (user + 1) * max_responses):
if mask_matrix[i] == 0 or y_true[i] == -1:
continue
# continue for response level evaluation
diff_sq += (y_true[i] - y_pred[i]) ** 2
response += 1
rmse.append(sqrt(diff_sq/float(response)))
return np.mean(rmse)
if __name__ == "__main__":
x_train = [ [ [0, 0, 1], [0, 0, 1], [1, 0, 0], [0, 0, 0] ],
[ [1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 0] ],
[ [0, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1] ],
[ [1, 0, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0] ] ]
x_test = [ [ [ 1, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 1] ] ]
y_test = [ [ [-1], [-1], [-1], [-1] ] ]
y_train = [ [ [0], [0], [1], [-1] ],
[ [1], [0], [1], [-1] ],
[ [0], [0], [0], [0] ],
[ [0], [1], [0], [0] ] ]
Q_jk_initialize = np.random.rand(3,2)
Q_jk_initialize = np.array([[1, 0], [0, 1], [1, 1]])
    obj = DeepAFM()
    model = obj.build(dafm_type="dafm-afm", Q_jk_initialize=Q_jk_initialize)
    # y_test is all -1 (unlabelled), so generate raw predictions for the test sequences
    y_pred = obj.prediction(np.array(x_test), [], [], model)
    print(y_pred)
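    # A minimal training sketch using the toy arrays above (illustrative settings only):
    # best_model, AIC, BIC, n_epochs, loss_epoch = obj.fit(
    #     np.array(x_train), np.array(y_train), [], [],
    #     np.array(x_test), np.array(y_test), [], [],
    #     model, epochs=2, batch_size=2)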
|
[
"numpy.random.rand",
"keras.backend.sum",
"keras.initializers.Identity",
"keras.backend.cast_to_floatx",
"numpy.log",
"numpy.array",
"sys.exit",
"keras.layers.Dense",
"numpy.mean",
"keras.layers.merge.multiply",
"numpy.reshape",
"keras.layers.merge.concatenate",
"keras.initializers.Ones",
"keras.backend.ones_like",
"numpy.random.seed",
"keras.models.Model",
"keras.callbacks.EarlyStopping",
"keras.optimizers.Adagrad",
"keras.backend.exp",
"keras.backend.sigmoid",
"numpy.round",
"numpy.random.permutation",
"keras.optimizers.Adam",
"keras.backend.cast",
"keras.initializers.Zeros",
"numpy.shape",
"keras.backend.random_uniform",
"keras.backend.mean",
"keras.backend.binary_crossentropy",
"keras.layers.Input",
"numpy.sum",
"keras.backend.random_normal",
"keras.optimizers.RMSprop"
] |
[((583, 601), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (597, 601), True, 'import numpy as np\n'), ((27585, 27605), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)'], {}), '(3, 2)\n', (27599, 27605), True, 'import numpy as np\n'), ((27627, 27661), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [1, 1]]'], {}), '([[1, 0], [0, 1], [1, 1]])\n', (27635, 27661), True, 'import numpy as np\n'), ((845, 871), 'keras.backend.cast', 'K.cast', (['b'], {'dtype': '"""float32"""'}), "(b, dtype='float32')\n", (851, 871), True, 'from keras import backend as K\n'), ((976, 1004), 'keras.backend.cast', 'K.cast', (['ans'], {'dtype': '"""float32"""'}), "(ans, dtype='float32')\n", (982, 1004), True, 'from keras import backend as K\n'), ((1020, 1030), 'keras.backend.sum', 'K.sum', (['ans'], {}), '(ans)\n', (1025, 1030), True, 'from keras import backend as K\n'), ((1401, 1439), 'keras.backend.cast_to_floatx', 'K.cast_to_floatx', (['self.Q_jk_initialize'], {}), '(self.Q_jk_initialize)\n', (1417, 1439), True, 'from keras import backend as K\n'), ((4977, 5034), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None, 1)', 'name': '"""virtual_input1"""'}), "(batch_shape=(None, None, 1), name='virtual_input1')\n", (4982, 5034), False, 'from keras.layers import Input, Dense\n'), ((6097, 6154), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None, steps)', 'name': '"""step_input"""'}), "(batch_shape=(None, None, steps), name='step_input')\n", (6102, 6154), False, 'from keras.layers import Input, Dense\n'), ((8137, 8158), 'keras.layers.merge.multiply', 'multiply', (['[Q_jk, B_k]'], {}), '([Q_jk, B_k])\n', (8145, 8158), False, 'from keras.layers.merge import multiply, concatenate, add\n'), ((8596, 8622), 'keras.layers.merge.multiply', 'multiply', (['[Q_jk, P_k, T_k]'], {}), '([Q_jk, P_k, T_k])\n', (8604, 8622), False, 'from keras.layers.merge import multiply, concatenate, add\n'), ((8846, 8898), 'keras.layers.merge.concatenate', 'concatenate', (['[bias_layer, sum_Qjk_Bk, sum_Qjk_Pk_Tk]'], {}), '([bias_layer, sum_Qjk_Bk, sum_Qjk_Pk_Tk])\n', (8857, 8898), False, 'from keras.layers.merge import multiply, concatenate, add\n'), ((26990, 27003), 'numpy.mean', 'np.mean', (['rmse'], {}), '(rmse)\n', (26997, 27003), True, 'import numpy as np\n'), ((27677, 27694), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (27685, 27694), True, 'import numpy as np\n'), ((27696, 27713), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (27704, 27713), True, 'import numpy as np\n'), ((27715, 27731), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (27723, 27731), True, 'import numpy as np\n'), ((27733, 27749), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (27741, 27749), True, 'import numpy as np\n'), ((27886, 27902), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (27894, 27902), True, 'import numpy as np\n'), ((27904, 27920), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (27912, 27920), True, 'import numpy as np\n'), ((943, 961), 'keras.backend.mean', 'K.mean', (['b'], {'axis': '(-1)'}), '(b, axis=-1)\n', (949, 961), True, 'from keras import backend as K\n'), ((1549, 1604), 'keras.backend.random_normal', 'K.random_normal', (['shape', '(0.5)', '(0.05)'], {'dtype': 'dtype', 'seed': '(22)'}), '(shape, 0.5, 0.05, dtype=dtype, seed=22)\n', (1564, 1604), True, 'from keras import backend as K\n'), ((1638, 1689), 'keras.backend.random_uniform', 'K.random_uniform', (['shape', '(0)', '(1)'], {'dtype': 'dtype', 'seed': '(22)'}), '(shape, 
0, 1, dtype=dtype, seed=22)\n', (1654, 1689), True, 'from keras import backend as K\n'), ((2095, 2120), 'numpy.shape', 'np.shape', (['Q_jk_initialize'], {}), '(Q_jk_initialize)\n', (2103, 2120), True, 'import numpy as np\n'), ((2140, 2165), 'numpy.shape', 'np.shape', (['Q_jk_initialize'], {}), '(Q_jk_initialize)\n', (2148, 2165), True, 'import numpy as np\n'), ((4746, 4814), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None, section_count)', 'name': '"""section_input"""'}), "(batch_shape=(None, None, section_count), name='section_input')\n", (4751, 4814), False, 'from keras.layers import Input, Dense\n'), ((4882, 4950), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None, student_count)', 'name': '"""student_input"""'}), "(batch_shape=(None, None, student_count), name='student_input')\n", (4887, 4950), False, 'from keras.layers import Input, Dense\n'), ((9315, 9348), 'keras.layers.merge.concatenate', 'concatenate', (['[Concatenate, theta]'], {}), '([Concatenate, theta])\n', (9326, 9348), False, 'from keras.layers.merge import multiply, concatenate, add\n'), ((9746, 9777), 'keras.layers.merge.concatenate', 'concatenate', (['[Concatenate, S_k]'], {}), '([Concatenate, S_k])\n', (9757, 9777), False, 'from keras.layers.merge import multiply, concatenate, add\n'), ((10047, 10139), 'keras.models.Model', 'Model', ([], {'inputs': '[virtual_input1, step_input, section_input, student_input]', 'outputs': 'output'}), '(inputs=[virtual_input1, step_input, section_input, student_input],\n outputs=output)\n', (10052, 10139), False, 'from keras.models import Model\n'), ((10585, 10621), 'keras.optimizers.RMSprop', 'optimizers.RMSprop', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (10603, 10621), False, 'from keras import optimizers\n'), ((10630, 10663), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (10645, 10663), False, 'from keras import optimizers\n'), ((10675, 10711), 'keras.optimizers.Adagrad', 'optimizers.Adagrad', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (10693, 10711), False, 'from keras import optimizers\n'), ((11539, 11580), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""loss"""', 'patience': '(2)'}), "(monitor='loss', patience=2)\n", (11552, 11580), False, 'from keras.callbacks import EarlyStopping\n'), ((812, 831), 'keras.backend.ones_like', 'K.ones_like', (['y_true'], {}), '(y_true)\n', (823, 831), True, 'from keras import backend as K\n'), ((893, 930), 'keras.backend.binary_crossentropy', 'K.binary_crossentropy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (914, 930), True, 'from keras import backend as K\n'), ((1791, 1811), 'numpy.reshape', 'np.reshape', (['x', 'shape'], {}), '(x, shape)\n', (1801, 1811), True, 'import numpy as np\n'), ((10217, 10290), 'keras.models.Model', 'Model', ([], {'inputs': '[virtual_input1, step_input, section_input]', 'outputs': 'output'}), '(inputs=[virtual_input1, step_input, section_input], outputs=output)\n', (10222, 10290), False, 'from keras.models import Model\n'), ((13136, 13175), 'numpy.random.permutation', 'np.random.permutation', (['x_train.shape[0]'], {}), '(x_train.shape[0])\n', (13157, 13175), True, 'import numpy as np\n'), ((16067, 16076), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (16073, 16076), True, 'import numpy as np\n'), ((16237, 16246), 'numpy.log', 'np.log', (['N'], {}), '(N)\n', (16243, 16246), True, 'import numpy as np\n'), ((17303, 17342), 'numpy.random.permutation', 'np.random.permutation', 
(['x_train.shape[0]'], {}), '(x_train.shape[0])\n', (17324, 17342), True, 'import numpy as np\n'), ((19802, 19811), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (19808, 19811), True, 'import numpy as np\n'), ((19935, 19944), 'numpy.log', 'np.log', (['N'], {}), '(N)\n', (19941, 19944), True, 'import numpy as np\n'), ((20074, 20097), 'numpy.sum', 'np.sum', (['x_train'], {'axis': '(2)'}), '(x_train, axis=2)\n', (20080, 20097), True, 'import numpy as np\n'), ((20143, 20160), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (20151, 20160), True, 'import numpy as np\n'), ((20165, 20182), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (20173, 20182), True, 'import numpy as np\n'), ((25539, 25561), 'numpy.sum', 'np.sum', (['x_test'], {'axis': '(2)'}), '(x_test, axis=2)\n', (25545, 25561), True, 'import numpy as np\n'), ((25607, 25623), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (25615, 25623), True, 'import numpy as np\n'), ((25628, 25644), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (25636, 25644), True, 'import numpy as np\n'), ((26337, 26359), 'numpy.sum', 'np.sum', (['x_test'], {'axis': '(2)'}), '(x_test, axis=2)\n', (26343, 26359), True, 'import numpy as np\n'), ((26405, 26421), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (26413, 26421), True, 'import numpy as np\n'), ((26426, 26442), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (26434, 26442), True, 'import numpy as np\n'), ((1206, 1219), 'keras.backend.exp', 'K.exp', (['(-a * x)'], {}), '(-a * x)\n', (1211, 1219), True, 'from keras import backend as K\n'), ((5675, 5741), 'keras.layers.Dense', 'Dense', (['skills'], {'activation': '"""linear"""', 'use_bias': '(False)', 'trainable': '(True)'}), "(skills, activation='linear', use_bias=False, trainable=True)\n", (5680, 5741), False, 'from keras.layers import Input, Dense\n'), ((5805, 5871), 'keras.layers.Dense', 'Dense', (['skills'], {'activation': '"""linear"""', 'use_bias': '(False)', 'trainable': '(True)'}), "(skills, activation='linear', use_bias=False, trainable=True)\n", (5810, 5871), False, 'from keras.layers import Input, Dense\n'), ((8399, 8422), 'keras.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (8420, 8422), False, 'from keras import initializers\n'), ((8446, 8469), 'keras.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (8467, 8469), False, 'from keras import initializers\n'), ((10384, 10457), 'keras.models.Model', 'Model', ([], {'inputs': '[virtual_input1, step_input, student_input]', 'outputs': 'output'}), '(inputs=[virtual_input1, step_input, student_input], outputs=output)\n', (10389, 10457), False, 'from keras.models import Model\n'), ((10492, 10550), 'keras.models.Model', 'Model', ([], {'inputs': '[virtual_input1, step_input]', 'outputs': 'output'}), '(inputs=[virtual_input1, step_input], outputs=output)\n', (10497, 10550), False, 'from keras.models import Model\n'), ((11351, 11368), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (11359, 11368), True, 'import numpy as np\n'), ((11373, 11390), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (11381, 11390), True, 'import numpy as np\n'), ((11438, 11454), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (11446, 11454), True, 'import numpy as np\n'), ((11459, 11475), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (11467, 11475), True, 'import numpy as np\n'), ((17571, 17588), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (17579, 17588), True, 
'import numpy as np\n'), ((22381, 22397), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (22389, 22397), True, 'import numpy as np\n'), ((22402, 22418), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (22410, 22418), True, 'import numpy as np\n'), ((23230, 23246), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (23238, 23246), True, 'import numpy as np\n'), ((23251, 23267), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (23259, 23267), True, 'import numpy as np\n'), ((6248, 6347), 'keras.layers.Dense', 'Dense', (['q_jk_size'], {'use_bias': '(False)', 'activation': 'activation', 'kernel_initializer': 'self.custom_random'}), '(q_jk_size, use_bias=False, activation=activation, kernel_initializer=\n self.custom_random)\n', (6253, 6347), False, 'from keras.layers import Input, Dense\n'), ((7276, 7393), 'keras.layers.Dense', 'Dense', (['skills'], {'activation': 'activation_dense', 'use_bias': '(False)', 'kernel_initializer': 'self.custom_random', 'trainable': '(True)'}), '(skills, activation=activation_dense, use_bias=False,\n kernel_initializer=self.custom_random, trainable=True)\n', (7281, 7393), False, 'from keras.layers import Input, Dense\n'), ((8262, 8281), 'keras.initializers.Ones', 'initializers.Ones', ([], {}), '()\n', (8279, 8281), False, 'from keras import initializers\n'), ((8729, 8748), 'keras.initializers.Ones', 'initializers.Ones', ([], {}), '()\n', (8746, 8748), False, 'from keras import initializers\n'), ((9213, 9258), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""', 'use_bias': '(False)'}), "(1, activation='linear', use_bias=False)\n", (9218, 9258), False, 'from keras.layers import Input, Dense\n'), ((9646, 9691), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""', 'use_bias': '(False)'}), "(1, activation='linear', use_bias=False)\n", (9651, 9691), False, 'from keras.layers import Input, Dense\n'), ((9879, 9898), 'keras.initializers.Ones', 'initializers.Ones', ([], {}), '()\n', (9896, 9898), False, 'from keras import initializers\n'), ((21408, 21425), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (21416, 21425), True, 'import numpy as np\n'), ((21430, 21447), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (21438, 21447), True, 'import numpy as np\n'), ((24630, 24646), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (24638, 24646), True, 'import numpy as np\n'), ((24651, 24667), 'numpy.shape', 'np.shape', (['x_test'], {}), '(x_test)\n', (24659, 24667), True, 'import numpy as np\n'), ((1317, 1329), 'keras.backend.sigmoid', 'K.sigmoid', (['x'], {}), '(x)\n', (1326, 1329), True, 'from keras import backend as K\n'), ((6007, 6027), 'keras.initializers.Zeros', 'initializers.Zeros', ([], {}), '()\n', (6025, 6027), False, 'from keras import initializers\n'), ((17479, 17496), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (17487, 17496), True, 'import numpy as np\n'), ((17501, 17518), 'numpy.shape', 'np.shape', (['x_train'], {}), '(x_train)\n', (17509, 17518), True, 'import numpy as np\n'), ((20643, 20660), 'numpy.log', 'np.log', (['y_pred[i]'], {}), '(y_pred[i])\n', (20649, 20660), True, 'import numpy as np\n'), ((20681, 20702), 'numpy.log', 'np.log', (['(1 - y_pred[i])'], {}), '(1 - y_pred[i])\n', (20687, 20702), True, 'import numpy as np\n'), ((21000, 21017), 'numpy.log', 'np.log', (['y_pred[i]'], {}), '(y_pred[i])\n', (21006, 21017), True, 'import numpy as np\n'), ((21038, 21059), 'numpy.log', 'np.log', (['(1 - y_pred[i])'], {}), '(1 - 
y_pred[i])\n', (21044, 21059), True, 'import numpy as np\n'), ((26147, 26164), 'numpy.log', 'np.log', (['y_pred[i]'], {}), '(y_pred[i])\n', (26153, 26164), True, 'import numpy as np\n'), ((26185, 26206), 'numpy.log', 'np.log', (['(1 - y_pred[i])'], {}), '(1 - y_pred[i])\n', (26191, 26206), True, 'import numpy as np\n'), ((4037, 4052), 'numpy.shape', 'np.shape', (['Q_jk1'], {}), '(Q_jk1)\n', (4045, 4052), True, 'import numpy as np\n'), ((4066, 4081), 'numpy.round', 'np.round', (['Q_jk1'], {}), '(Q_jk1)\n', (4074, 4081), True, 'import numpy as np\n'), ((4674, 4684), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4682, 4684), False, 'import sys\n'), ((7809, 7832), 'keras.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (7830, 7832), False, 'from keras import initializers\n'), ((8017, 8040), 'keras.initializers.Identity', 'initializers.Identity', ([], {}), '()\n', (8038, 8040), False, 'from keras import initializers\n'), ((4092, 4107), 'numpy.shape', 'np.shape', (['Q_jk1'], {}), '(Q_jk1)\n', (4100, 4107), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.animation as animation
class animacija2D:
def __init__(self, f, xInterval, yInterval, fN=20):
""" Priprava grafa in skiciranje funkcije. """
self.f = f
self.xlim = xInterval
self.ylim = yInterval
self.fN = fN
self.runs = []
x = np.linspace(self.xlim[0], self.xlim[1], 30)
y = np.linspace(self.ylim[0], self.ylim[1], 30)
X, Y = np.meshgrid(x, y)
fxy = np.zeros(X.shape)
for i in range(len(fxy)):
for j in range(len(fxy[0])):
fxy[i,j] = self.f([X[i,j], Y[i,j]])
self.fig = plt.figure()
self.ax = self.fig.add_subplot(projection='3d')
self.ax.plot_surface(X, Y, fxy, cmap=cm.coolwarm, linewidth=0, antialiased=False, alpha=0.5)
self.ax.set_xlabel('x')
self.ax.set_ylabel('y')
self.ax.set_zlabel('f(x,y)')
self.ax.set_xlim(self.xlim)
self.ax.set_ylim(self.ylim)
zlim = [np.amin(fxy), np.amax(fxy)]
self.zlim = (zlim[0]-0.1*abs(zlim[1]-zlim[0]), zlim[1]+0.1*abs(zlim[1]-zlim[0]))
self.ax.set_zlim(self.zlim)
def racunaj(self, metoda, x0, y0, par, N=10, eps=1e-3, konv=False):
""" Priročna funkcija za iteriranje oz. večkratno korakanje.
Funkcija se lahko uporablja za končno število korakov: konv = False,
ali pa dokler ne konvergira za dano vrednost eps: konv = True """
tabPoints = []
count = 0
if konv:
minimum = self.f([x0, y0])
            while count < 1000:
xN, yN, par = metoda(self.f, x0, y0, par)
tabPoints.append( [x0, y0, self.f([x0, y0])] )
x0 = xN
y0 = yN
fxyN = self.f([x0, y0])
if abs(minimum-fxyN) < eps: break
minimum = min(minimum, fxyN)
count += 1
else:
for i in range(N+1):
xN, yN, par = metoda(self.f, x0, y0, par)
tabPoints.append( [x0, y0, self.f([x0, y0])] )
x0 = xN
y0 = yN
count += 1
self.runs.append( tabPoints )
print((x0, y0), self.f([x0, y0]), count)
return x0, y0
def zacetekAnimacije(self):
""" Podmetoda za zacetek animacije. """
self.fig.suptitle("0")
self.artists = []
artists = []
for j in range(len(self.runs)):
sc, = self.ax.plot( self.runs[j][0][0], self.runs[j][0][1], self.runs[j][0][2], linestyle="", marker="o" )
self.artists.append( sc )
artists.append(sc)
return artists
def animiraj(self, i):
""" Podmetoda za animiranje. """
self.fig.suptitle(str(i))
artists = []
for j in range(len(self.runs)):
col = self.artists[j].get_color()
if i == len(self.runs[j])-1:
vline = self.ax.plot([self.runs[j][-1][0],self.runs[j][-1][0]], [self.runs[j][-1][1],self.runs[j][-1][1]], [self.zlim[0], self.zlim[1]], linestyle="--", color=col)
artists.append(vline)
elif i >= len(self.runs[j]): continue
if self.verbose == 0:
self.artists[j].set_data( self.runs[j][i][0], self.runs[j][i][1])
self.artists[j].set_3d_properties( self.runs[j][i][2] )
artists.append( self.artists[j] )
elif self.verbose == 1:
arw = self.ax.quiver( self.runs[j][i-1][0], self.runs[j][i-1][1], self.runs[j][i-1][2], self.runs[j][i][0]-self.runs[j][i-1][0], self.runs[j][i][1]-self.runs[j][i-1][1], self.runs[j][i][2]-self.runs[j][i-1][2], color=col)
self.artists.append( arw )
artists.append(arw)
return artists
def maxIteration(self):
""" Podmetoda za izračun števila slik. """
maxN = 0
for i in range(len(self.runs)):
maxN = max(maxN, len(self.runs[i]))
return maxN
def narisi(self, casAnimacije=500, verbose=0, save=False):
""" Funkcija za risanje animacij. """
self.verbose = verbose
ani = animation.FuncAnimation(self.fig, self.animiraj, np.arange(1, self.maxIteration()), interval=casAnimacije, init_func=self.zacetekAnimacije, repeat=False)
if save != False: ani.save(save+".gif", dpi=80, writer="imagemagick")
plt.show()
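# Illustrative usage sketch (not part of the original class): animate a plain
# gradient-descent run on a quadratic bowl. The step function `gd_step` below is a
# hypothetical example of the `metoda` callable expected by `racunaj`: it receives
# (f, x0, y0, par) and must return (new_x, new_y, par).
if __name__ == '__main__':
    def bowl(p):
        x, y = p
        return x**2 + y**2

    def gd_step(f, x0, y0, par, h=1e-5):
        # central-difference gradient and a fixed-size descent step; `par` is the step size
        dfx = (f([x0 + h, y0]) - f([x0 - h, y0])) / (2 * h)
        dfy = (f([x0, y0 + h]) - f([x0, y0 - h])) / (2 * h)
        return x0 - par * dfx, y0 - par * dfy, par

    demo = animacija2D(bowl, (-2, 2), (-2, 2))
    demo.racunaj(gd_step, 1.5, -1.8, par=0.1, N=20)
    demo.narisi(casAnimacije=300)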
|
[
"numpy.amin",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.meshgrid",
"numpy.amax",
"matplotlib.pyplot.show"
] |
[((386, 429), 'numpy.linspace', 'np.linspace', (['self.xlim[0]', 'self.xlim[1]', '(30)'], {}), '(self.xlim[0], self.xlim[1], 30)\n', (397, 429), True, 'import numpy as np\n'), ((442, 485), 'numpy.linspace', 'np.linspace', (['self.ylim[0]', 'self.ylim[1]', '(30)'], {}), '(self.ylim[0], self.ylim[1], 30)\n', (453, 485), True, 'import numpy as np\n'), ((501, 518), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (512, 518), True, 'import numpy as np\n'), ((533, 550), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (541, 550), True, 'import numpy as np\n'), ((698, 710), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (708, 710), True, 'import matplotlib.pyplot as plt\n'), ((4477, 4487), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4485, 4487), True, 'import matplotlib.pyplot as plt\n'), ((1058, 1070), 'numpy.amin', 'np.amin', (['fxy'], {}), '(fxy)\n', (1065, 1070), True, 'import numpy as np\n'), ((1072, 1084), 'numpy.amax', 'np.amax', (['fxy'], {}), '(fxy)\n', (1079, 1084), True, 'import numpy as np\n')]
|
'''
本模块用于数据预处理
This module is used for data preprocessing
'''
import numpy as np
from maysics.utils import e_distances
from matplotlib import pyplot as plt
plt.rcParams['font.sans-serif'] = ['FangSong']
plt.rcParams['axes.unicode_minus'] = False
from io import BytesIO
from lxml import etree
import base64
import math
def _rc(arg):
cov_mat = np.cov(arg)
var_mat = np.diagonal(cov_mat)**0.5
var_mat[var_mat == 0] = 1
for i in range(cov_mat.shape[0]):
cov_mat[i] /= var_mat[i]
cov_mat[:, i] /= var_mat[i]
return cov_mat
def _preview_process(data, value_round):
'''
    Preview processing
'''
data = np.array(data, dtype=float)
name_list = ['平均值', '中位数', '方差', '标准差', '最大值', '最小值', '偏度', '峰度']
value_list = []
mean_ = data.mean(axis=0)
value_list.append(np.round(mean_, value_round))
value_list.append(np.round(np.median(data, axis=0), value_round))
value_list.append(np.round(data.var(axis=0), value_round))
value_list.append(np.round(data.std(axis=0), value_round))
value_list.append(np.round(data.max(axis=0), value_round))
value_list.append(np.round(data.min(axis=0), value_round))
value_list.append(np.round(((data - mean_)**3).mean(axis=0), value_round))
value_list.append(np.round(((data - mean_)**4).mean(axis=0), value_round))
value_list = np.array(value_list).flatten()
style = '''
<style>
table{
border-collapse: collapse;
}
table, table tr td {
border:1px solid #ccc;
}
table tr td{
padding: 5px 10px;
}
</style>
'''
table = '<h2 style="padding-left:50px; border-top:1px solid #ccc">数值特征</h2>' + style + '<table align="center"><caption></caption>'
for i in range(8):
table += '<tr><td>' + name_list[i] + '</td>' + '<td>%s</td>' * data.shape[1] + '</tr>'
table = '<h1 style="padding-left:50px;">数据信息</h1>' + table % tuple(value_list) + '</table>'
data = np.ascontiguousarray(data.T)
num = data.shape[0]
plt.figure(figsize=(9, 3 * num))
for i in range(num):
q1, q2, q3 = np.percentile(data[i], [25, 50, 75])
plt.scatter(mean_[i], i+1, marker='o', color='white', s=30, zorder=3)
plt.hlines(i+1, q1, q3, color='k', linestyle='-', lw=1)
bx = plt.violinplot(data.tolist(), showextrema=False, vert=False)
plt.title('分布图')
buffer = BytesIO()
plt.savefig(buffer)
plt.close()
plot_data = buffer.getvalue()
imb = base64.b64encode(plot_data)
ims = imb.decode()
imd = 'data:image/png;base64,' + ims
im1 = '<div align="center"><img src="%s"></div>' % imd
im1 = '<br></br><h2 style="padding-left:50px; border-top:1px solid #ccc">密度分布</h2>' + im1
cov_mat = _rc(data)
matrix = '<table border="0"><caption></caption>'
for i in range(num):
matrix += '<tr>' + '<td>%s</td>' * num + '</tr>'
matrix = matrix % tuple(np.round(cov_mat.flatten(), value_round)) + '</table>'
plt.figure(figsize=(8, 8))
plt.matshow(cov_mat, fignum=0, cmap='Blues')
plt.colorbar()
plt.title('相关系数图')
buffer = BytesIO()
plt.savefig(buffer)
plt.close()
plot_data = buffer.getvalue()
imb = base64.b64encode(plot_data)
ims = imb.decode()
imd = 'data:image/png;base64,' + ims
im2 = '<div style="display:flex;flex-direction:row;vertical-align:middle;justify-content:center;width:100%;height:80vh"><div style="margin:auto 0;white-space:pre-wrap;max-width:50%">'
im2 = im2 +'相关矩阵:'+ matrix + '</div><img style="object-fit:contain;max-width:45%;max-height:80vh" src="{}"/></div>'.format(imd)
im2 = '<br></br><h2 style="padding-left:50px; border-top:1px solid #ccc">相关性</h2>' + im2
plt.figure(figsize=(2.5 * num, 2.5 * num))
for i in range(num * num):
ax = plt.subplot(num, num, i+1)
ax.plot(data[i//num], data[i%num], 'o')
buffer = BytesIO()
plt.savefig(buffer)
plt.close()
plot_data = buffer.getvalue()
imb = base64.b64encode(plot_data)
ims = imb.decode()
imd = "data:image/png;base64," + ims
im3 = '<div align="center"><img src="%s"></div>' % imd
im3 = '<br></br><h2 style="padding-left:50px; border-top:1px solid #ccc">散点关系</h2>' + im3
return '<title>数据信息预览</title>' + table + im1 + im2 + im3
def preview_file(filename, data, value_round=3):
'''
生成数据预览报告的html文件
参数
----
filename:字符串类型,文件名
data:二维数组,数据
value_round:整型,数字特征保留的小数点后的位数
Generate preview report with html file
Parameters
----------
filename: str, file name
data: 2-D array, data
value_round: int, the number of digits after the decimal point retained by numeric features
'''
root = _preview_process(data=data, value_round=value_round)
html = etree.HTML(root)
tree = etree.ElementTree(html)
tree.write(filename)
def preview(data, value_round=3):
'''
在jupyter中显示数据预览报告
参数
----
data:二维数组,数据
value_round:整型,数字特征保留的小数点后的位数
Display preview report in jupyter
Parameters
----------
data: 2-D array, data
value_round: int, the number of digits after the decimal point retained by numeric features
'''
root = _preview_process(data=data, value_round=value_round)
from IPython.core.display import display, HTML
display(HTML(root))
def length_pad(seq, maxlen=None, value=0, padding='pre', dtype=float):
'''
填充二维列表,使得每行长度都为maxlen
参数
----
seq:二维列表,需要填充的对象
maxlen:整型,可选,每行的最大长度,默认为原二维列表最大的长度
value:数类型,可选,填充值,默认为0
padding:字符串类型,可选,填充位置,'pre'代表从前面填充,'post'代表从后面填充,默认为'pre'
dtype:可选,输出的元素类型,默认为float
返回
----
二维ndarray
Pad the 2-D list so that every row is 'maxlen' in length
Parameters
----------
seq: 2-D list, objects that need to be padded
maxlen: int, callable, the maximum length of each row, default = the maximum length of the original 2-D list
value: num, callable, padding value, default=0
padding: str, callable, padding location, 'pre' means padding from the front and 'post' from the back, default='pre'
dtype: callable, the element type of the output, default=float
Return
------
2-D ndarray
'''
seq = list(seq)
if not maxlen:
maxlen = 0
for i in seq:
if len(i) > maxlen:
maxlen = len(i)
if padding == 'pre':
for i in range(len(seq)):
if maxlen > len(seq[i]):
seq[i] = [value] * (maxlen - len(seq[i])) + seq[i]
elif maxlen < len(seq[i]):
seq[i] = seq[i][-1 * maxlen:]
elif padding == 'post':
for i in range(len(seq)):
if maxlen > len(seq[i]):
seq[i] += [value] * (maxlen - len(seq[i]))
elif maxlen < len(seq[i]):
seq[i] = seq[i][:maxlen]
return np.array(seq, dtype=dtype)
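# Illustrative examples of length_pad (expected results inferred from the padding
# logic above, not taken from separate documentation):
#   length_pad([[1, 2], [3, 4, 5]])                            -> [[0., 1., 2.], [3., 4., 5.]]
#   length_pad([[1, 2], [3, 4, 5]], maxlen=2, padding='post')  -> [[1., 2.], [3., 4.]]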
def sample_pad(data, index=0, padding=None):
'''
对二维数据进行样本填充
先对data中的每个二维数据进行遍历,以各个index列的值作为全集,再对data的每个二维数据进行填充
如:data1 = [[0, 1],
[1, 2],
[2, 3]]
data2 = [[2, 3],
[3, 4],
[4, 5]]
data = (data1, data2)
则得到输出:
output = [array([[0, 1],
[1, 2],
[2, 3],
[3, nan],
[4, nan]]),
array([[0, nan],
[1,nan],
[2, 3],
[3, 4],
[4, 5]])]
data:元组或列表类型,数据
index:整型,作为扩充全集的标准列的索引
padding:填充值,可选,默认为None
Sample filling for 2D data
Values of each index column will be taken as the complete set, then each two-dimensional data of data is padded
e.g. data1 = [[0, 1],
[1, 2],
[2, 3]]
data2 = [[2, 3],
[3, 4],
[4, 5]]
data = (data1, data2)
output = [array([[0, 1],
[1, 2],
[2, 3],
[3, nan],
[4, nan]]),
array([[0, nan],
[1,nan],
[2, 3],
[3, 4],
[4, 5]])]
data: tuple or list, data
index: int, the index of a standard column as an extended complete set
padding: padding value, optional, default=None
'''
time_set = set()
result = []
if not padding:
padding = [np.nan] * (len(data[0][0]) - 1)
else:
padding = list([padding])
for i in range(len(data)):
data_part = np.array(data[i], dtype=np.object)
result.append(data_part)
time_set = time_set | set(data_part[:, index])
for i in range(len(result)):
different_set_list = np.array([list(time_set - set(result[i][:, index]))], dtype=np.object).T
num = len(different_set_list)
padding_new = np.array(padding * num, dtype=np.object).reshape(num, -1)
different_set_list = np.hstack((padding_new[:, :index], different_set_list, padding_new[:, index:]))
result[i] = np.vstack((result[i], different_set_list))
return result
def shuffle(*arg):
'''
打乱一个序列或以相同方法打乱多个序列
返回
----
一个ndarray
Shuffle a sequence or shuffle multiple sequences in the same way
Return
------
a ndarray
'''
state = np.random.get_state()
a_new_list = []
for li in arg:
np.random.set_state(state)
np.random.shuffle(li)
a_new_list.append(li)
return np.array(a_new_list)
def data_split(data, targets, train_size=None, test_size=None, shuffle=True, random_state=None):
'''
分离数据
参数
----
data:数据
targets:指标
train_size:浮点数类型,可选,训练集占总数据量的比,取值范围为(0, 1],默认为0.75
test_size:浮点数类型,可选,测试集占总数据量的比,取值范围为[0, 1),当train_size被定义时,该参数无效
shuffle:布尔类型,可选,True表示打乱数据,False表示不打乱数据,默认为True
random_state:整型,可选,随机种子
返回
----
元组,(数据测试集, 指标测试集, 数据验证集, 指标验证集)
split the data
Parameters
----------
data: data
targets: targets
train_size: float, callable, ratio of training set to total data, value range is (0, 1], default=0.75
test_size: float, callable, ratio of test set to total data, value range is [0, 1)
shuffle: bool, callable, 'True' will shuffle the data, 'False' will not, default = True
random_state: int, callable, random seed
Return
------
tuple, (train_data, train_target, validation_data, validation_target)
'''
data = np.array(data)
targets = np.array(targets)
if not (train_size or test_size):
train_size = 0.75
elif test_size:
train_size = 1 - test_size
if train_size <= 0 or train_size > 1:
raise Exception("'train_size' should be in (0, 1], 'test_size' should be in [0, 1)")
if shuffle:
np.random.seed(random_state)
state = np.random.get_state()
np.random.shuffle(data)
np.random.set_state(state)
np.random.shuffle(targets)
num_of_data = len(data)
train_data = data[:int(num_of_data * train_size)]
train_target = targets[:int(num_of_data * train_size)]
validation_data = data[int(num_of_data * train_size):]
validation_target = targets[int(num_of_data * train_size):]
return train_data, train_target, validation_data, validation_target
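# Illustrative usage of data_split (a sketch; X and y stand for any array-likes of
# equal length):
#   X_train, y_train, X_val, y_val = data_split(X, y, train_size=0.8, random_state=42)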
def kfold(data, targets, n, k=5):
'''
参数
----
data:数据
targets:指标
n:整型,表示将第n折作为验证集,从0开始
k:整型,可选,k折验证的折叠数,默认k=5
返回
----
元组,(数据测试集, 指标测试集, 数据验证集, 指标验证集)
Parameters
----------
data: data
targets: targets
n: int, take the nth part as validation set, starting from 0
k: int, callable, the number of k-fold, default = 5
Return
------
tuple, (train_data, train_target, validation_data, validation_target)
'''
data = np.array(data)
targets = np.array(targets)
num_validation_samples = len(data) // k
validation_data = data[num_validation_samples * n:
num_validation_samples * (n + 1)]
validation_targets = targets[num_validation_samples * n:
num_validation_samples * (n + 1)]
train_data = np.concatenate([data[: num_validation_samples * n],
data[num_validation_samples * (n + 1):]])
train_targets = np.concatenate([targets[: num_validation_samples * n],
targets[num_validation_samples * (n + 1):]])
return train_data, train_targets, validation_data, validation_targets
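# Illustrative k-fold loop (a sketch; `build_and_score` is a hypothetical user-supplied
# function that trains a model and returns a validation score):
#   scores = []
#   for n in range(5):
#       X_tr, y_tr, X_va, y_va = kfold(X, y, n, k=5)
#       scores.append(build_and_score(X_tr, y_tr, X_va, y_va))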
def dataloader(data, targets, choose_rate=0.3, shuffle=True, random_state=None):
'''
数据随机生成器
参数
----
data:数据
targets:指标
choose_rate:浮点数类型,可选,生成率,即一次生成数据量在原数据量的占比,范围为[0, 1],默认为0.3
shuffle:布尔类型,可选,True表示打乱数据,False表示不打乱数据,默认为True
random_state:整型,可选,随机种子
返回
----
生成器
Data Random Generator
Parameters
----------
data: data
targets: targets
choose_rate: float, callable, generation rate (the proportion of data generated at one time in the original data) whose range is [0, 1], default=0.3
shuffle: bool, callable, 'True' will shuffle the data, 'False' will not, default = True
random_state: int, callable, random seed
Return
------
generator
'''
data = np.array(data)
targets = np.array(targets)
if shuffle:
np.random.seed(random_state)
state = np.random.get_state()
np.random.shuffle(data)
np.random.set_state(state)
np.random.shuffle(targets)
num = len(data)
choose_rate = int(num * choose_rate)
times = int(math.ceil(num / choose_rate))
for i in range(times):
loc_1 = i * choose_rate
loc_2 = (i + 1) * choose_rate
yield data[loc_1: loc_2], targets[loc_1: loc_2]
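# Illustrative usage of dataloader (a sketch): the generator yields successive batches,
# each covering roughly `choose_rate` of the (optionally shuffled) data.
#   for X_batch, y_batch in dataloader(X, y, choose_rate=0.2, random_state=0):
#       ...  # train on one batch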
def standard(data, mean=True, var=True, index=None):
'''
标准化数据
z = (x - u) / s
z:新数据; x:原数据; u:均值; s:方差
如果某一列数据完全相同(即方差s=0),则该列数据全部归零
参数
----
data:2-D的ndarray数据
mean:布尔类型或ndarray,可选,布尔类型决定是否将均值调整为0,ndarray用于设定不同的均值
var:布尔类型或ndarray,可选,是否将方差调整为1,ndarray用于设定不同的方差
index:列表类型,可选,需要进行标准化的列的索引,默认为全部
返回
----
tuple
Standardize data
z = (x - u) / s
z: new data; x: origin data; u: mean value; s: variance
if data in one column are the same(s=0), data in this column will be turned to 0
Parameters
----------
data: 2-D ndarray
mean: bool or ndarray, callable, bool decides if adjust the mean value to 0, ndarray is used to set different means
var: bool or ndarray, callable, bool decides if adjust the variance to 0, ndarray is used to set different variances
index: list, callable, index of columns need to be standardized, defalut to all
Return
------
2-D ndarray
'''
data=np.array(data, dtype=float)
if index:
if mean is True:
mean = data[:, index].mean(axis=0)
elif mean is None or mean is False:
mean = np.zeros(len(index))
data[:, index] -= mean
if not var is None and not var is False:
if var is True:
var = data[:, index].std(axis=0)
else:
var = np.array(var)
std_zero_indices = np.nonzero(var == 0)
std = var.copy()
std[std==0] = 1.0
data[:, index] /= std
if list(std_zero_indices[0]):
for i in std_zero_indices[0]:
data[:, index][:, i] *= 0
else:
std = 0
else:
if mean is True:
mean = data.mean(axis=0)
elif mean is None or mean is False:
mean = np.zeros(data.shape[1])
else:
mean = np.array(mean)
data -= mean
if not var is None and not var is False:
if var is True:
var = data.std(axis=0)
else:
var = np.array(var)
std_zero_indices = np.nonzero(var == 0)
std = var.copy()
std[std==0] = 1.0
data /= std
if list(std_zero_indices[0]):
for i in std_zero_indices[0]:
data[:, i] *= 0
else:
std = 0
return data, mean, std
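# Illustrative usage of standard (a sketch): fit the statistics on training data and
# reuse them, via the mean/var arguments, to transform new data consistently.
#   X_train_s, mu, sigma = standard(X_train)
#   X_test_s, _, _ = standard(X_test, mean=mu, var=sigma)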
def minmax(data, feature_range=(0, 1), min_max=None, index=None):
'''
归一化数据
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_new = X_std * (feature_range[1] - feature_range[0]) + feature_range[0]
参数
----
data:2-D的ndarray数据
feature_range:元组类型,可选,需要转换的范围,默认为(0, 1)
min_max:元组类型,可选,用于设定最大最小值
index:列表类型,可选,需要进行标准化的列的索引,默认为全部
返回
----
元组,(归一化后的数据, (最小值,最大值))
Normalize data
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_new = X_std * (feature_range[1] - feature_range[0]) + feature_range[0]
Parameters
----------
    data: 2-D ndarray, data
feature_range: tuple, callabel, final range of transformed data
min_max: tuple, callable, set min and max values
index: list, callable, index of columns need to be standardized, defalut to all
Return
------
tuple, (Normalized data, (miniment, maximent))
'''
data=np.array(data, dtype=float)
if index:
if not min_max:
min_max = (data[:, index].min(axis=0), data[:, index].max(axis=0))
length = min_max[1] - min_max[0]
data[:, index] = (data[:, index] - min_max[0]) / length
data[:, index] = data[:, index] * (feature_range[1] - feature_range[0]) + feature_range[0]
else:
if not min_max:
min_max = (data.min(axis=0), data.max(axis=0))
length = min_max[1] - min_max[0]
data = (data - min_max[0]) / length
data = data * (feature_range[1] - feature_range[0]) + feature_range[0]
return data, min_max
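# Illustrative usage of minmax (a sketch): the (min, max) pair computed on training data
# can be passed back via min_max so that new data is scaled with the same bounds.
#   X_train_m, mm = minmax(X_train, feature_range=(0, 1))
#   X_test_m, _ = minmax(X_test, min_max=mm)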
def normalizer(data, index=None):
'''
使每个数据的模为1
参数
----
data:2-D的ndarray数据
index:列表形式,可选,需要进行标准化的列的索引,默认为全部
返回
----
2-D ndarray
Making the moduli of data equal 1
Parameters
----------
    data: 2-D ndarray, data
index: list, callable, index of columns need to be standardized, defalut to all
Return
------
2-D ndarray
'''
data = np.array(data, dtype=float)
if index:
distance_list = e_distances(data[:, index])
distance_list[distance_list == 0] = 1
data[:, index] /= np.array([distance_list]).T
else:
distance_list = e_distances(data)
distance_list[distance_list == 0] = 1
data /= np.array([distance_list]).T
return data
def pca(data, n=None, eig_vector=None):
'''
主成分分析
参数
----
data:二维数组,数据
n:整型或浮点数类型,可选,当n >= 1时,表示降至n维,当0< n < 1时,表示降维至累计方差贡献率 >= n,默认不降维
eig_vector:元组类型,可选,用于设定不同的特征向量,当设置该参数时,累计方差贡献率无意义,默认为None
返回
----
元组,(转换后的数据, (累计方差贡献率, 各主成分方差, 各主成分方差贡献率), 特征向量)
Principal Component Analysis
Parameters
----------
data:2-D array, data
n: int or float, callable, when n > = 1, it means to reduce the dimension to n; when 0 < n < 1, it means to reduce the dimension to cumulative variance ratio > = n, and it is not reduced by default
eig_value_vector: tuple, callable, set different eigenvectors, when this parameter is set, the cumulative variance ratio is meaningless, default=None
Return
------
tuple, (transformed data, (cumulative variance ratio, variance, variance ratio), eigenvectors)
'''
data = np.array(data, dtype=float)
if eig_vector is None or eig_vector is False:
cov_mat = np.cov(data.T)
eig_value, eig_vector = np.linalg.eig(cov_mat)
sort_index = np.flipud(np.argsort(eig_value))
eig_value = eig_value[sort_index]
eig_vector = eig_vector[:, sort_index]
eig_ratio = eig_value / eig_value.sum()
contri = 0
if not n is None and not n is False:
if n >= 1:
n = int(n)
contri = eig_ratio[:n].sum()
elif n < 1:
for i in range(eig_value.shape[0]):
contri += eig_ratio[i]
if contri >= n:
n = i + 1
break
eig_value = eig_value[:n]
eig_ratio = eig_ratio[:n]
eig_vector = eig_vector[:, :n]
else:
contri = 1
else:
contri = None
eig_value = None
eig_ratio = None
data = np.dot(data, eig_vector)
return data, (contri, eig_value, eig_ratio), eig_vector
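# Illustrative usage of pca (a sketch): reduce the training data to 2 components, then
# project new data with the returned eigenvectors.
#   X_train_p, (contri, var, ratio), vec = pca(X_train, n=2)
#   X_test_p, _, _ = pca(X_test, eig_vector=vec)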
class RC():
'''
相关系数
参数
----
*arg:列表类型
属性
----
rc_mat:相关系数矩阵
correlation coefficient
Parameter
---------
*arg: list
Attribute
---------
rc_mat: correlation coefficient matrix
'''
def __init__(self, *arg):
arg = np.array(arg, dtype=float)
if len(arg.shape) != 2:
raise Exception("Input list should be 1-D.")
else:
self.rc_mat = _rc(arg)
def __img_process(self, index, cmap):
plt.matshow(self.rc_mat, cmap=cmap)
plt.colorbar()
if index:
n_list = range(len(index))
plt.xticks(n_list, index)
plt.yticks(n_list, index)
def show(self, index=None, cmap='Blues'):
'''
作图并显示
参数
----
index:列表形式,可选,各数组名称
cmap:字符串形式,可选,颜色板,默认为'Blues'
Display the image
Parameters
----------
index: list, callable, names of each array
cmap: str, callable, color board, default='Blues'
'''
self.__img_process(index=index, cmap=cmap)
plt.show()
def savefig(self, filename, index=None, cmap='Blues'):
'''
作图并保存
参数
----
filename:字符串形式,文件名
index:列表形式,可选,各数组名称
cmap:字符串形式,可选,颜色板,默认为'Blues'
Save the image
Parameters
----------
filename: str, file name
index: list, callable, names of each array
cmap: str, callable, color board, default='Blues'
'''
self.__img_process(index=index, cmap=cmap)
plt.savefig(filename)
|
[
"numpy.random.get_state",
"numpy.random.set_state",
"numpy.hstack",
"base64.b64encode",
"io.BytesIO",
"numpy.ascontiguousarray",
"maysics.utils.e_distances",
"numpy.array",
"numpy.argsort",
"lxml.etree.HTML",
"numpy.cov",
"lxml.etree.ElementTree",
"IPython.core.display.HTML",
"matplotlib.pyplot.close",
"numpy.dot",
"matplotlib.pyplot.yticks",
"numpy.vstack",
"numpy.concatenate",
"matplotlib.pyplot.scatter",
"numpy.random.seed",
"numpy.round",
"numpy.diagonal",
"matplotlib.pyplot.savefig",
"numpy.linalg.eig",
"matplotlib.pyplot.xticks",
"numpy.nonzero",
"matplotlib.pyplot.title",
"matplotlib.pyplot.matshow",
"matplotlib.pyplot.show",
"numpy.median",
"math.ceil",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.hlines",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.percentile",
"matplotlib.pyplot.subplot",
"numpy.random.shuffle"
] |
[((350, 361), 'numpy.cov', 'np.cov', (['arg'], {}), '(arg)\n', (356, 361), True, 'import numpy as np\n'), ((647, 674), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (655, 674), True, 'import numpy as np\n'), ((1968, 1996), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data.T'], {}), '(data.T)\n', (1988, 1996), True, 'import numpy as np\n'), ((2025, 2057), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 3 * num)'}), '(figsize=(9, 3 * num))\n', (2035, 2057), True, 'from matplotlib import pyplot as plt\n'), ((2357, 2373), 'matplotlib.pyplot.title', 'plt.title', (['"""分布图"""'], {}), "('分布图')\n", (2366, 2373), True, 'from matplotlib import pyplot as plt\n'), ((2392, 2401), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (2399, 2401), False, 'from io import BytesIO\n'), ((2406, 2425), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {}), '(buffer)\n', (2417, 2425), True, 'from matplotlib import pyplot as plt\n'), ((2430, 2441), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2439, 2441), True, 'from matplotlib import pyplot as plt\n'), ((2486, 2513), 'base64.b64encode', 'base64.b64encode', (['plot_data'], {}), '(plot_data)\n', (2502, 2513), False, 'import base64\n'), ((2997, 3023), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (3007, 3023), True, 'from matplotlib import pyplot as plt\n'), ((3028, 3072), 'matplotlib.pyplot.matshow', 'plt.matshow', (['cov_mat'], {'fignum': '(0)', 'cmap': '"""Blues"""'}), "(cov_mat, fignum=0, cmap='Blues')\n", (3039, 3072), True, 'from matplotlib import pyplot as plt\n'), ((3077, 3091), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3089, 3091), True, 'from matplotlib import pyplot as plt\n'), ((3096, 3114), 'matplotlib.pyplot.title', 'plt.title', (['"""相关系数图"""'], {}), "('相关系数图')\n", (3105, 3114), True, 'from matplotlib import pyplot as plt\n'), ((3133, 3142), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3140, 3142), False, 'from io import BytesIO\n'), ((3147, 3166), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {}), '(buffer)\n', (3158, 3166), True, 'from matplotlib import pyplot as plt\n'), ((3171, 3182), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3180, 3182), True, 'from matplotlib import pyplot as plt\n'), ((3227, 3254), 'base64.b64encode', 'base64.b64encode', (['plot_data'], {}), '(plot_data)\n', (3243, 3254), False, 'import base64\n'), ((3746, 3788), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.5 * num, 2.5 * num)'}), '(figsize=(2.5 * num, 2.5 * num))\n', (3756, 3788), True, 'from matplotlib import pyplot as plt\n'), ((3926, 3935), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3933, 3935), False, 'from io import BytesIO\n'), ((3940, 3959), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buffer'], {}), '(buffer)\n', (3951, 3959), True, 'from matplotlib import pyplot as plt\n'), ((3964, 3975), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3973, 3975), True, 'from matplotlib import pyplot as plt\n'), ((4020, 4047), 'base64.b64encode', 'base64.b64encode', (['plot_data'], {}), '(plot_data)\n', (4036, 4047), False, 'import base64\n'), ((4827, 4843), 'lxml.etree.HTML', 'etree.HTML', (['root'], {}), '(root)\n', (4837, 4843), False, 'from lxml import etree\n'), ((4855, 4878), 'lxml.etree.ElementTree', 'etree.ElementTree', (['html'], {}), '(html)\n', (4872, 4878), False, 'from lxml import etree\n'), ((6955, 6981), 'numpy.array', 'np.array', (['seq'], {'dtype': 'dtype'}), 
'(seq, dtype=dtype)\n', (6963, 6981), True, 'import numpy as np\n'), ((9637, 9658), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (9656, 9658), True, 'import numpy as np\n'), ((9804, 9824), 'numpy.array', 'np.array', (['a_new_list'], {}), '(a_new_list)\n', (9812, 9824), True, 'import numpy as np\n'), ((10799, 10813), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (10807, 10813), True, 'import numpy as np\n'), ((10828, 10845), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (10836, 10845), True, 'import numpy as np\n'), ((12167, 12181), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (12175, 12181), True, 'import numpy as np\n'), ((12196, 12213), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (12204, 12213), True, 'import numpy as np\n'), ((12529, 12626), 'numpy.concatenate', 'np.concatenate', (['[data[:num_validation_samples * n], data[num_validation_samples * (n + 1):]]'], {}), '([data[:num_validation_samples * n], data[\n num_validation_samples * (n + 1):]])\n', (12543, 12626), True, 'import numpy as np\n'), ((12676, 12779), 'numpy.concatenate', 'np.concatenate', (['[targets[:num_validation_samples * n], targets[num_validation_samples * (n +\n 1):]]'], {}), '([targets[:num_validation_samples * n], targets[\n num_validation_samples * (n + 1):]])\n', (12690, 12779), True, 'import numpy as np\n'), ((13671, 13685), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (13679, 13685), True, 'import numpy as np\n'), ((13700, 13717), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (13708, 13717), True, 'import numpy as np\n'), ((15202, 15229), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (15210, 15229), True, 'import numpy as np\n'), ((17639, 17666), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (17647, 17666), True, 'import numpy as np\n'), ((18714, 18741), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (18722, 18741), True, 'import numpy as np\n'), ((19987, 20014), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (19995, 20014), True, 'import numpy as np\n'), ((21017, 21041), 'numpy.dot', 'np.dot', (['data', 'eig_vector'], {}), '(data, eig_vector)\n', (21023, 21041), True, 'import numpy as np\n'), ((376, 396), 'numpy.diagonal', 'np.diagonal', (['cov_mat'], {}), '(cov_mat)\n', (387, 396), True, 'import numpy as np\n'), ((822, 850), 'numpy.round', 'np.round', (['mean_', 'value_round'], {}), '(mean_, value_round)\n', (830, 850), True, 'import numpy as np\n'), ((2104, 2140), 'numpy.percentile', 'np.percentile', (['data[i]', '[25, 50, 75]'], {}), '(data[i], [25, 50, 75])\n', (2117, 2140), True, 'import numpy as np\n'), ((2149, 2220), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mean_[i]', '(i + 1)'], {'marker': '"""o"""', 'color': '"""white"""', 's': '(30)', 'zorder': '(3)'}), "(mean_[i], i + 1, marker='o', color='white', s=30, zorder=3)\n", (2160, 2220), True, 'from matplotlib import pyplot as plt\n'), ((2227, 2284), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(i + 1)', 'q1', 'q3'], {'color': '"""k"""', 'linestyle': '"""-"""', 'lw': '(1)'}), "(i + 1, q1, q3, color='k', linestyle='-', lw=1)\n", (2237, 2284), True, 'from matplotlib import pyplot as plt\n'), ((3833, 3861), 'matplotlib.pyplot.subplot', 'plt.subplot', (['num', 'num', '(i + 1)'], {}), '(num, num, i + 1)\n', (3844, 3861), True, 'from matplotlib import pyplot as plt\n'), ((5387, 5397), 
'IPython.core.display.HTML', 'HTML', (['root'], {}), '(root)\n', (5391, 5397), False, 'from IPython.core.display import display, HTML\n'), ((8834, 8868), 'numpy.array', 'np.array', (['data[i]'], {'dtype': 'np.object'}), '(data[i], dtype=np.object)\n', (8842, 8868), True, 'import numpy as np\n'), ((9244, 9323), 'numpy.hstack', 'np.hstack', (['(padding_new[:, :index], different_set_list, padding_new[:, index:])'], {}), '((padding_new[:, :index], different_set_list, padding_new[:, index:]))\n', (9253, 9323), True, 'import numpy as np\n'), ((9344, 9386), 'numpy.vstack', 'np.vstack', (['(result[i], different_set_list)'], {}), '((result[i], different_set_list))\n', (9353, 9386), True, 'import numpy as np\n'), ((9706, 9732), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (9725, 9732), True, 'import numpy as np\n'), ((9741, 9762), 'numpy.random.shuffle', 'np.random.shuffle', (['li'], {}), '(li)\n', (9758, 9762), True, 'import numpy as np\n'), ((11134, 11162), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (11148, 11162), True, 'import numpy as np\n'), ((11179, 11200), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (11198, 11200), True, 'import numpy as np\n'), ((11209, 11232), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (11226, 11232), True, 'import numpy as np\n'), ((11241, 11267), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (11260, 11267), True, 'import numpy as np\n'), ((11276, 11302), 'numpy.random.shuffle', 'np.random.shuffle', (['targets'], {}), '(targets)\n', (11293, 11302), True, 'import numpy as np\n'), ((13747, 13775), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (13761, 13775), True, 'import numpy as np\n'), ((13792, 13813), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (13811, 13813), True, 'import numpy as np\n'), ((13822, 13845), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (13839, 13845), True, 'import numpy as np\n'), ((13854, 13880), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (13873, 13880), True, 'import numpy as np\n'), ((13889, 13915), 'numpy.random.shuffle', 'np.random.shuffle', (['targets'], {}), '(targets)\n', (13906, 13915), True, 'import numpy as np\n'), ((13993, 14021), 'math.ceil', 'math.ceil', (['(num / choose_rate)'], {}), '(num / choose_rate)\n', (14002, 14021), False, 'import math\n'), ((18785, 18812), 'maysics.utils.e_distances', 'e_distances', (['data[:, index]'], {}), '(data[:, index])\n', (18796, 18812), False, 'from maysics.utils import e_distances\n'), ((18952, 18969), 'maysics.utils.e_distances', 'e_distances', (['data'], {}), '(data)\n', (18963, 18969), False, 'from maysics.utils import e_distances\n'), ((20088, 20102), 'numpy.cov', 'np.cov', (['data.T'], {}), '(data.T)\n', (20094, 20102), True, 'import numpy as np\n'), ((20135, 20157), 'numpy.linalg.eig', 'np.linalg.eig', (['cov_mat'], {}), '(cov_mat)\n', (20148, 20157), True, 'import numpy as np\n'), ((21426, 21452), 'numpy.array', 'np.array', (['arg'], {'dtype': 'float'}), '(arg, dtype=float)\n', (21434, 21452), True, 'import numpy as np\n'), ((21651, 21686), 'matplotlib.pyplot.matshow', 'plt.matshow', (['self.rc_mat'], {'cmap': 'cmap'}), '(self.rc_mat, cmap=cmap)\n', (21662, 21686), True, 'from matplotlib import pyplot as plt\n'), ((21695, 21709), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (21707, 21709), True, 
'from matplotlib import pyplot as plt\n'), ((22294, 22304), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22302, 22304), True, 'from matplotlib import pyplot as plt\n'), ((22826, 22847), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (22837, 22847), True, 'from matplotlib import pyplot as plt\n'), ((883, 906), 'numpy.median', 'np.median', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (892, 906), True, 'import numpy as np\n'), ((1349, 1369), 'numpy.array', 'np.array', (['value_list'], {}), '(value_list)\n', (1357, 1369), True, 'import numpy as np\n'), ((15656, 15676), 'numpy.nonzero', 'np.nonzero', (['(var == 0)'], {}), '(var == 0)\n', (15666, 15676), True, 'import numpy as np\n'), ((16381, 16401), 'numpy.nonzero', 'np.nonzero', (['(var == 0)'], {}), '(var == 0)\n', (16391, 16401), True, 'import numpy as np\n'), ((18885, 18910), 'numpy.array', 'np.array', (['[distance_list]'], {}), '([distance_list])\n', (18893, 18910), True, 'import numpy as np\n'), ((19032, 19057), 'numpy.array', 'np.array', (['[distance_list]'], {}), '([distance_list])\n', (19040, 19057), True, 'import numpy as np\n'), ((20198, 20219), 'numpy.argsort', 'np.argsort', (['eig_value'], {}), '(eig_value)\n', (20208, 20219), True, 'import numpy as np\n'), ((21779, 21804), 'matplotlib.pyplot.xticks', 'plt.xticks', (['n_list', 'index'], {}), '(n_list, index)\n', (21789, 21804), True, 'from matplotlib import pyplot as plt\n'), ((21817, 21842), 'matplotlib.pyplot.yticks', 'plt.yticks', (['n_list', 'index'], {}), '(n_list, index)\n', (21827, 21842), True, 'from matplotlib import pyplot as plt\n'), ((9157, 9197), 'numpy.array', 'np.array', (['(padding * num)'], {'dtype': 'np.object'}), '(padding * num, dtype=np.object)\n', (9165, 9197), True, 'import numpy as np\n'), ((15611, 15624), 'numpy.array', 'np.array', (['var'], {}), '(var)\n', (15619, 15624), True, 'import numpy as np\n'), ((16078, 16101), 'numpy.zeros', 'np.zeros', (['data.shape[1]'], {}), '(data.shape[1])\n', (16086, 16101), True, 'import numpy as np\n'), ((16135, 16149), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (16143, 16149), True, 'import numpy as np\n'), ((16336, 16349), 'numpy.array', 'np.array', (['var'], {}), '(var)\n', (16344, 16349), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
setup.py file for SWIG Interface of Ext
"""
import os
import platform
import re
import subprocess
import sys
from distutils.version import LooseVersion
from os import walk
import numpy
import wget
from setuptools import Extension
from setuptools import setup, find_packages
from setuptools.command.build_ext import build_ext
try:
# Obtain the numpy include directory. This logic works across numpy versions.
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
def readme():
with open('./README.md') as f:
return f.read()
def download_numpy_interface(path):
"""
Downloads numpy.i
:return: None
"""
print("Download Numpy SWIG Interface")
np_version = re.compile(r'(?P<MAJOR>[0-9]+)\.'
'(?P<MINOR>[0-9]+)') \
.search(numpy.__version__)
np_version_string = np_version.group()
np_version_info = {key: int(value)
for key, value in np_version.groupdict().items()}
np_file_name = 'numpy.i'
np_file_url = 'https://raw.githubusercontent.com/numpy/numpy/maintenance/' + \
np_version_string + '.x/tools/swig/' + np_file_name
if np_version_info['MAJOR'] == 1 and np_version_info['MINOR'] < 9:
np_file_url = np_file_url.replace('tools', 'doc')
wget.download(np_file_url, path)
return
# Download numpy.i if needed
if not os.path.exists('./EggNetExtension/numpy.i'):
print('Downloading numpy.i')
project_dir = os.path.dirname(os.path.abspath(__file__))
download_numpy_interface(path='./EggNetExtension/')
source_files = ['./EggNetExtension/NNExtension.i', './EggNetExtension/cconv.c',
'./EggNetExtension/cpool.c', './EggNetExtension/crelu.c',
'./EggNetExtension/cmatmul.c', './EggNetExtension/chelper.c']
print("************************ SOURCE FILES *************************")
print(source_files)
print("************************ SOURCE FILES *************************")
include_dirs = ['./EggNetExtension/', numpy_include]
# Simple Platform Check (not entirely accurate because here should the compiler be checked)
# ToDo: Should be done better for example via CMake -> https://www.benjack.io/2017/06/12/python-cpp-tests.html
if platform.system() == 'Linux':
extra_args = ['-std=gnu99']
elif platform.system() == 'Darwin':
extra_args = ['--verbose', '-Rpass=loop-vectorize', '-Rpass-analysis=loop-vectorize', '-ffast-math']
elif platform.system() == 'Windows':
# extra_args = ['/Qrestrict', '/W3']
extra_args = []
else:
raise RuntimeError('Operating System not supported?')
extra_link_args = []
NN_ext_module = Extension('EggNetExtension._EggNetExtension',
sources=source_files,
include_dirs=include_dirs,
swig_opts=['-py3'],
extra_compile_args=extra_args,
extra_link_args=extra_link_args,
depends=['numpy'],
optional=False)
setup(name='EggNetExtension',
version='1.0',
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
description="""NN calculation library for python""",
url='https://github.com/marbleton/FPGA_MNIST',
packages=['EggNetExtension'],
package_data={
# If any package contains *.txt or *.rst files, include them:
'': ['*.txt', '*.rst', '*.i', '*.c', '*.h'],
},
ext_modules=[NN_ext_module],
install_requires=['numpy', 'wget', 'idx2numpy'],
)
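# Typical build/installation commands for this extension (a sketch; SWIG, NumPy and a
# C compiler must be available so the wrapper can be generated and compiled):
#   python setup.py build_ext --inplace
#   pip install .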
|
[
"numpy.get_numpy_include",
"wget.download",
"os.path.exists",
"re.compile",
"setuptools.setup",
"setuptools.Extension",
"platform.system",
"numpy.get_include",
"os.path.abspath"
] |
[((2713, 2939), 'setuptools.Extension', 'Extension', (['"""EggNetExtension._EggNetExtension"""'], {'sources': 'source_files', 'include_dirs': 'include_dirs', 'swig_opts': "['-py3']", 'extra_compile_args': 'extra_args', 'extra_link_args': 'extra_link_args', 'depends': "['numpy']", 'optional': '(False)'}), "('EggNetExtension._EggNetExtension', sources=source_files,\n include_dirs=include_dirs, swig_opts=['-py3'], extra_compile_args=\n extra_args, extra_link_args=extra_link_args, depends=['numpy'],\n optional=False)\n", (2722, 2939), False, 'from setuptools import Extension\n'), ((3110, 3498), 'setuptools.setup', 'setup', ([], {'name': '"""EggNetExtension"""', 'version': '"""1.0"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'description': '"""NN calculation library for python"""', 'url': '"""https://github.com/marbleton/FPGA_MNIST"""', 'packages': "['EggNetExtension']", 'package_data': "{'': ['*.txt', '*.rst', '*.i', '*.c', '*.h']}", 'ext_modules': '[NN_ext_module]', 'install_requires': "['numpy', 'wget', 'idx2numpy']"}), "(name='EggNetExtension', version='1.0', author='<NAME>', author_email=\n '<EMAIL>', license='MIT', description=\n 'NN calculation library for python', url=\n 'https://github.com/marbleton/FPGA_MNIST', packages=['EggNetExtension'],\n package_data={'': ['*.txt', '*.rst', '*.i', '*.c', '*.h']}, ext_modules\n =[NN_ext_module], install_requires=['numpy', 'wget', 'idx2numpy'])\n", (3115, 3498), False, 'from setuptools import setup, find_packages\n'), ((462, 481), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (479, 481), False, 'import numpy\n'), ((1372, 1404), 'wget.download', 'wget.download', (['np_file_url', 'path'], {}), '(np_file_url, path)\n', (1385, 1404), False, 'import wget\n'), ((1455, 1498), 'os.path.exists', 'os.path.exists', (['"""./EggNetExtension/numpy.i"""'], {}), "('./EggNetExtension/numpy.i')\n", (1469, 1498), False, 'import os\n'), ((2310, 2327), 'platform.system', 'platform.system', ([], {}), '()\n', (2325, 2327), False, 'import platform\n'), ((525, 550), 'numpy.get_numpy_include', 'numpy.get_numpy_include', ([], {}), '()\n', (548, 550), False, 'import numpy\n'), ((1567, 1592), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1582, 1592), False, 'import os\n'), ((2377, 2394), 'platform.system', 'platform.system', ([], {}), '()\n', (2392, 2394), False, 'import platform\n'), ((780, 831), 're.compile', 're.compile', (['"""(?P<MAJOR>[0-9]+)\\\\.(?P<MINOR>[0-9]+)"""'], {}), "('(?P<MAJOR>[0-9]+)\\\\.(?P<MINOR>[0-9]+)')\n", (790, 831), False, 'import re\n'), ((2518, 2535), 'platform.system', 'platform.system', ([], {}), '()\n', (2533, 2535), False, 'import platform\n')]
|
##
## Software PI-Net: Pose Interacting Network for Multi-Person Monocular 3D Pose Estimation
## Copyright Inria and UPC
## Year 2021
## Contact : <EMAIL>
##
## The software PI-Net is provided under MIT License.
##
# used during training for the skeleton input
import os
import os.path as osp
import numpy as np
import math
from utils.pose_utils import get_bbox
from pycocotools.coco import COCO
from config import cfg
import json
from utils.pose_utils import pixel2cam, get_bbox, warp_coord_to_original, rigid_align, cam2pixel
from utils.vis import vis_keypoints, vis_3d_skeleton
import cv2 as cv
def larger_bbox(bbox):
w = bbox[2]
h = bbox[3]
c_x = bbox[0] + w/2.
c_y = bbox[1] + h/2.
aspect_ratio = cfg.input_shape[1]/cfg.input_shape[0]
if w > aspect_ratio * h:
h = w / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
bbox[2] = w*1.25
bbox[3] = h*1.25
bbox[0] = c_x - bbox[2]/2.
bbox[1] = c_y - bbox[3]/2.
return bbox
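# Worked example for larger_bbox (assuming cfg.input_shape == (256, 256), i.e. an
# aspect ratio of 1): bbox [0, 0, 100, 50] is first squared to 100x100 around its
# centre (50, 25), then enlarged by 1.25x, giving [-12.5, -37.5, 125, 125].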
class MuCo:
def __init__(self, data_split, is_val):
self.data_split = data_split
self.img_dir = osp.join(cfg.data_dir, 'MuCo', 'data')
self.train_annot_path = cfg.train_annot_path
self.val_annot_path = cfg.val_annot_path
self.joint_num = 21
self.joints_name = ('Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head', 'R_Hand', 'L_Hand', 'R_Toe', 'L_Toe')
self.flip_pairs = ( (2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13), (17, 18), (19, 20) )
self.skeleton = ( (0, 16), (16, 1), (1, 15), (15, 14), (14, 8), (14, 11), (8, 9), (9, 10), (10, 19), (11, 12), (12, 13), (13, 20), (1, 2), (2, 3), (3, 4), (4, 17), (1, 5), (5, 6), (6, 7), (7, 18) )
self.joints_have_depth = True
self.root_idx = self.joints_name.index('Pelvis')
self.is_val = is_val
self.pair_index_path = cfg.pair_index_path_muco
self.data = self.load_data()
def load_data(self):
if self.data_split == 'train':
db = COCO(self.train_annot_path)
data = []
id2pairId = json.load(open(self.pair_index_path,'r'))
n = 0
for aid in db.anns.keys():
ann = db.anns[aid]
image_id = ann['image_id']
img = db.loadImgs(image_id)[0]
img_path = osp.join(self.img_dir, img['file_name'])
fx, fy = img['f']
cx, cy = img['c']
f = np.array([fx, fy]); c = np.array([cx, cy]);
joint_cam = np.array(ann['keypoints_cam'])
joint_cam_posenet = np.array(ann['keypoints_cam_posenet'])
root_cam = joint_cam[self.root_idx]
joint_img = np.array(ann['keypoints_img'])
joint_img = np.concatenate([joint_img, joint_cam[:,2:]],1)
joint_img[:,2] = joint_img[:,2] - root_cam[2]
joint_vis = np.ones((self.joint_num,1))
bbox_id = ann['id']
orig_bbox = ann['bbox']
bbox = np.array(ann['bbox'])
img_width, img_height = img['width'], img['height']
x, y, w, h = bbox
center = [x+w/2, y+h/2]
x1 = np.max((0, x))
y1 = np.max((0, y))
x2 = np.min((img_width - 1, x1 + np.max((0, w - 1))))
y2 = np.min((img_height - 1, y1 + np.max((0, h - 1))))
if w*h > 0 and x2 >= x1 and y2 >= y1:
bbox = np.array([x1, y1, x2-x1, y2-y1])
else:
print("sanitize bboxes:",image_id)
continue
bbox = larger_bbox(bbox)
n_copain = id2pairId[str(bbox_id)] - bbox_id + n # n_copain - n = id_copain - id
id_list = db.getAnnIds(image_id) # ids of instances in same img
dis2id = {}
n_list = []
for cand_id in id_list:
bbox_cand = db.loadAnns(cand_id)[0]['bbox']
center_cand = [bbox_cand[0] + bbox_cand[2]/2, bbox_cand[1] + bbox_cand[3]/2]
dis = math.sqrt((center[0] - center_cand[0])**2 + (center[1] - center_cand[1])**2)
dis2id[dis] = cand_id
id_list_sorted = [dis2id[k] for k in sorted(dis2id.keys())]
for cand_id in id_list_sorted:
n_list.append(cand_id - bbox_id + n)
data.append({
'img_id': image_id,
'img_path': img_path,
'id': bbox_id,
'n_copain': n_copain,
'n_list': n_list,
'orig_bbox': orig_bbox,
'bbox': bbox,
'joint_img': joint_img, # [org_img_x, org_img_y, depth - root_depth]
'joint_cam': joint_cam, # [X, Y, Z] in camera coordinate
'joint_vis': joint_vis,
'root_cam': root_cam, # [X, Y, Z] in camera coordinate
'f': f,
'c': c,
'joint_cam_posenet': joint_cam_posenet, # result from posenet_nonefine
#'noise': noise,
})
n = n + 1
return data
def evaluate(self, preds, result_dir):
# test for img output, use in test.py
        # add PoseNet 3D camera-coordinate results to the ground-truth file as 'MuCo_with_posenet_result.json', under the key 'keypoints_cam_posenet'
gts = self.load_data()#self.data
sample_num = len(preds)
joint_num = self.joint_num
pred_2d_per_bbox = {}
pred_2d_save = {}
pred_3d_save = {}
gt_dict_orig = json.load(open('data/MuCo/data/annotations/MuCo-3DHP.json','r'))
gt_dict = gt_dict_orig
for n in range(sample_num):
gt = gts[n]
bbox = gt['bbox']
gt_3d_root = gt['root_cam']
bbox_id = gt['id']
f = gt['f']
c = gt['c']
pred_2d_kpt = preds[n].copy()
pred_2d_kpt = warp_coord_to_original(pred_2d_kpt, bbox, gt_3d_root)
if str(n) in pred_2d_per_bbox:
pred_2d_per_bbox[str(n)].append(pred_2d_kpt)
else:
pred_2d_per_bbox[str(n)] = [pred_2d_kpt]
pred_2d_kpt = pred_2d_per_bbox[str(n)].copy()
pred_2d_kpt = np.mean(np.array(pred_2d_kpt), axis=0)
pred_3d_kpt = pixel2cam(pred_2d_kpt, f, c)
### add posenet 3d cam result to gt file as 'MuCo_with_posenet_result.json', add key 'keypoints_cam_posenet'
gt_dict['annotations'][int(bbox_id)]['keypoints_cam_posenet'] = pred_3d_kpt.tolist()
with open('data/MuCo/MuCo_with_posenet_result.json','w') as w:
json.dump(gt_dict, w)
|
[
"numpy.ones",
"utils.pose_utils.warp_coord_to_original",
"json.dump",
"os.path.join",
"pycocotools.coco.COCO",
"math.sqrt",
"numpy.max",
"numpy.array",
"numpy.concatenate",
"utils.pose_utils.pixel2cam"
] |
[((1105, 1143), 'os.path.join', 'osp.join', (['cfg.data_dir', '"""MuCo"""', '"""data"""'], {}), "(cfg.data_dir, 'MuCo', 'data')\n", (1113, 1143), True, 'import os.path as osp\n'), ((2129, 2156), 'pycocotools.coco.COCO', 'COCO', (['self.train_annot_path'], {}), '(self.train_annot_path)\n', (2133, 2156), False, 'from pycocotools.coco import COCO\n'), ((2425, 2465), 'os.path.join', 'osp.join', (['self.img_dir', "img['file_name']"], {}), "(self.img_dir, img['file_name'])\n", (2433, 2465), True, 'import os.path as osp\n'), ((2542, 2560), 'numpy.array', 'np.array', (['[fx, fy]'], {}), '([fx, fy])\n', (2550, 2560), True, 'import numpy as np\n'), ((2566, 2584), 'numpy.array', 'np.array', (['[cx, cy]'], {}), '([cx, cy])\n', (2574, 2584), True, 'import numpy as np\n'), ((2611, 2641), 'numpy.array', 'np.array', (["ann['keypoints_cam']"], {}), "(ann['keypoints_cam'])\n", (2619, 2641), True, 'import numpy as np\n'), ((2674, 2712), 'numpy.array', 'np.array', (["ann['keypoints_cam_posenet']"], {}), "(ann['keypoints_cam_posenet'])\n", (2682, 2712), True, 'import numpy as np\n'), ((2786, 2816), 'numpy.array', 'np.array', (["ann['keypoints_img']"], {}), "(ann['keypoints_img'])\n", (2794, 2816), True, 'import numpy as np\n'), ((2841, 2889), 'numpy.concatenate', 'np.concatenate', (['[joint_img, joint_cam[:, 2:]]', '(1)'], {}), '([joint_img, joint_cam[:, 2:]], 1)\n', (2855, 2889), True, 'import numpy as np\n'), ((2970, 2998), 'numpy.ones', 'np.ones', (['(self.joint_num, 1)'], {}), '((self.joint_num, 1))\n', (2977, 2998), True, 'import numpy as np\n'), ((3086, 3107), 'numpy.array', 'np.array', (["ann['bbox']"], {}), "(ann['bbox'])\n", (3094, 3107), True, 'import numpy as np\n'), ((3256, 3270), 'numpy.max', 'np.max', (['(0, x)'], {}), '((0, x))\n', (3262, 3270), True, 'import numpy as np\n'), ((3288, 3302), 'numpy.max', 'np.max', (['(0, y)'], {}), '((0, y))\n', (3294, 3302), True, 'import numpy as np\n'), ((5945, 5998), 'utils.pose_utils.warp_coord_to_original', 'warp_coord_to_original', (['pred_2d_kpt', 'bbox', 'gt_3d_root'], {}), '(pred_2d_kpt, bbox, gt_3d_root)\n', (5967, 5998), False, 'from utils.pose_utils import pixel2cam, get_bbox, warp_coord_to_original, rigid_align, cam2pixel\n'), ((6329, 6357), 'utils.pose_utils.pixel2cam', 'pixel2cam', (['pred_2d_kpt', 'f', 'c'], {}), '(pred_2d_kpt, f, c)\n', (6338, 6357), False, 'from utils.pose_utils import pixel2cam, get_bbox, warp_coord_to_original, rigid_align, cam2pixel\n'), ((6653, 6674), 'json.dump', 'json.dump', (['gt_dict', 'w'], {}), '(gt_dict, w)\n', (6662, 6674), False, 'import json\n'), ((3509, 3545), 'numpy.array', 'np.array', (['[x1, y1, x2 - x1, y2 - y1]'], {}), '([x1, y1, x2 - x1, y2 - y1])\n', (3517, 3545), True, 'import numpy as np\n'), ((4104, 4189), 'math.sqrt', 'math.sqrt', (['((center[0] - center_cand[0]) ** 2 + (center[1] - center_cand[1]) ** 2)'], {}), '((center[0] - center_cand[0]) ** 2 + (center[1] - center_cand[1]) ** 2\n )\n', (4113, 4189), False, 'import math\n'), ((6272, 6293), 'numpy.array', 'np.array', (['pred_2d_kpt'], {}), '(pred_2d_kpt)\n', (6280, 6293), True, 'import numpy as np\n'), ((3348, 3366), 'numpy.max', 'np.max', (['(0, w - 1)'], {}), '((0, w - 1))\n', (3354, 3366), True, 'import numpy as np\n'), ((3415, 3433), 'numpy.max', 'np.max', (['(0, h - 1)'], {}), '((0, h - 1))\n', (3421, 3433), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from sklearn.exceptions import NotFittedError
from sklearn.neighbors import KernelDensity
from sklearn.linear_model import LinearRegression, LogisticRegression
import pickle
import os
import matplotlib.pylab as plt
from sklearn.externals import joblib
import numpy as np
from sklearn.model_selection import GridSearchCV
import seaborn as sbn
import logging
from .absmodel import Module
logger_format = "%(levelname)s [%(asctime)s]: %(message)s"
logging.basicConfig(filename="logfile.log",
level=logging.DEBUG, format=logger_format,
filemode='w') # use filemode='a' for APPEND
logger = logging.getLogger(__name__)
def grid_fit_kde(residual):
"""
Grid search for best bandwidth of KDE
Args:
residual: residual value.
Returns:
"""
grid = GridSearchCV(KernelDensity(), {'bandwidth':np.linspace(0.1,1.0,20)}, cv=20)
grid.fit(residual)
return grid.best_params_
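# Illustrative usage (mirrors how _fit uses it below): `residual` must be a 2-D column
# vector; the selected bandwidth is then fed back into a KernelDensity estimator.
#   best = grid_fit_kde(residual.reshape(-1, 1))
#   kde = KernelDensity(bandwidth=best['bandwidth']).fit(residual.reshape(-1, 1))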
class MixLinearModel(Module):
"""
    Mixture of linear models.
    Trains a logistic regression for 0/1 (rain / no-rain) prediction, then fits a weighted
    linear regression whose sample weights come from the logistic regression's output,
    giving a mixture of linear models for rainy and non-rainy events.
"""
def __init__(self, linear_reg=LinearRegression(), log_reg=LogisticRegression(),
kde=KernelDensity(kernel="gaussian"), eps=0.0001, offset = -.05):
super(MixLinearModel, self).__init__()
self.linear_reg = linear_reg
self.eps = eps
self.log_reg = log_reg
self.kde = kde
self.fitted = False
self.residual = False
self.offset= offset
@staticmethod
def residual_plot(observed, true_value, fitted):
plt.scatter(true_value, np.log(observed))
plt.plot(true_value, fitted, '-r')
plt.xlabel('Log (predictor + eps)')
plt.ylabel('Log (response + eps)')
plt.show()
@staticmethod
def residual_density_plot(residual):
plt.subplot(211)
sbn.distplot(residual,hist=True )
plt.subplot(212)
sbn.kdeplot(residual)
@staticmethod
def grid_fit_kde(residual):
from sklearn.model_selection import GridSearchCV
grid = GridSearchCV(KernelDensity(), {'bandwidth':np.linspace(0.1,1.0,20)}, cv=20)
grid.fit(residual)
return grid.best_params_
def _fit(self, x, y, verbose=False, load=False):
"""
Args:
            x: NxD ndarray of features.
            y: Nx1 ndarray of observed values.
Returns:
"""
x, y = Module.validate(x, y)
l_x, l_y = np.log(x + self.eps), np.log(y + self.eps)
y_zero_one = (y > 0.0).astype(int)
if y_zero_one.max() == y_zero_one.min():
raise NotFittedError("Logistic model couldn't fit, because the number of classes is <2")
self.log_reg.fit(x, y_zero_one)
sample_weight = self.log_reg.predict_proba(x)[:, 1]
# Linear regression under log mode.
self.linear_reg.fit(X=l_x, y=l_y, sample_weight=sample_weight)
self.fitted = self.linear_reg.predict(l_x)
self.residual = (self.fitted - l_y)
# Grid fit for bandwidth.
if load is False:
param = grid_fit_kde(self.residual)
self.kde = KernelDensity(bandwidth=param["bandwidth"])
self.kde.fit(self.residual)
else:
self.kde = pickle.load(open("all_kde.kd","rb"))
self.fitted = True
#logger.debug("KDE bandwidth %s"%self.kde.bandwidth)
return self
def predict(self, x, y, label=None):
"""
        Compute the anomaly score (negative log of the mixture likelihood) of the
        given observations under the trained model.
        Args:
            x: ndarray feature matrix.
            y: ndarray of ground-truth observations.
            label: unused; kept for interface compatibility.
        Returns:
            ndarray of per-sample anomaly scores.
"""
x , y = Module.validate(x, y)
if self.fitted is False:
raise NotFittedError("Call fit before prediction")
log_pred = self.log_reg.predict_proba(x)[:, 1]
linear_pred = self.linear_reg.predict(np.log(x + self.eps))
return self.mixl(y, log_pred, linear_pred)
def decision_function(self, score):
"""
        Return the decision value for a given anomaly score.
        Args:
            score: anomaly score(s) as returned by ``predict``.
        Returns:
            the score shifted by the configured offset.
"""
return score - self.offset
def mixl(self, y, logreg_prediction, linear_predictions):
"""
        - if RAIN = 0: $-\log(1 - p_1)$
        - if RAIN > 0: $-\log\left[ p_1 \frac{P(\log(RAIN + \epsilon))}{RAIN + \epsilon} \right]$
Args:
y: (np.array) observations.
            logreg_prediction: (np.array) fitted probabilities from the logistic regression (0/1 model).
linear_predictions:(np.array) fitted values from linear regression on log scale.
"""
# Reshape the data
p = logreg_prediction.reshape([-1, 1])
observations = y.reshape([-1, 1])
predictions = linear_predictions.reshape([-1, 1])
zero_rain = np.multiply((1 - p), (observations == 0))
# density of residual and convert to non-log value.
residual = predictions - np.log(observations + self.eps)
residual_density = np.exp(self.kde.score_samples(residual)).reshape(-1,1)
non_zero_rain = np.divide(np.multiply(p, residual_density),
(observations + self.eps))
result = zero_rain + non_zero_rain
return -np.log(result + np.max(result))
def to_json(self):
if not self.fitted:
raise NotFittedError("Fit method should be called before save operation.")
model_config = {
"kde_model": self.kde,
"logistic_model": self.log_reg,
"linear_model": self.linear_reg
}
return model_config
@classmethod
def from_json(cls, model_config):
mlm = MixLinearModel(linear_reg=model_config['linear_model'], log_reg=model_config['logistic_model'],
kde=model_config['kde_model'])
mlm.fitted = True
return mlm
def save(self, model_id="001", model_path="rainqc_model"):
"""
        Save the fitted model components (KDE, logistic and linear regression) to disk.
Returns:
"""
# model_config = {"model_id":model_id,
# "kde":self.kde,
# "logistic_reg":self.log_reg,
# "linear_regression":self.linear_reg}
# localdatasource.dump(model_config,open(model_id+".localdatasource","wb"))
current_model = os.path.join(model_path, model_id)
if not os.path.exists(current_model):
os.makedirs(current_model)
joblib.dump(self.kde, os.path.join(current_model, "kde_model.pk"))
joblib.dump(self.linear_reg, os.path.join(current_model, "linear_model.pk"))
joblib.dump(self.log_reg, os.path.join(current_model, "logistic_model.pk"))
@classmethod
def load(cls, model_id="001", model_path="rainqc_model"):
loaded_model = os.path.join(model_path, model_id)
# model_config = localdatasource.load(open(model_id+".localdatasource","rb"))
if not os.path.exists(loaded_model):
return ValueError("Directory for saved models don't exist")
reg_model = joblib.load(os.path.join(loaded_model, "linear_model.pk"))
kde = joblib.load(os.path.join(loaded_model, "kde_model.pk"))
log_reg = joblib.load(os.path.join(loaded_model, "logistic_model.pk")) # pickle.load(model_config['zerone'])
mxll = MixLinearModel(linear_reg=reg_model, log_reg=log_reg, kde=kde)
mxll.fitted = True
return mxll
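# End-to-end usage sketch (hypothetical data, not part of the original module; the public
# fit entry point may differ depending on the abstract Module class):
#
#   model = MixLinearModel()
#   x = np.maximum(np.random.randn(500, 1), 0)   # predictor rain series (zeros + positives)
#   y = np.maximum(np.random.randn(500, 1), 0)   # target rain series
#   model._fit(x, y)                             # logistic + weighted linear + KDE fit
#   scores = model.predict(x, y)                 # per-sample anomaly scores
#   decisions = model.decision_function(scores)  # shift by the offset before thresholding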
|
[
"logging.getLogger",
"sklearn.exceptions.NotFittedError",
"numpy.log",
"matplotlib.pylab.show",
"os.path.exists",
"numpy.multiply",
"seaborn.distplot",
"sklearn.neighbors.KernelDensity",
"numpy.max",
"numpy.linspace",
"matplotlib.pylab.plot",
"matplotlib.pylab.xlabel",
"sklearn.linear_model.LinearRegression",
"logging.basicConfig",
"os.makedirs",
"os.path.join",
"sklearn.linear_model.LogisticRegression",
"seaborn.kdeplot",
"matplotlib.pylab.subplot",
"matplotlib.pylab.ylabel"
] |
[((486, 591), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""logfile.log"""', 'level': 'logging.DEBUG', 'format': 'logger_format', 'filemode': '"""w"""'}), "(filename='logfile.log', level=logging.DEBUG, format=\n logger_format, filemode='w')\n", (505, 591), False, 'import logging\n'), ((667, 694), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (684, 694), False, 'import logging\n'), ((866, 881), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {}), '()\n', (879, 881), False, 'from sklearn.neighbors import KernelDensity\n'), ((1316, 1334), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1332, 1334), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1344, 1364), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1362, 1364), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1387, 1419), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""gaussian"""'}), "(kernel='gaussian')\n", (1400, 1419), False, 'from sklearn.neighbors import KernelDensity\n'), ((1827, 1861), 'matplotlib.pylab.plot', 'plt.plot', (['true_value', 'fitted', '"""-r"""'], {}), "(true_value, fitted, '-r')\n", (1835, 1861), True, 'import matplotlib.pylab as plt\n'), ((1870, 1905), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Log (predictor + eps)"""'], {}), "('Log (predictor + eps)')\n", (1880, 1905), True, 'import matplotlib.pylab as plt\n'), ((1914, 1948), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Log (response + eps)"""'], {}), "('Log (response + eps)')\n", (1924, 1948), True, 'import matplotlib.pylab as plt\n'), ((1957, 1967), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (1965, 1967), True, 'import matplotlib.pylab as plt\n'), ((2036, 2052), 'matplotlib.pylab.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (2047, 2052), True, 'import matplotlib.pylab as plt\n'), ((2061, 2094), 'seaborn.distplot', 'sbn.distplot', (['residual'], {'hist': '(True)'}), '(residual, hist=True)\n', (2073, 2094), True, 'import seaborn as sbn\n'), ((2103, 2119), 'matplotlib.pylab.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (2114, 2119), True, 'import matplotlib.pylab as plt\n'), ((2128, 2149), 'seaborn.kdeplot', 'sbn.kdeplot', (['residual'], {}), '(residual)\n', (2139, 2149), True, 'import seaborn as sbn\n'), ((5084, 5121), 'numpy.multiply', 'np.multiply', (['(1 - p)', '(observations == 0)'], {}), '(1 - p, observations == 0)\n', (5095, 5121), True, 'import numpy as np\n'), ((6606, 6640), 'os.path.join', 'os.path.join', (['model_path', 'model_id'], {}), '(model_path, model_id)\n', (6618, 6640), False, 'import os\n'), ((7073, 7107), 'os.path.join', 'os.path.join', (['model_path', 'model_id'], {}), '(model_path, model_id)\n', (7085, 7107), False, 'import os\n'), ((896, 921), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(20)'], {}), '(0.1, 1.0, 20)\n', (907, 921), True, 'import numpy as np\n'), ((1801, 1817), 'numpy.log', 'np.log', (['observed'], {}), '(observed)\n', (1807, 1817), True, 'import numpy as np\n'), ((2286, 2301), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {}), '()\n', (2299, 2301), False, 'from sklearn.neighbors import KernelDensity\n'), ((2656, 2676), 'numpy.log', 'np.log', (['(x + self.eps)'], {}), '(x + self.eps)\n', (2662, 2676), True, 'import numpy as np\n'), ((2678, 2698), 'numpy.log', 'np.log', (['(y + self.eps)'], {}), '(y + self.eps)\n', (2684, 2698), True, 'import 
numpy as np\n'), ((2810, 2897), 'sklearn.exceptions.NotFittedError', 'NotFittedError', (['"""Logistic model couldn\'t fit, because the number of classes is <2"""'], {}), '(\n "Logistic model couldn\'t fit, because the number of classes is <2")\n', (2824, 2897), False, 'from sklearn.exceptions import NotFittedError\n'), ((3338, 3381), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'bandwidth': "param['bandwidth']"}), "(bandwidth=param['bandwidth'])\n", (3351, 3381), False, 'from sklearn.neighbors import KernelDensity\n'), ((3981, 4025), 'sklearn.exceptions.NotFittedError', 'NotFittedError', (['"""Call fit before prediction"""'], {}), "('Call fit before prediction')\n", (3995, 4025), False, 'from sklearn.exceptions import NotFittedError\n'), ((4136, 4156), 'numpy.log', 'np.log', (['(x + self.eps)'], {}), '(x + self.eps)\n', (4142, 4156), True, 'import numpy as np\n'), ((5219, 5250), 'numpy.log', 'np.log', (['(observations + self.eps)'], {}), '(observations + self.eps)\n', (5225, 5250), True, 'import numpy as np\n'), ((5368, 5400), 'numpy.multiply', 'np.multiply', (['p', 'residual_density'], {}), '(p, residual_density)\n', (5379, 5400), True, 'import numpy as np\n'), ((5631, 5699), 'sklearn.exceptions.NotFittedError', 'NotFittedError', (['"""Fit method should be called before save operation."""'], {}), "('Fit method should be called before save operation.')\n", (5645, 5699), False, 'from sklearn.exceptions import NotFittedError\n'), ((6656, 6685), 'os.path.exists', 'os.path.exists', (['current_model'], {}), '(current_model)\n', (6670, 6685), False, 'import os\n'), ((6699, 6725), 'os.makedirs', 'os.makedirs', (['current_model'], {}), '(current_model)\n', (6710, 6725), False, 'import os\n'), ((6756, 6799), 'os.path.join', 'os.path.join', (['current_model', '"""kde_model.pk"""'], {}), "(current_model, 'kde_model.pk')\n", (6768, 6799), False, 'import os\n'), ((6838, 6884), 'os.path.join', 'os.path.join', (['current_model', '"""linear_model.pk"""'], {}), "(current_model, 'linear_model.pk')\n", (6850, 6884), False, 'import os\n'), ((6920, 6968), 'os.path.join', 'os.path.join', (['current_model', '"""logistic_model.pk"""'], {}), "(current_model, 'logistic_model.pk')\n", (6932, 6968), False, 'import os\n'), ((7209, 7237), 'os.path.exists', 'os.path.exists', (['loaded_model'], {}), '(loaded_model)\n', (7223, 7237), False, 'import os\n'), ((7344, 7389), 'os.path.join', 'os.path.join', (['loaded_model', '"""linear_model.pk"""'], {}), "(loaded_model, 'linear_model.pk')\n", (7356, 7389), False, 'import os\n'), ((7417, 7459), 'os.path.join', 'os.path.join', (['loaded_model', '"""kde_model.pk"""'], {}), "(loaded_model, 'kde_model.pk')\n", (7429, 7459), False, 'import os\n'), ((7491, 7538), 'os.path.join', 'os.path.join', (['loaded_model', '"""logistic_model.pk"""'], {}), "(loaded_model, 'logistic_model.pk')\n", (7503, 7538), False, 'import os\n'), ((2316, 2341), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(20)'], {}), '(0.1, 1.0, 20)\n', (2327, 2341), True, 'import numpy as np\n'), ((5545, 5559), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (5551, 5559), True, 'import numpy as np\n')]
|
"""
=========================================
Robust line model estimation using RANSAC
=========================================
In this example we see how to robustly fit a line model to faulty data using
the RANSAC (random sample consensus) algorithm.
First, the data are generated by adding Gaussian noise to a linear function.
Then, the outlier points are added to the data set.
RANSAC iteratively estimates the parameters from the data set.
At each iteration the following steps are performed:
1. Select ``min_samples`` random samples from the original data and check
whether the set of data is valid (see ``is_data_valid`` option).
2. Estimate a model on the random subset
   (``model_cls.estimate(*data[random_subset])``) and check whether the
estimated model is valid (see ``is_model_valid`` option).
3. Classify all the data points as either inliers or outliers by calculating
the residuals using the estimated model (``model_cls.residuals(*data)``) -
all data samples with residuals smaller than the ``residual_threshold``
are considered as inliers.
4. If the number of the inlier samples is greater than ever before,
save the estimated model as the best model. In case the current estimated
model has the same number of inliers, it is considered as the best model
only if the sum of residuals is lower.
These steps are performed either a maximum number of times or until one of
the special stop criteria is met. The final model is estimated using all the
inlier samples of the previously determined best model.
"""
import numpy as np
from matplotlib import pyplot as plt
from skimage.measure import LineModelND, ransac
np.random.seed(seed=1)
# generate coordinates of line
x = np.arange(-200, 200)
y = 0.2 * x + 20
data = np.column_stack([x, y])
# add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
# add faulty data
faulty = np.array(30 * [(180., -100)])
faulty += 10 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# fit line using all data
model = LineModelND()
model.estimate(data)
# robustly fit line only using inlier data with RANSAC algorithm
model_robust, inliers = ransac(data, LineModelND, min_samples=2,
residual_threshold=1, max_trials=1000)
outliers = inliers == False
# generate coordinates of estimated models
line_x = np.arange(-250, 250)
line_y = model.predict_y(line_x)
line_y_robust = model_robust.predict_y(line_x)
fig, ax = plt.subplots()
ax.plot(data[inliers, 0], data[inliers, 1], '.b', alpha=0.6,
label='Inlier data')
ax.plot(data[outliers, 0], data[outliers, 1], '.r', alpha=0.6,
label='Outlier data')
ax.plot(line_x, line_y, '-k', label='Line model from all data')
ax.plot(line_x, line_y_robust, '-b', label='Robust line model')
ax.legend(loc='lower left')
plt.show()
######################################################################
# Now, we generalize this example to 3D points.
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from skimage.measure import LineModelND, ransac
np.random.seed(seed=1)
# generate coordinates of line
point = np.array([0, 0, 0], dtype='float')
direction = np.array([1, 1, 1], dtype='float') / np.sqrt(3)
xyz = point + 10 * np.arange(-100, 100)[..., np.newaxis] * direction
# add gaussian noise to coordinates
noise = np.random.normal(size=xyz.shape)
xyz += 0.5 * noise
xyz[::2] += 20 * noise[::2]
xyz[::4] += 100 * noise[::4]
# robustly fit line only using inlier data with RANSAC algorithm
model_robust, inliers = ransac(xyz, LineModelND, min_samples=2,
residual_threshold=1, max_trials=1000)
outliers = inliers == False
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xyz[inliers][:, 0], xyz[inliers][:, 1], xyz[inliers][:, 2], c='b',
marker='o', label='Inlier data')
ax.scatter(xyz[outliers][:, 0], xyz[outliers][:, 1], xyz[outliers][:, 2], c='r',
marker='o', label='Outlier data')
ax.legend(loc='lower left')
plt.show()
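######################################################################
# The fitted parameters can also be inspected directly: for ``LineModelND`` the
# ``params`` attribute holds an (origin, direction) pair, e.g.
# ``origin, direction = model_robust.params``.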
|
[
"numpy.random.normal",
"numpy.sqrt",
"numpy.column_stack",
"skimage.measure.LineModelND",
"numpy.array",
"skimage.measure.ransac",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((1667, 1689), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(1)'}), '(seed=1)\n', (1681, 1689), True, 'import numpy as np\n'), ((1726, 1746), 'numpy.arange', 'np.arange', (['(-200)', '(200)'], {}), '(-200, 200)\n', (1735, 1746), True, 'import numpy as np\n'), ((1771, 1794), 'numpy.column_stack', 'np.column_stack', (['[x, y]'], {}), '([x, y])\n', (1786, 1794), True, 'import numpy as np\n'), ((1840, 1873), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'data.shape'}), '(size=data.shape)\n', (1856, 1873), True, 'import numpy as np\n'), ((1979, 2009), 'numpy.array', 'np.array', (['(30 * [(180.0, -100)])'], {}), '(30 * [(180.0, -100)])\n', (1987, 2009), True, 'import numpy as np\n'), ((2127, 2140), 'skimage.measure.LineModelND', 'LineModelND', ([], {}), '()\n', (2138, 2140), False, 'from skimage.measure import LineModelND, ransac\n'), ((2252, 2331), 'skimage.measure.ransac', 'ransac', (['data', 'LineModelND'], {'min_samples': '(2)', 'residual_threshold': '(1)', 'max_trials': '(1000)'}), '(data, LineModelND, min_samples=2, residual_threshold=1, max_trials=1000)\n', (2258, 2331), False, 'from skimage.measure import LineModelND, ransac\n'), ((2444, 2464), 'numpy.arange', 'np.arange', (['(-250)', '(250)'], {}), '(-250, 250)\n', (2453, 2464), True, 'import numpy as np\n'), ((2556, 2570), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2568, 2570), True, 'from matplotlib import pyplot as plt\n'), ((2910, 2920), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2918, 2920), True, 'from matplotlib import pyplot as plt\n'), ((3188, 3210), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(1)'}), '(seed=1)\n', (3202, 3210), True, 'import numpy as np\n'), ((3251, 3285), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': '"""float"""'}), "([0, 0, 0], dtype='float')\n", (3259, 3285), True, 'import numpy as np\n'), ((3460, 3492), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'xyz.shape'}), '(size=xyz.shape)\n', (3476, 3492), True, 'import numpy as np\n'), ((3659, 3737), 'skimage.measure.ransac', 'ransac', (['xyz', 'LineModelND'], {'min_samples': '(2)', 'residual_threshold': '(1)', 'max_trials': '(1000)'}), '(xyz, LineModelND, min_samples=2, residual_threshold=1, max_trials=1000)\n', (3665, 3737), False, 'from skimage.measure import LineModelND, ransac\n'), ((3804, 3816), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3814, 3816), True, 'from matplotlib import pyplot as plt\n'), ((4136, 4146), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4144, 4146), True, 'from matplotlib import pyplot as plt\n'), ((2024, 2059), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'faulty.shape'}), '(size=faulty.shape)\n', (2040, 2059), True, 'import numpy as np\n'), ((3298, 3332), 'numpy.array', 'np.array', (['[1, 1, 1]'], {'dtype': '"""float"""'}), "([1, 1, 1], dtype='float')\n", (3306, 3332), True, 'import numpy as np\n'), ((3335, 3345), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (3342, 3345), True, 'import numpy as np\n'), ((3365, 3385), 'numpy.arange', 'np.arange', (['(-100)', '(100)'], {}), '(-100, 100)\n', (3374, 3385), True, 'import numpy as np\n')]
|
import os, pickle
import os.path as osp
import numpy as np
import cv2
import scipy.ndimage as nd
import init_path
from lib.dataset.get_dataset import get_dataset
from lib.network.sgan import SGAN
import torch
from torch.utils.data import DataLoader
import argparse
from ipdb import set_trace
import matplotlib.pyplot as plt
from lib.utils import pyutils
classes=['background',
'aeroplane',
'bicycle',
'bird',
'boat',
'bottle',
'bus',
'car',
'cat',
'chair',
'cow',
'diningtable',
'dog',
'horse',
'motorbike',
'person',
'pottedplant',
'sheep',
'sofa',
'train',
'tvmonitor']
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--cfg_file", default=None, type=str)
args = parser.parse_args()
args = pyutils.read_yaml2cls(args.cfg_file)
return args
# mean pixel : in B-G-R channel order
mean_pixel = np.array([104.008, 116.669, 122.675])
def preprocess(image, size):
""" pre-process images with Opencv format"""
image = np.array(image)
H, W, _ = image.shape
image = nd.zoom(image.astype('float32'), (size / H, size / W, 1.0), order=1)
image = image - mean_pixel
image = image.transpose([2, 0, 1])
image = np.expand_dims(image, axis=0)
return torch.from_numpy(image)
def generate_seed_with_ignore(localization):
"""
This function generate seed ignoring all the conflicts
:param localization: (41, 41, 21) binary value
:return:
"""
h, w, c = localization.shape
assert (h == 41) & (w == 41) & (c == 21)
# set_trace()
# find conflict index
sum_loc = np.sum(localization, axis=2)
conflict_ind = np.where(sum_loc > 1)
# set conflict position to 0
localization[conflict_ind[0], conflict_ind[1], :] = 0
# generate seed
ind = np.where(localization)
mask = np.ones(shape=(h, w), dtype=np.int) * 21
mask[ind[0], ind[1]] = ind[2]
return mask
def generate_seed_wo_ignore(localization, train_boat=False):
"""
This function generate seed with priority strategy
:param localization:
:return:
"""
h, w, c = localization.shape
assert (h == 41) & (w == 41) & (c == 21)
# generate background seed
mask = np.ones((h, w), dtype=np.int) * 21
bg_ind = np.where(localization[:, :, 0])
mask[bg_ind[0], bg_ind[1]] = 0
# generate foreground seed in the order of their area
area = np.sum(localization, axis=(0, 1))
cls_order = np.argsort(area)[::-1] # area in descending order
for cls in cls_order:
if area[cls] == 0:
break
ind = np.where(localization[:, :, cls])
mask[ind[0], ind[1]] = cls
if train_boat:
train_boat_ind = np.where(((mask == 4) | (mask == 19)) & (localization[:, :, 0] == 1))
mask[train_boat_ind] = 0
return mask
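# Note on the priority strategy above (descriptive comment, not from the original code):
# foreground classes are written from largest to smallest area, so a pixel claimed by
# several classes ends up with the label of the smallest competing class (small objects
# win over large ones). With ``train_boat=True``, boat (4) and train (19) pixels that the
# background cue also marks are re-labelled as background (0).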
def get_localization_cues_sec(att_maps, saliency, im_label, cam_thresh):
"""get localization cues with method in SEC paper
perform hard thresholding for each foreground class
Parameters
----------
att_maps: [41, 41, 20]
saliency: [H, W]
im_label: list of foreground classes
cam_thresh: hard threshold to extract foreground class cues
Return
------
seg_mask: [41, 41]
"""
h, w = att_maps.shape[:2]
im_h, im_w = saliency.shape[:2]
localization1 = np.zeros(shape=(h, w, 21))
for idx in im_label: # idx: aero=1
heat_map = att_maps[:, :, idx - 1]
localization1[:, :, idx] = heat_map > cam_thresh * np.max(heat_map)
# bg_cue = saliency.astype(np.float32)
# bg_cue = bg_cue / 255
bg_cue = nd.zoom(saliency, (h / im_h, h / im_w), order=1)
localization1[:, :, 0] = bg_cue < 0.06
# handle conflict seed
if args.ignore_conflict:
seg_mask = generate_seed_with_ignore(localization1)
else:
seg_mask = generate_seed_wo_ignore(localization1, train_boat=True)
return seg_mask
def get_localization_cues_dcsp(att_maps, saliency, im_label, bg_thresh):
"""get localization cues with method in DCSP paper
compute harmonic mean for each foreground class
Parameters
----------
att_maps: [41, 41, 20]
saliency: [H, W]
im_label: list of foreground classes
    bg_thresh: threshold on the harmonic-mean map below which a pixel is labelled background
Return
------
seg_mask: [41, 41]
"""
h, w = att_maps.shape[:2]
im_h, im_w = saliency.shape[:2]
re_sal = nd.zoom(saliency, (h / im_h, w / im_w), order=1)
localization1 = np.zeros(shape=(h, w, 20))
for idx in im_label: # idx: aero=1
localization1[:, :, idx - 1] = 2 / ((1 / (att_maps[:, :, idx - 1] + 1e-7)) + (1 / (re_sal + 1e-7)))
hm_max = np.max(localization1, axis=2)
seg_mask = np.argmax(localization1, axis=2) + 1
seg_mask[hm_max < bg_thresh] = 0
return seg_mask
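# Worked example of the harmonic-mean cue above (made-up numbers): with an attention
# response of 0.8 and a saliency response of 0.6 at the same pixel,
# 2 / (1/0.8 + 1/0.6) ~= 0.686, so a class scores high only where both cues agree;
# pixels whose best harmonic mean falls below ``bg_thresh`` are labelled background (0).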
def filter_weight_dict(weight_dict, model_dict):
# filter the parameters that exist in the pretrained model
pretrained_dict = dict()
for k, v in weight_dict.items():
        # keep compatible with the previous version of the network definition
if "conv" in k and "backbone" not in k:
k = "backbone." + k
if k in model_dict:
pretrained_dict[k] = v
model_dict.update(pretrained_dict)
return model_dict
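# Descriptive note (not from the original code): the "backbone." prefix is added so that
# checkpoints saved with an older network definition (conv layers defined at the top
# level) can still be matched against the current model; parameters absent from the
# checkpoint keep their freshly initialised values.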
if __name__ == '__main__':
args = parse_args()
device = torch.device("cuda:0")
# input and output
im_tags = pickle.load(open(args.cue_file, "rb"))
if not osp.exists(args.res_path):
os.mkdir(args.res_path)
_, test_dataset = get_dataset(args.dataset_name, args)
batch_size = 8
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
# load net and trained weights
model = SGAN(backbone_name=args.backbone)
weight_dict = torch.load(osp.join(args.save_model_path, args.cfg_name, "model_iter_" + str(args.max_iter) + ".pth"))
model_dict = filter_weight_dict(weight_dict, model.state_dict())
model.load_state_dict(model_dict)
model = model.to(device)
model.eval()
save_path = osp.join(args.res_path, args.cfg_name + args.test_cfg)
if not osp.exists(save_path):
os.makedirs(save_path)
# compute class activation map
with torch.no_grad():
for num, pack in enumerate(test_loader):
names, imgs, labels = pack[0], pack[1].to(device, dtype=torch.float32), \
pack[2].numpy()
fg_sim = pack[3].to(device, dtype=torch.float32)
bg_sim = pack[4].to(device, dtype=torch.float32)
sizes = pack[6].to("cpu").numpy()
if args.combine_seedseg:
_, segs, cams = model.forward_cam(imgs, fg_sim, bg_sim)
cams = cams + segs
# cams = segs
else:
_, _, cams = model.forward_cam(imgs, fg_sim, bg_sim)
np_cams = np.transpose(cams.cpu().numpy(), (0, 2, 3, 1))
_, h, w, c = np_cams.shape
for k, name in enumerate(names):
# get output cam
im_label = im_tags[name]
im_h, im_w = sizes[k]
np_cam = np_cams[k]
# get saliency
bg_cue = cv2.imread(osp.join(args.dataset_root, "sal", args.sdnet_path, name + ".png"), cv2.IMREAD_GRAYSCALE)
bg_cue = bg_cue.astype(np.float32)
bg_cue = bg_cue / 255
seg_mask = get_localization_cues_sec(np_cam, bg_cue, im_label, args.cam_thresh)
# save mask
write_mask = nd.zoom(seg_mask, (im_h / h, im_w / w), order=0)
cv2.imwrite(osp.join(save_path, name + ".png"), write_mask)
|
[
"torch.from_numpy",
"numpy.argsort",
"numpy.array",
"scipy.ndimage.zoom",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.where",
"numpy.max",
"os.mkdir",
"lib.network.sgan.SGAN",
"numpy.ones",
"numpy.argmax",
"torch.device",
"os.makedirs",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"lib.dataset.get_dataset.get_dataset",
"numpy.expand_dims",
"torch.utils.data.DataLoader",
"torch.no_grad",
"lib.utils.pyutils.read_yaml2cls"
] |
[((922, 959), 'numpy.array', 'np.array', (['[104.008, 116.669, 122.675]'], {}), '([104.008, 116.669, 122.675])\n', (930, 959), True, 'import numpy as np\n'), ((686, 711), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (709, 711), False, 'import argparse\n'), ((816, 852), 'lib.utils.pyutils.read_yaml2cls', 'pyutils.read_yaml2cls', (['args.cfg_file'], {}), '(args.cfg_file)\n', (837, 852), False, 'from lib.utils import pyutils\n'), ((1051, 1066), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1059, 1066), True, 'import numpy as np\n'), ((1257, 1286), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1271, 1286), True, 'import numpy as np\n'), ((1299, 1322), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (1315, 1322), False, 'import torch\n'), ((1647, 1675), 'numpy.sum', 'np.sum', (['localization'], {'axis': '(2)'}), '(localization, axis=2)\n', (1653, 1675), True, 'import numpy as np\n'), ((1695, 1716), 'numpy.where', 'np.where', (['(sum_loc > 1)'], {}), '(sum_loc > 1)\n', (1703, 1716), True, 'import numpy as np\n'), ((1840, 1862), 'numpy.where', 'np.where', (['localization'], {}), '(localization)\n', (1848, 1862), True, 'import numpy as np\n'), ((2306, 2337), 'numpy.where', 'np.where', (['localization[:, :, 0]'], {}), '(localization[:, :, 0])\n', (2314, 2337), True, 'import numpy as np\n'), ((2443, 2476), 'numpy.sum', 'np.sum', (['localization'], {'axis': '(0, 1)'}), '(localization, axis=(0, 1))\n', (2449, 2476), True, 'import numpy as np\n'), ((3372, 3398), 'numpy.zeros', 'np.zeros', ([], {'shape': '(h, w, 21)'}), '(shape=(h, w, 21))\n', (3380, 3398), True, 'import numpy as np\n'), ((3644, 3692), 'scipy.ndimage.zoom', 'nd.zoom', (['saliency', '(h / im_h, h / im_w)'], {'order': '(1)'}), '(saliency, (h / im_h, h / im_w), order=1)\n', (3651, 3692), True, 'import scipy.ndimage as nd\n'), ((4458, 4506), 'scipy.ndimage.zoom', 'nd.zoom', (['saliency', '(h / im_h, w / im_w)'], {'order': '(1)'}), '(saliency, (h / im_h, w / im_w), order=1)\n', (4465, 4506), True, 'import scipy.ndimage as nd\n'), ((4527, 4553), 'numpy.zeros', 'np.zeros', ([], {'shape': '(h, w, 20)'}), '(shape=(h, w, 20))\n', (4535, 4553), True, 'import numpy as np\n'), ((4714, 4743), 'numpy.max', 'np.max', (['localization1'], {'axis': '(2)'}), '(localization1, axis=2)\n', (4720, 4743), True, 'import numpy as np\n'), ((5382, 5404), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (5394, 5404), False, 'import torch\n'), ((5579, 5615), 'lib.dataset.get_dataset.get_dataset', 'get_dataset', (['args.dataset_name', 'args'], {}), '(args.dataset_name, args)\n', (5590, 5615), False, 'from lib.dataset.get_dataset import get_dataset\n'), ((5654, 5743), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(8)'}), '(dataset=test_dataset, batch_size=batch_size, shuffle=False,\n num_workers=8)\n', (5664, 5743), False, 'from torch.utils.data import DataLoader\n'), ((5788, 5821), 'lib.network.sgan.SGAN', 'SGAN', ([], {'backbone_name': 'args.backbone'}), '(backbone_name=args.backbone)\n', (5792, 5821), False, 'from lib.network.sgan import SGAN\n'), ((6114, 6168), 'os.path.join', 'osp.join', (['args.res_path', '(args.cfg_name + args.test_cfg)'], {}), '(args.res_path, args.cfg_name + args.test_cfg)\n', (6122, 6168), True, 'import os.path as osp\n'), ((1874, 1909), 'numpy.ones', 'np.ones', ([], {'shape': '(h, w)', 'dtype': 'np.int'}), '(shape=(h, 
w), dtype=np.int)\n', (1881, 1909), True, 'import numpy as np\n'), ((2258, 2287), 'numpy.ones', 'np.ones', (['(h, w)'], {'dtype': 'np.int'}), '((h, w), dtype=np.int)\n', (2265, 2287), True, 'import numpy as np\n'), ((2493, 2509), 'numpy.argsort', 'np.argsort', (['area'], {}), '(area)\n', (2503, 2509), True, 'import numpy as np\n'), ((2629, 2662), 'numpy.where', 'np.where', (['localization[:, :, cls]'], {}), '(localization[:, :, cls])\n', (2637, 2662), True, 'import numpy as np\n'), ((2743, 2812), 'numpy.where', 'np.where', (['(((mask == 4) | (mask == 19)) & (localization[:, :, 0] == 1))'], {}), '(((mask == 4) | (mask == 19)) & (localization[:, :, 0] == 1))\n', (2751, 2812), True, 'import numpy as np\n'), ((4759, 4791), 'numpy.argmax', 'np.argmax', (['localization1'], {'axis': '(2)'}), '(localization1, axis=2)\n', (4768, 4791), True, 'import numpy as np\n'), ((5493, 5518), 'os.path.exists', 'osp.exists', (['args.res_path'], {}), '(args.res_path)\n', (5503, 5518), True, 'import os.path as osp\n'), ((5528, 5551), 'os.mkdir', 'os.mkdir', (['args.res_path'], {}), '(args.res_path)\n', (5536, 5551), False, 'import os, pickle\n'), ((6180, 6201), 'os.path.exists', 'osp.exists', (['save_path'], {}), '(save_path)\n', (6190, 6201), True, 'import os.path as osp\n'), ((6211, 6233), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (6222, 6233), False, 'import os, pickle\n'), ((6279, 6294), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6292, 6294), False, 'import torch\n'), ((3541, 3557), 'numpy.max', 'np.max', (['heat_map'], {}), '(heat_map)\n', (3547, 3557), True, 'import numpy as np\n'), ((7637, 7685), 'scipy.ndimage.zoom', 'nd.zoom', (['seg_mask', '(im_h / h, im_w / w)'], {'order': '(0)'}), '(seg_mask, (im_h / h, im_w / w), order=0)\n', (7644, 7685), True, 'import scipy.ndimage as nd\n'), ((7303, 7369), 'os.path.join', 'osp.join', (['args.dataset_root', '"""sal"""', 'args.sdnet_path', "(name + '.png')"], {}), "(args.dataset_root, 'sal', args.sdnet_path, name + '.png')\n", (7311, 7369), True, 'import os.path as osp\n'), ((7714, 7748), 'os.path.join', 'osp.join', (['save_path', "(name + '.png')"], {}), "(save_path, name + '.png')\n", (7722, 7748), True, 'import os.path as osp\n')]
|
import unittest
import numpy as np
import tensorflow as tf
from megnet.losses import mean_squared_error_with_scale
class TestLosses(unittest.TestCase):
def test_mse(self):
x = np.array([0.1, 0.2, 0.3])
y = np.array([0.05, 0.15, 0.25])
loss = mean_squared_error_with_scale(x, y, scale=100)
self.assertAlmostEqual(loss.numpy(), np.mean((x - y) ** 2) * 100)
if __name__ == "__main__":
unittest.main()
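# Hand check of the expected value: (x - y) is 0.05 for every element, so the mean
# squared error is 0.0025 and the scaled loss asserted above is 0.0025 * 100 = 0.25.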
|
[
"unittest.main",
"numpy.array",
"numpy.mean",
"megnet.losses.mean_squared_error_with_scale"
] |
[((428, 443), 'unittest.main', 'unittest.main', ([], {}), '()\n', (441, 443), False, 'import unittest\n'), ((192, 217), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3]'], {}), '([0.1, 0.2, 0.3])\n', (200, 217), True, 'import numpy as np\n'), ((230, 258), 'numpy.array', 'np.array', (['[0.05, 0.15, 0.25]'], {}), '([0.05, 0.15, 0.25])\n', (238, 258), True, 'import numpy as np\n'), ((274, 320), 'megnet.losses.mean_squared_error_with_scale', 'mean_squared_error_with_scale', (['x', 'y'], {'scale': '(100)'}), '(x, y, scale=100)\n', (303, 320), False, 'from megnet.losses import mean_squared_error_with_scale\n'), ((366, 387), 'numpy.mean', 'np.mean', (['((x - y) ** 2)'], {}), '((x - y) ** 2)\n', (373, 387), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
def count_harmonic_numbers(n: int):
count = 0
    for i in range(1, n+1):  # i runs from 1 to N
        for _ in range(i, n+1, i):  # multiples of i that do not exceed N
count += 1
return count
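# Sanity check (hand-computed): for n = 4 the nested loops count 4 + 2 + 1 + 1 = 8
# multiples in total, i.e. the sum over i of floor(n / i), which grows like n * ln(n);
# that is the "NlogN" curve the plot below compares against.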
x = np.linspace(1, 10**5, 100, dtype='int')
y = list(map(lambda x: count_harmonic_numbers(x), x))
y2 = x * np.log(x)
print(y)
print(y2)
plt.plot(x, y, label="count")
plt.plot(x, y2, label="NlogN")
plt.plot(x, x, label="N")
plt.legend()
plt.show()
|
[
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((238, 279), 'numpy.linspace', 'np.linspace', (['(1)', '(10 ** 5)', '(100)'], {'dtype': '"""int"""'}), "(1, 10 ** 5, 100, dtype='int')\n", (249, 279), True, 'import numpy as np\n'), ((370, 399), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""count"""'}), "(x, y, label='count')\n", (378, 399), True, 'import matplotlib.pyplot as plt\n'), ((400, 430), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {'label': '"""NlogN"""'}), "(x, y2, label='NlogN')\n", (408, 430), True, 'import matplotlib.pyplot as plt\n'), ((431, 456), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'x'], {'label': '"""N"""'}), "(x, x, label='N')\n", (439, 456), True, 'import matplotlib.pyplot as plt\n'), ((457, 469), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (467, 469), True, 'import matplotlib.pyplot as plt\n'), ((470, 480), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (478, 480), True, 'import matplotlib.pyplot as plt\n'), ((341, 350), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (347, 350), True, 'import numpy as np\n')]
|
import pathlib
import numpy as np
def create_submission(path: pathlib.Path, predictions):
pred_with_id = np.stack([np.arange(len(predictions)), predictions], axis=1)
np.savetxt(
fname=path,
X=pred_with_id,
fmt="%d",
delimiter=",",
header="id,label",
comments="",
)
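# Minimal usage sketch (file name and label values are hypothetical):
#
#   create_submission(pathlib.Path("submission.csv"), np.array([0, 2, 1]))
#
# This writes an "id,label" header followed by the rows "0,0", "1,2" and "2,1".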
|
[
"numpy.savetxt"
] |
[((177, 277), 'numpy.savetxt', 'np.savetxt', ([], {'fname': 'path', 'X': 'pred_with_id', 'fmt': '"""%d"""', 'delimiter': '""","""', 'header': '"""id,label"""', 'comments': '""""""'}), "(fname=path, X=pred_with_id, fmt='%d', delimiter=',', header=\n 'id,label', comments='')\n", (187, 277), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 20 17:12:53 2014
author: <NAME>
"""
import numpy as np
from statsmodels.regression.linear_model import OLS, WLS
from statsmodels.sandbox.regression.predstd import wls_prediction_std
def test_predict_se():
# this test doesn't use reference values
    # checks consistency across options, and compares to direct calculation
# generate dataset
nsample = 50
x1 = np.linspace(0, 20, nsample)
x = np.c_[x1, (x1 - 5)**2, np.ones(nsample)]
    np.random.seed(0)  # alternative seeds tried: 9876789, 9876543
beta = [0.5, -0.01, 5.]
y_true2 = np.dot(x, beta)
w = np.ones(nsample)
w[nsample * 6. / 10:] = 3
sig = 0.5
y2 = y_true2 + sig * w * np.random.normal(size=nsample)
x2 = x[:,[0,2]]
# estimate OLS
res2 = OLS(y2, x2).fit()
#direct calculation
covb = res2.cov_params()
predvar = res2.mse_resid + (x2 * np.dot(covb, x2.T).T).sum(1)
predstd = np.sqrt(predvar)
prstd, iv_l, iv_u = wls_prediction_std(res2)
np.testing.assert_almost_equal(prstd, predstd, 15)
#stats.t.isf(0.05/2., 50 - 2)
q = 2.0106347546964458
ci_half = q * predstd
np.testing.assert_allclose(iv_u, res2.fittedvalues + ci_half, rtol=1e-12)
np.testing.assert_allclose(iv_l, res2.fittedvalues - ci_half, rtol=1e-12)
prstd, iv_l, iv_u = wls_prediction_std(res2, x2[:3,:])
np.testing.assert_equal(prstd, prstd[:3])
np.testing.assert_allclose(iv_u, res2.fittedvalues[:3] + ci_half[:3],
rtol=1e-12)
np.testing.assert_allclose(iv_l, res2.fittedvalues[:3] - ci_half[:3],
rtol=1e-12)
# check WLS
res3 = WLS(y2, x2, 1. / w).fit()
#direct calculation
covb = res3.cov_params()
predvar = res3.mse_resid * w + (x2 * np.dot(covb, x2.T).T).sum(1)
predstd = np.sqrt(predvar)
prstd, iv_l, iv_u = wls_prediction_std(res3)
np.testing.assert_almost_equal(prstd, predstd, 15)
#stats.t.isf(0.05/2., 50 - 2)
q = 2.0106347546964458
ci_half = q * predstd
np.testing.assert_allclose(iv_u, res3.fittedvalues + ci_half, rtol=1e-12)
np.testing.assert_allclose(iv_l, res3.fittedvalues - ci_half, rtol=1e-12)
# testing shapes of exog
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-1:,:], weights=3.)
np.testing.assert_equal(prstd, prstd[-1])
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-1,:], weights=3.)
np.testing.assert_equal(prstd, prstd[-1])
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-2:,:], weights=3.)
np.testing.assert_equal(prstd, prstd[-2:])
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-2:,:], weights=[3, 3])
np.testing.assert_equal(prstd, prstd[-2:])
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[:3,:])
np.testing.assert_equal(prstd, prstd[:3])
np.testing.assert_allclose(iv_u, res3.fittedvalues[:3] + ci_half[:3],
rtol=1e-12)
np.testing.assert_allclose(iv_l, res3.fittedvalues[:3] - ci_half[:3],
rtol=1e-12)
#use wrong size for exog
#prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-1,0], weights=3.)
np.testing.assert_raises(ValueError, wls_prediction_std, res3, x2[-1,0],
weights=3.)
# check some weight values
sew1 = wls_prediction_std(res3, x2[-3:,:])[0]**2
for wv in np.linspace(0.5, 3, 5):
sew = wls_prediction_std(res3, x2[-3:,:], weights=1. / wv)[0]**2
np.testing.assert_allclose(sew, sew1 + res3.scale * (wv - 1))
|
[
"numpy.random.normal",
"statsmodels.sandbox.regression.predstd.wls_prediction_std",
"numpy.sqrt",
"numpy.ones",
"numpy.testing.assert_equal",
"statsmodels.regression.linear_model.WLS",
"numpy.testing.assert_allclose",
"numpy.testing.assert_raises",
"numpy.testing.assert_almost_equal",
"numpy.dot",
"numpy.linspace",
"statsmodels.regression.linear_model.OLS",
"numpy.random.seed"
] |
[((427, 454), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', 'nsample'], {}), '(0, 20, nsample)\n', (438, 454), True, 'import numpy as np\n'), ((508, 525), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (522, 525), True, 'import numpy as np\n'), ((587, 602), 'numpy.dot', 'np.dot', (['x', 'beta'], {}), '(x, beta)\n', (593, 602), True, 'import numpy as np\n'), ((611, 627), 'numpy.ones', 'np.ones', (['nsample'], {}), '(nsample)\n', (618, 627), True, 'import numpy as np\n'), ((935, 951), 'numpy.sqrt', 'np.sqrt', (['predvar'], {}), '(predvar)\n', (942, 951), True, 'import numpy as np\n'), ((977, 1001), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res2'], {}), '(res2)\n', (995, 1001), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((1006, 1056), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['prstd', 'predstd', '(15)'], {}), '(prstd, predstd, 15)\n', (1036, 1056), True, 'import numpy as np\n'), ((1149, 1222), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_u', '(res2.fittedvalues + ci_half)'], {'rtol': '(1e-12)'}), '(iv_u, res2.fittedvalues + ci_half, rtol=1e-12)\n', (1175, 1222), True, 'import numpy as np\n'), ((1227, 1300), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_l', '(res2.fittedvalues - ci_half)'], {'rtol': '(1e-12)'}), '(iv_l, res2.fittedvalues - ci_half, rtol=1e-12)\n', (1253, 1300), True, 'import numpy as np\n'), ((1326, 1361), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res2', 'x2[:3, :]'], {}), '(res2, x2[:3, :])\n', (1344, 1361), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((1365, 1406), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['prstd', 'prstd[:3]'], {}), '(prstd, prstd[:3])\n', (1388, 1406), True, 'import numpy as np\n'), ((1411, 1497), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_u', '(res2.fittedvalues[:3] + ci_half[:3])'], {'rtol': '(1e-12)'}), '(iv_u, res2.fittedvalues[:3] + ci_half[:3], rtol=\n 1e-12)\n', (1437, 1497), True, 'import numpy as np\n'), ((1528, 1614), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_l', '(res2.fittedvalues[:3] - ci_half[:3])'], {'rtol': '(1e-12)'}), '(iv_l, res2.fittedvalues[:3] - ci_half[:3], rtol=\n 1e-12)\n', (1554, 1614), True, 'import numpy as np\n'), ((1834, 1850), 'numpy.sqrt', 'np.sqrt', (['predvar'], {}), '(predvar)\n', (1841, 1850), True, 'import numpy as np\n'), ((1876, 1900), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3'], {}), '(res3)\n', (1894, 1900), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((1905, 1955), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['prstd', 'predstd', '(15)'], {}), '(prstd, predstd, 15)\n', (1935, 1955), True, 'import numpy as np\n'), ((2048, 2121), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_u', '(res3.fittedvalues + ci_half)'], {'rtol': '(1e-12)'}), '(iv_u, res3.fittedvalues + ci_half, rtol=1e-12)\n', (2074, 2121), True, 'import numpy as np\n'), ((2126, 2199), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_l', '(res3.fittedvalues - ci_half)'], {'rtol': '(1e-12)'}), '(iv_l, res3.fittedvalues - ci_half, rtol=1e-12)\n', (2152, 2199), True, 'import numpy as np\n'), ((2254, 2303), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 
'wls_prediction_std', (['res3', 'x2[-1:, :]'], {'weights': '(3.0)'}), '(res3, x2[-1:, :], weights=3.0)\n', (2272, 2303), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((2306, 2347), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['prstd', 'prstd[-1]'], {}), '(prstd, prstd[-1])\n', (2329, 2347), True, 'import numpy as np\n'), ((2372, 2420), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[-1, :]'], {'weights': '(3.0)'}), '(res3, x2[-1, :], weights=3.0)\n', (2390, 2420), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((2423, 2464), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['prstd', 'prstd[-1]'], {}), '(prstd, prstd[-1])\n', (2446, 2464), True, 'import numpy as np\n'), ((2490, 2539), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[-2:, :]'], {'weights': '(3.0)'}), '(res3, x2[-2:, :], weights=3.0)\n', (2508, 2539), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((2542, 2584), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['prstd', 'prstd[-2:]'], {}), '(prstd, prstd[-2:])\n', (2565, 2584), True, 'import numpy as np\n'), ((2610, 2662), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[-2:, :]'], {'weights': '[3, 3]'}), '(res3, x2[-2:, :], weights=[3, 3])\n', (2628, 2662), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((2666, 2708), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['prstd', 'prstd[-2:]'], {}), '(prstd, prstd[-2:])\n', (2689, 2708), True, 'import numpy as np\n'), ((2734, 2769), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[:3, :]'], {}), '(res3, x2[:3, :])\n', (2752, 2769), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((2773, 2814), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['prstd', 'prstd[:3]'], {}), '(prstd, prstd[:3])\n', (2796, 2814), True, 'import numpy as np\n'), ((2819, 2905), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_u', '(res3.fittedvalues[:3] + ci_half[:3])'], {'rtol': '(1e-12)'}), '(iv_u, res3.fittedvalues[:3] + ci_half[:3], rtol=\n 1e-12)\n', (2845, 2905), True, 'import numpy as np\n'), ((2936, 3022), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['iv_l', '(res3.fittedvalues[:3] - ci_half[:3])'], {'rtol': '(1e-12)'}), '(iv_l, res3.fittedvalues[:3] - ci_half[:3], rtol=\n 1e-12)\n', (2962, 3022), True, 'import numpy as np\n'), ((3156, 3246), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError', 'wls_prediction_std', 'res3', 'x2[-1, 0]'], {'weights': '(3.0)'}), '(ValueError, wls_prediction_std, res3, x2[-1, 0],\n weights=3.0)\n', (3180, 3246), True, 'import numpy as np\n'), ((3369, 3391), 'numpy.linspace', 'np.linspace', (['(0.5)', '(3)', '(5)'], {}), '(0.5, 3, 5)\n', (3380, 3391), True, 'import numpy as np\n'), ((3475, 3536), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sew', '(sew1 + res3.scale * (wv - 1))'], {}), '(sew, sew1 + res3.scale * (wv - 1))\n', (3501, 3536), True, 'import numpy as np\n'), ((486, 502), 'numpy.ones', 'np.ones', (['nsample'], {}), '(nsample)\n', (493, 502), True, 'import numpy as np\n'), ((701, 731), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'nsample'}), '(size=nsample)\n', (717, 731), True, 'import numpy as 
np\n'), ((783, 794), 'statsmodels.regression.linear_model.OLS', 'OLS', (['y2', 'x2'], {}), '(y2, x2)\n', (786, 794), False, 'from statsmodels.regression.linear_model import OLS, WLS\n'), ((1670, 1690), 'statsmodels.regression.linear_model.WLS', 'WLS', (['y2', 'x2', '(1.0 / w)'], {}), '(y2, x2, 1.0 / w)\n', (1673, 1690), False, 'from statsmodels.regression.linear_model import OLS, WLS\n'), ((3313, 3349), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[-3:, :]'], {}), '(res3, x2[-3:, :])\n', (3331, 3349), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((3408, 3462), 'statsmodels.sandbox.regression.predstd.wls_prediction_std', 'wls_prediction_std', (['res3', 'x2[-3:, :]'], {'weights': '(1.0 / wv)'}), '(res3, x2[-3:, :], weights=1.0 / wv)\n', (3426, 3462), False, 'from statsmodels.sandbox.regression.predstd import wls_prediction_std\n'), ((892, 910), 'numpy.dot', 'np.dot', (['covb', 'x2.T'], {}), '(covb, x2.T)\n', (898, 910), True, 'import numpy as np\n'), ((1791, 1809), 'numpy.dot', 'np.dot', (['covb', 'x2.T'], {}), '(covb, x2.T)\n', (1797, 1809), True, 'import numpy as np\n')]
|
from __future__ import division
import numpy as np
def SoftmaxLoss2(w, X, y, k):
# w(feature*class,1) - weights for last class assumed to be 0
# X(instance,feature)
# y(instance,1)
#
# version of SoftmaxLoss where weights for last class are fixed at 0
# to avoid overparameterization
n, p = X.shape
w = w.reshape((p, k - 1))
w = np.hstack((w, np.zeros((p, 1))))
Z = np.exp(X.dot(w)).sum(axis=1)
nll = -((X * w[:, y].T).sum(axis=1) - np.log(Z)).sum()
g = np.zeros((p, k - 1))
for c in xrange(k - 1):
g[:, c] = -(X * ((y == c) - np.exp(X.dot(w[:, c])) / Z)
[:, np.newaxis]).sum(axis=0)
g = np.ravel(g)
return nll, g
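# Shape conventions assumed by SoftmaxLoss2 (inferred from the code above):
#   w : flattened ((k-1) * p,) weight vector; the k-th class's weights are pinned to 0
#   X : (n, p) design matrix, y : (n,) integer class labels in 0 .. k-1
# Because the gradient g is returned flattened as well, the (nll, g) pair can be fed
# directly to gradient-based optimisers (e.g. scipy.optimize.minimize with jac=True).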
|
[
"numpy.ravel",
"numpy.zeros",
"numpy.log"
] |
[((509, 529), 'numpy.zeros', 'np.zeros', (['(p, k - 1)'], {}), '((p, k - 1))\n', (517, 529), True, 'import numpy as np\n'), ((684, 695), 'numpy.ravel', 'np.ravel', (['g'], {}), '(g)\n', (692, 695), True, 'import numpy as np\n'), ((384, 400), 'numpy.zeros', 'np.zeros', (['(p, 1)'], {}), '((p, 1))\n', (392, 400), True, 'import numpy as np\n'), ((483, 492), 'numpy.log', 'np.log', (['Z'], {}), '(Z)\n', (489, 492), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 09 february, 2021
Testing suite for BNetwork class
@author: <NAME>
@email: <EMAIL>
@date: 09 february, 2021
"""
import unittest
import os
import numpy as np
from topopy import Flow, Basin, Network, BNetwork, DEM
from topopy.network import NetworkError
infolder = "data/in"
outfolder = "data/out"
class BNetworkClassTest(unittest.TestCase):
    # Head indices that should be obtained (for checking)
results = {"small25":dict([(1, 16171), (2, 9354), (3,1463)]),
"jebja30":dict([(1, 151755), (2, 44786), (3, 48709), (4, 3819)]),
"tunez":dict([(1, 77552), (2, 30013), (3, 7247)])}
def test_BNetwork_class00(self):
"""
        Test00: Creates a BNetwork for the test basins from a basin Grid,
        without using heads
"""
files = ["small25", "jebja30", "tunez"]
for file in files:
            # Load DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, file))
net = Network("{}/{}_net.dat".format(infolder, file))
            # Load outlets and generate basins
outlets = np.loadtxt("{}/{}_bnet_outlets.txt".format(infolder, file), delimiter=";")
outlets = net.snap_points(outlets)
cuencas = fd.get_drainage_basins(outlets)
for bid in np.unique(cuencas.read_array()):
if bid == 0:
continue
bnet = BNetwork(net, cuencas, None, bid)
self.assertEqual(int(bnet._heads[0]), self.results[file][bid])
def test_BNetwork_class01(self):
"""
        Test01: Creates a BNetwork for the test basins from a Basin object,
        without using heads
"""
files = ["small25", "jebja30", "tunez"]
for file in files:
            # Load DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, file))
dem = DEM("{}/{}.tif".format(infolder, file))
net = Network("{}/{}_net.dat".format(infolder, file))
            # Load outlets and generate basins
outlets = np.loadtxt("{}/{}_bnet_outlets.txt".format(infolder, file), delimiter=";")
outlets = net.snap_points(outlets)
cuencas = fd.get_drainage_basins(outlets)
for bid in np.unique(cuencas.read_array()):
if bid == 0:
continue
basin = Basin(dem, cuencas, bid)
bnet = BNetwork(net, basin)
                # This test only verifies that it runs without errors and that
                # the bnet object has a single head
bnet = BNetwork(net, cuencas, None, bid)
self.assertEqual(int(bnet._heads[0]), self.results[file][bid])
def test_BNetwork_class03(self):
"""
        Test checking heads in basin 1 with small25
474260.9;4114339.6;3
474856.9;4114711.1;2
"""
        # Load DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, "small25"))
net = Network("{}/{}_net.dat".format(infolder, "small25"))
        # Load outlets and heads, and generate basins
outlets = np.loadtxt("{}/{}_bnet_outlets.txt".format(infolder, "small25"), delimiter=";")
heads = np.loadtxt("{}/{}_bnet_heads.txt".format(infolder, "small25"), delimiter=";")
outlets = net.snap_points(outlets)
cuencas = fd.get_drainage_basins(outlets)
bid = 1
bnet = BNetwork(net, cuencas, heads, bid)
self.assertEqual(np.array_equal(bnet._heads, np.array([13494, 16171])), True)
def test_BNetwork_class04(self):
"""
        Test checking heads in basin 1 with small25 (without using the id field)
474260.9;4114339.6;3
474856.9;4114711.1;2
"""
        # Load DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, "small25"))
net = Network("{}/{}_net.dat".format(infolder, "small25"))
        # Load outlets and heads, and generate basins
outlets = np.loadtxt("{}/{}_bnet_outlets.txt".format(infolder, "small25"), delimiter=";")
heads = np.loadtxt("{}/{}_bnet_heads.txt".format(infolder, "small25"), delimiter=";")
# Remove the id column
heads = heads[:,:-1]
outlets = net.snap_points(outlets)
cuencas = fd.get_drainage_basins(outlets)
bid = 1
bnet = BNetwork(net, cuencas, heads, bid)
self.assertEqual(np.array_equal(bnet._heads, np.array([16171, 13494])), True)
def test_BNetwork_class05(self):
"""
        Mass-creation test of basins with random heads
"""
files = ["small25", "jebja30", "tunez"]
for file in files:
            # Load DEM, Flow, Network
fd = Flow("{}/{}_fd.tif".format(infolder, file))
net = Network("{}/{}_net.dat".format(infolder, file))
dem = DEM("{}/{}.tif".format(infolder, file))
            # Generate all the basins
cuencas = fd.get_drainage_basins(min_area = 0.0025)
            # Generate 50 random points within the extent of the Network object
            # These 50 points will be used as heads
xmin, xmax, ymin, ymax = net.get_extent()
xi = np.random.randint(xmin, xmax, 50)
yi = np.random.randint(ymin, ymax, 50)
heads = np.array((xi, yi)).T
            # Take 5 random basins
bids = np.random.choice(np.unique(cuencas.read_array())[1:], 5)
for bid in bids:
try:
if np.random.randint(100) < 70:
bnet = BNetwork(net, cuencas, heads, bid)
else:
basin = Basin(dem, cuencas, bid)
bnet = BNetwork(net, basin, heads)
except NetworkError:
print("Network of {} file inside the basin {} has not enough pixels".format(file, bid))
continue
                # Save the BNetwork and reload it to check that saving and loading work correctly
bnet_path = "{}/{}_{}_bnet.dat".format(outfolder, file, bid)
bnet.save(bnet_path)
bnet2 = BNetwork(bnet_path)
computed = np.array_equal(bnet._ix, bnet2._ix)
self.assertEqual(computed, True)
                # delete the file
os.remove(bnet_path)
if __name__ == "__main__":
unittest.main()
|
[
"topopy.BNetwork",
"os.remove",
"numpy.array",
"numpy.random.randint",
"numpy.array_equal",
"unittest.main",
"topopy.Basin"
] |
[((6743, 6758), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6756, 6758), False, 'import unittest\n'), ((3650, 3684), 'topopy.BNetwork', 'BNetwork', (['net', 'cuencas', 'heads', 'bid'], {}), '(net, cuencas, heads, bid)\n', (3658, 3684), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((4599, 4633), 'topopy.BNetwork', 'BNetwork', (['net', 'cuencas', 'heads', 'bid'], {}), '(net, cuencas, heads, bid)\n', (4607, 4633), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((5511, 5544), 'numpy.random.randint', 'np.random.randint', (['xmin', 'xmax', '(50)'], {}), '(xmin, xmax, 50)\n', (5528, 5544), True, 'import numpy as np\n'), ((5562, 5595), 'numpy.random.randint', 'np.random.randint', (['ymin', 'ymax', '(50)'], {}), '(ymin, ymax, 50)\n', (5579, 5595), True, 'import numpy as np\n'), ((1528, 1561), 'topopy.BNetwork', 'BNetwork', (['net', 'cuencas', 'None', 'bid'], {}), '(net, cuencas, None, bid)\n', (1536, 1561), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((2558, 2582), 'topopy.Basin', 'Basin', (['dem', 'cuencas', 'bid'], {}), '(dem, cuencas, bid)\n', (2563, 2582), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((2606, 2626), 'topopy.BNetwork', 'BNetwork', (['net', 'basin'], {}), '(net, basin)\n', (2614, 2626), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((2782, 2815), 'topopy.BNetwork', 'BNetwork', (['net', 'cuencas', 'None', 'bid'], {}), '(net, cuencas, None, bid)\n', (2790, 2815), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((3738, 3762), 'numpy.array', 'np.array', (['[13494, 16171]'], {}), '([13494, 16171])\n', (3746, 3762), True, 'import numpy as np\n'), ((4687, 4711), 'numpy.array', 'np.array', (['[16171, 13494]'], {}), '([16171, 13494])\n', (4695, 4711), True, 'import numpy as np\n'), ((5616, 5634), 'numpy.array', 'np.array', (['(xi, yi)'], {}), '((xi, yi))\n', (5624, 5634), True, 'import numpy as np\n'), ((6506, 6525), 'topopy.BNetwork', 'BNetwork', (['bnet_path'], {}), '(bnet_path)\n', (6514, 6525), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((6553, 6588), 'numpy.array_equal', 'np.array_equal', (['bnet._ix', 'bnet2._ix'], {}), '(bnet._ix, bnet2._ix)\n', (6567, 6588), True, 'import numpy as np\n'), ((6689, 6709), 'os.remove', 'os.remove', (['bnet_path'], {}), '(bnet_path)\n', (6698, 6709), False, 'import os\n'), ((5842, 5864), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (5859, 5864), True, 'import numpy as np\n'), ((5902, 5936), 'topopy.BNetwork', 'BNetwork', (['net', 'cuencas', 'heads', 'bid'], {}), '(net, cuencas, heads, bid)\n', (5910, 5936), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((5995, 6019), 'topopy.Basin', 'Basin', (['dem', 'cuencas', 'bid'], {}), '(dem, cuencas, bid)\n', (6000, 6019), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n'), ((6051, 6078), 'topopy.BNetwork', 'BNetwork', (['net', 'basin', 'heads'], {}), '(net, basin, heads)\n', (6059, 6078), False, 'from topopy import Flow, Basin, Network, BNetwork, DEM\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
from util import log
from pprint import pprint
from input_ops import create_input_ops
from model import Model
import os
import time
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
class Trainer(object):
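    """Build the model, input pipelines, optimizers and a managed session, then run the training loop."""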
def __init__(self,
config,
dataset,
dataset_test):
self.config = config
hyper_parameter_str = 'bs_{}_lr_flow_{}_pixel_{}_d_{}'.format(
config.batch_size,
config.learning_rate_f,
config.learning_rate_p,
config.learning_rate_d,
)
self.train_dir = './train_dir/%s-%s-%s-num_input-%s-%s' % (
config.dataset,
config.prefix,
hyper_parameter_str,
str(config.num_input),
time.strftime("%Y%m%d-%H%M%S")
)
if not os.path.exists(self.train_dir): os.makedirs(self.train_dir)
log.infov("Train Dir: %s", self.train_dir)
# --- input ops ---
self.batch_size = config.batch_size
_, self.batch_train = create_input_ops(
dataset, self.batch_size, is_training=True)
_, self.batch_test = create_input_ops(
dataset_test, self.batch_size, is_training=False)
# --- create model ---
self.model = Model(config)
# --- optimizer ---
self.global_step = tf.contrib.framework.get_or_create_global_step(graph=None)
self.learning_rate_p = config.learning_rate_p
self.learning_rate_f = config.learning_rate_f
self.learning_rate_d = config.learning_rate_d
self.check_op = tf.no_op()
# --- checkpoint and monitoring ---
all_vars = tf.trainable_variables()
f_var = [v for v in all_vars if 'Flow' in v.op.name or 'flow' in v.op.name]
log.warn("********* f_var ********** ")
slim.model_analyzer.analyze_vars(f_var, print_info=True)
p_var = [v for v in all_vars if 'Pixel' in v.op.name or 'pixel' in v.op.name]
log.warn("********* p_var ********** ")
slim.model_analyzer.analyze_vars(p_var, print_info=True)
d_var = [v for v in all_vars if v.op.name.startswith('Discriminator')]
log.warn("********* d_var ********** ")
slim.model_analyzer.analyze_vars(d_var, print_info=True)
# the whole model without the discriminator
g_var = p_var + f_var
self.f_optimizer = tf.train.AdamOptimizer(
self.learning_rate_f,
).minimize(self.model.flow_loss,
var_list=f_var, name='optimizer_flow_loss')
self.p_optimizer = tf.train.AdamOptimizer(
self.learning_rate_p,
).minimize(self.model.pixel_loss, global_step=self.global_step,
var_list=p_var, name='optimizer_pixel_loss')
self.p_optimizer_gan = tf.train.AdamOptimizer(
self.learning_rate_p,
beta1=0.5
).minimize(self.model.pixel_loss_gan, global_step=self.global_step,
var_list=p_var, name='optimizer_pixel_loss_gan')
self.d_optimizer = tf.train.AdamOptimizer(
self.learning_rate_d,
beta1=0.5
).minimize(self.model.d_loss, global_step=self.global_step,
var_list=d_var, name='optimizer_discriminator_loss')
self.train_summary_op = tf.summary.merge_all(key='train')
self.test_summary_op = tf.summary.merge_all(key='test')
self.saver = tf.train.Saver(max_to_keep=100)
self.pretrain_saver = tf.train.Saver(var_list=all_vars, max_to_keep=1)
self.pretrain_saver_p = tf.train.Saver(var_list=p_var, max_to_keep=1)
self.pretrain_saver_f = tf.train.Saver(var_list=f_var, max_to_keep=1)
self.pretrain_saver_g = tf.train.Saver(var_list=g_var, max_to_keep=1)
self.pretrain_saver_d = tf.train.Saver(var_list=d_var, max_to_keep=1)
self.summary_writer = tf.summary.FileWriter(self.train_dir)
self.max_steps = self.config.max_steps
self.ckpt_save_step = self.config.ckpt_save_step
self.log_step = self.config.log_step
self.test_sample_step = self.config.test_sample_step
self.write_summary_step = self.config.write_summary_step
self.gan_start_step = self.config.gan_start_step
self.checkpoint_secs = 600 # 10 min
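        # tf.train.Supervisor handles summary/checkpoint scheduling and provides the managed session created below.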
self.supervisor = tf.train.Supervisor(
logdir=self.train_dir,
is_chief=True,
saver=None,
summary_op=None,
summary_writer=self.summary_writer,
save_summaries_secs=300,
save_model_secs=self.checkpoint_secs,
global_step=self.global_step,
)
session_config = tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True),
device_count={'GPU': 1},
)
self.session = self.supervisor.prepare_or_wait_for_session(config=session_config)
self.ckpt_path = config.checkpoint
if self.ckpt_path is not None:
log.info("Checkpoint path: %s", self.ckpt_path)
self.pretrain_saver.restore(self.session, self.ckpt_path, )
log.info("Loaded the pretrain parameters from the provided checkpoint path")
self.ckpt_path_f = config.checkpoint_f
if self.ckpt_path_f is not None:
log.info("Checkpoint path: %s", self.ckpt_path_f)
self.pretrain_saver_f.restore(self.session, self.ckpt_path_f)
log.info("Loaded the pretrain Flow module from the provided checkpoint path")
self.ckpt_path_p = config.checkpoint_p
if self.ckpt_path_p is not None:
log.info("Checkpoint path: %s", self.ckpt_path_p)
self.pretrain_saver_p.restore(self.session, self.ckpt_path_p)
log.info("Loaded the pretrain Pixel module from the provided checkpoint path")
self.ckpt_path_g = config.checkpoint_g
if self.ckpt_path_g is not None:
log.info("Checkpoint path: %s", self.ckpt_path_g)
self.pretrain_saver_g.restore(self.session, self.ckpt_path_g)
log.info("Loaded the pretrain Generator (Pixel&Flow) module from the provided checkpoint path")
self.ckpt_path_d = config.checkpoint_d
if self.ckpt_path_d is not None:
log.info("Checkpoint path: %s", self.ckpt_path_d)
self.pretrain_saver_d.restore(self.session, self.ckpt_path_d)
log.info("Loaded the pretrain Discriminator module from the provided checkpoint path")
def train(self):
log.infov("Training Starts!")
pprint(self.batch_train)
max_steps = self.max_steps
ckpt_save_step = self.ckpt_save_step
log_step = self.log_step
test_sample_step = self.test_sample_step
write_summary_step = self.write_summary_step
gan_start_step = self.gan_start_step
for s in xrange(max_steps):
# periodic inference
if s % test_sample_step == 0:
step, test_summary, p_loss, f_loss, loss, output, step_time = \
self.run_test(self.batch_test, step=s, is_train=False)
self.log_step_message(step, p_loss, f_loss, loss, step_time, is_train=False)
self.summary_writer.add_summary(test_summary, global_step=step)
step, train_summary, p_loss, f_loss, loss, output, step_time = \
self.run_single_step(self.batch_train, step=s,
opt_gan=s > gan_start_step, is_train=True)
if s % log_step == 0:
self.log_step_message(step, p_loss, f_loss, loss, step_time)
if s % write_summary_step == 0:
self.summary_writer.add_summary(train_summary, global_step=step)
if s % ckpt_save_step == 0:
log.infov("Saved checkpoint at %d", s)
save_path = self.saver.save(
self.session, os.path.join(self.train_dir, 'model'),
global_step=step)
def run_single_step(self, batch, step=None, opt_gan=False, is_train=True):
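        """Run a single optimization step; when opt_gan is True, alternate generator and discriminator updates."""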
_start_time = time.time()
batch_chunk = self.session.run(batch)
fetch = [self.global_step, self.train_summary_op, self.model.output,
self.model.pixel_loss, self.model.flow_loss,
self.model.loss, self.check_op]
# fetch optimizers
if not opt_gan:
# optimize only l1 losses
fetch += [self.p_optimizer, self.f_optimizer]
else:
if step % (self.config.update_rate+1) > 0:
# train the generator
fetch += [self.p_optimizer_gan, self.f_optimizer]
else:
# train the discriminator
fetch += [self.d_optimizer]
fetch_values = self.session.run(
fetch,
feed_dict=self.model.get_feed_dict(batch_chunk, step=step)
)
[step, summary, output, p_loss, f_loss, loss] = fetch_values[:6]
_end_time = time.time()
return step, summary, p_loss, f_loss, loss, output, (_end_time - _start_time)
def run_test(self, batch, step, is_train=False):
_start_time = time.time()
batch_chunk = self.session.run(batch)
step, summary, p_loss, f_loss, loss, output = self.session.run(
[self.global_step, self.test_summary_op,
self.model.pixel_loss, self.model.flow_loss,
self.model.loss, self.model.output],
feed_dict=self.model.get_feed_dict(batch_chunk, step=step, is_training=False)
)
_end_time = time.time()
return step, summary, p_loss, f_loss, loss, output, (_end_time - _start_time)
def log_step_message(self, step, p_loss, f_loss, loss, step_time, is_train=True):
if step_time == 0: step_time = 0.001
log_fn = (is_train and log.info or log.infov)
log_fn((" [{split_mode:5s} step {step:4d}] " +
"Loss: {loss:.5f} " +
"Pixel loss: {p_loss:.5f} " +
"Flow loss: {f_loss:.5f} " +
"({sec_per_batch:.3f} sec/batch, {instance_per_sec:.3f} instances/sec) "
).format(split_mode=(is_train and 'train' or 'val'),
step=step,
loss=loss,
p_loss=p_loss,
f_loss=f_loss,
sec_per_batch=step_time,
instance_per_sec=self.batch_size / step_time
)
)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=8,
help='the mini-batch size')
parser.add_argument('--prefix', type=str, default='default',
help='a nickname for the training')
parser.add_argument('--dataset', type=str, default='car', choices=[
'car', 'chair', 'kitti', 'synthia'],
help='you can add your own dataset here')
parser.add_argument('--num_input', type=int, default=2,
help='the number of source images')
parser.add_argument('--checkpoint', type=str, default=None,
help='load all the parameters including the flow and '
'pixel modules and the discriminator')
parser.add_argument('--checkpoint_p', type=str, default=None,
help='load the parameters of the pixel module')
parser.add_argument('--checkpoint_f', type=str, default=None,
help='load the parameters of the flow module')
parser.add_argument('--checkpoint_g', type=str, default=None,
help='load the parameters of both the flow and pixel module')
parser.add_argument('--checkpoint_d', type=str, default=None,
help='load the parameters of the discriminator')
# Log
parser.add_argument('--log_step', type=int, default=10,
help='the frequency of outputing log info')
parser.add_argument('--ckpt_save_step', type=int, default=5000,
help='the frequency of saving a checkpoint')
parser.add_argument('--test_sample_step', type=int, default=100,
help='the frequency of performing testing inference during training')
parser.add_argument('--write_summary_step', type=int, default=100,
help='the frequency of writing TensorBoard summaries')
# Learning
parser.add_argument('--max_steps', type=int, default=10000000,
help='the max training iterations')
parser.add_argument('--learning_rate_p', type=float, default=5e-5,
help='the learning rate of the pixel module')
parser.add_argument('--learning_rate_f', type=float, default=1e-4,
help='the learning rate of the flow module')
parser.add_argument('--learning_rate_d', type=float, default=1e-4,
help='the learning rate of the discriminator')
parser.add_argument('--local_confidence_weight', type=int, default=1e-2,
help='the weight of the confidence prediction objective')
# Architecture
parser.add_argument('--num_res_block_pixel', type=int, default=0,
help='the number of residual block in the bottleneck of the pixel module')
parser.add_argument('--num_res_block_flow', type=int, default=4,
help='the number of residual block in the bottleneck of the flow module')
parser.add_argument('--num_dis_conv_layer', type=int, default=5,
help='the number of convolutional layers of the discriminator')
parser.add_argument('--num_conv_layer', type=int, default=5,
help='the number of convolutional layers of '
'the encoder of both the flow and pixel modules')
parser.add_argument('--num_convlstm_block', type=int, default=2,
help='the number of residual ConvLSTM block of the pixel module')
parser.add_argument('--num_convlstm_scale', type=int, default=3,
help='how many innermost layers of the pixel module '
'have a residual ConvLSTM connection')
parser.add_argument('--norm_type', type=str, default='None',
choices=['batch', 'instance', 'None'],
help='the type of normalization')
# GAN
parser.add_argument('--gan_type', type=str, default='ls', choices=['ls', 'normal'],
help='the type of GAN losses such as LS-GAN, WGAN, etc')
parser.add_argument('--gan_start_step', type=int, default=5e5,
help='start to optimize the GAN loss when the model is stable')
parser.add_argument('--update_rate', type=int, default=1,
help='update G more frequently than D')
    # Multi-scale prediction: this is not reported in the paper
    # The main idea is to improve the flow module by training it to start from
    # predicting coarser flow fields (similar to the progressive growing of GANs
    # proposed by Karras et al. ICLR 2017)
parser.add_argument('--num_scale', type=int, default=1,
help='the number of multi-scale flow prediction '
'(1 means without multi-scale prediction)')
parser.add_argument('--moving_weight', type=str, default='uniform',
choices=['uniform', 'shift', 'step'],
help='gradually learn each scale from coarse to fine')
config = parser.parse_args()
if config.dataset == 'car':
import datasets.shapenet_car as dataset
elif config.dataset == 'chair':
import datasets.shapenet_chair as dataset
elif config.dataset == 'kitti':
import datasets.kitti as dataset
elif config.dataset == 'synthia':
import datasets.synthia as dataset
else:
raise ValueError(config.dataset)
if 'car' in config.dataset or 'chair' in config.dataset:
config.dataset_type = 'object'
else:
config.dataset_type = 'scene'
dataset_train, dataset_test = \
dataset.create_default_splits(config.num_input)
image, pose = dataset_train.get_data(dataset_train.ids[0])
config.data_info = np.concatenate([np.asarray(image.shape), np.asarray(pose.shape)])
trainer = Trainer(config, dataset_train, dataset_test)
log.warning("dataset: %s", config.dataset)
trainer.train()
if __name__ == '__main__':
main()
|
[
"model.Model",
"datasets.synthia.create_default_splits",
"tensorflow.contrib.framework.get_or_create_global_step",
"six.moves.xrange",
"input_ops.create_input_ops",
"tensorflow.GPUOptions",
"pprint.pprint",
"util.log.warning",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.asarray",
"tensorflow.trainable_variables",
"tensorflow.train.AdamOptimizer",
"util.log.warn",
"tensorflow.summary.merge_all",
"util.log.info",
"tensorflow.summary.FileWriter",
"time.time",
"util.log.infov",
"tensorflow.no_op",
"tensorflow.contrib.slim.model_analyzer.analyze_vars",
"os.makedirs",
"tensorflow.train.Saver",
"time.strftime",
"os.path.join",
"tensorflow.train.Supervisor"
] |
[((10788, 10813), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10811, 10813), False, 'import argparse\n'), ((16457, 16504), 'datasets.synthia.create_default_splits', 'dataset.create_default_splits', (['config.num_input'], {}), '(config.num_input)\n', (16486, 16504), True, 'import datasets.synthia as dataset\n'), ((16723, 16765), 'util.log.warning', 'log.warning', (['"""dataset: %s"""', 'config.dataset'], {}), "('dataset: %s', config.dataset)\n", (16734, 16765), False, 'from util import log\n'), ((1065, 1107), 'util.log.infov', 'log.infov', (['"""Train Dir: %s"""', 'self.train_dir'], {}), "('Train Dir: %s', self.train_dir)\n", (1074, 1107), False, 'from util import log\n'), ((1212, 1272), 'input_ops.create_input_ops', 'create_input_ops', (['dataset', 'self.batch_size'], {'is_training': '(True)'}), '(dataset, self.batch_size, is_training=True)\n', (1228, 1272), False, 'from input_ops import create_input_ops\n'), ((1315, 1381), 'input_ops.create_input_ops', 'create_input_ops', (['dataset_test', 'self.batch_size'], {'is_training': '(False)'}), '(dataset_test, self.batch_size, is_training=False)\n', (1331, 1381), False, 'from input_ops import create_input_ops\n'), ((1448, 1461), 'model.Model', 'Model', (['config'], {}), '(config)\n', (1453, 1461), False, 'from model import Model\n'), ((1518, 1576), 'tensorflow.contrib.framework.get_or_create_global_step', 'tf.contrib.framework.get_or_create_global_step', ([], {'graph': 'None'}), '(graph=None)\n', (1564, 1576), True, 'import tensorflow as tf\n'), ((1764, 1774), 'tensorflow.no_op', 'tf.no_op', ([], {}), '()\n', (1772, 1774), True, 'import tensorflow as tf\n'), ((1839, 1863), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (1861, 1863), True, 'import tensorflow as tf\n'), ((1957, 1996), 'util.log.warn', 'log.warn', (['"""********* f_var ********** """'], {}), "('********* f_var ********** ')\n", (1965, 1996), False, 'from util import log\n'), ((2005, 2061), 'tensorflow.contrib.slim.model_analyzer.analyze_vars', 'slim.model_analyzer.analyze_vars', (['f_var'], {'print_info': '(True)'}), '(f_var, print_info=True)\n', (2037, 2061), True, 'import tensorflow.contrib.slim as slim\n'), ((2157, 2196), 'util.log.warn', 'log.warn', (['"""********* p_var ********** """'], {}), "('********* p_var ********** ')\n", (2165, 2196), False, 'from util import log\n'), ((2205, 2261), 'tensorflow.contrib.slim.model_analyzer.analyze_vars', 'slim.model_analyzer.analyze_vars', (['p_var'], {'print_info': '(True)'}), '(p_var, print_info=True)\n', (2237, 2261), True, 'import tensorflow.contrib.slim as slim\n'), ((2350, 2389), 'util.log.warn', 'log.warn', (['"""********* d_var ********** """'], {}), "('********* d_var ********** ')\n", (2358, 2389), False, 'from util import log\n'), ((2398, 2454), 'tensorflow.contrib.slim.model_analyzer.analyze_vars', 'slim.model_analyzer.analyze_vars', (['d_var'], {'print_info': '(True)'}), '(d_var, print_info=True)\n', (2430, 2454), True, 'import tensorflow.contrib.slim as slim\n'), ((3487, 3520), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""train"""'}), "(key='train')\n", (3507, 3520), True, 'import tensorflow as tf\n'), ((3552, 3584), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {'key': '"""test"""'}), "(key='test')\n", (3572, 3584), True, 'import tensorflow as tf\n'), ((3607, 3638), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(100)'}), '(max_to_keep=100)\n', (3621, 3638), True, 'import tensorflow as tf\n'), ((3669, 
3717), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'all_vars', 'max_to_keep': '(1)'}), '(var_list=all_vars, max_to_keep=1)\n', (3683, 3717), True, 'import tensorflow as tf\n'), ((3750, 3795), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'p_var', 'max_to_keep': '(1)'}), '(var_list=p_var, max_to_keep=1)\n', (3764, 3795), True, 'import tensorflow as tf\n'), ((3828, 3873), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'f_var', 'max_to_keep': '(1)'}), '(var_list=f_var, max_to_keep=1)\n', (3842, 3873), True, 'import tensorflow as tf\n'), ((3906, 3951), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'g_var', 'max_to_keep': '(1)'}), '(var_list=g_var, max_to_keep=1)\n', (3920, 3951), True, 'import tensorflow as tf\n'), ((3984, 4029), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'd_var', 'max_to_keep': '(1)'}), '(var_list=d_var, max_to_keep=1)\n', (3998, 4029), True, 'import tensorflow as tf\n'), ((4060, 4097), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.train_dir'], {}), '(self.train_dir)\n', (4081, 4097), True, 'import tensorflow as tf\n'), ((4503, 4730), 'tensorflow.train.Supervisor', 'tf.train.Supervisor', ([], {'logdir': 'self.train_dir', 'is_chief': '(True)', 'saver': 'None', 'summary_op': 'None', 'summary_writer': 'self.summary_writer', 'save_summaries_secs': '(300)', 'save_model_secs': 'self.checkpoint_secs', 'global_step': 'self.global_step'}), '(logdir=self.train_dir, is_chief=True, saver=None,\n summary_op=None, summary_writer=self.summary_writer,\n save_summaries_secs=300, save_model_secs=self.checkpoint_secs,\n global_step=self.global_step)\n', (4522, 4730), True, 'import tensorflow as tf\n'), ((6724, 6753), 'util.log.infov', 'log.infov', (['"""Training Starts!"""'], {}), "('Training Starts!')\n", (6733, 6753), False, 'from util import log\n'), ((6762, 6786), 'pprint.pprint', 'pprint', (['self.batch_train'], {}), '(self.batch_train)\n', (6768, 6786), False, 'from pprint import pprint\n'), ((7066, 7083), 'six.moves.xrange', 'xrange', (['max_steps'], {}), '(max_steps)\n', (7072, 7083), False, 'from six.moves import xrange\n'), ((8300, 8311), 'time.time', 'time.time', ([], {}), '()\n', (8309, 8311), False, 'import time\n'), ((9209, 9220), 'time.time', 'time.time', ([], {}), '()\n', (9218, 9220), False, 'import time\n'), ((9384, 9395), 'time.time', 'time.time', ([], {}), '()\n', (9393, 9395), False, 'import time\n'), ((9798, 9809), 'time.time', 'time.time', ([], {}), '()\n', (9807, 9809), False, 'import time\n'), ((997, 1027), 'os.path.exists', 'os.path.exists', (['self.train_dir'], {}), '(self.train_dir)\n', (1011, 1027), False, 'import os\n'), ((1029, 1056), 'os.makedirs', 'os.makedirs', (['self.train_dir'], {}), '(self.train_dir)\n', (1040, 1056), False, 'import os\n'), ((5197, 5244), 'util.log.info', 'log.info', (['"""Checkpoint path: %s"""', 'self.ckpt_path'], {}), "('Checkpoint path: %s', self.ckpt_path)\n", (5205, 5244), False, 'from util import log\n'), ((5329, 5405), 'util.log.info', 'log.info', (['"""Loaded the pretrain parameters from the provided checkpoint path"""'], {}), "('Loaded the pretrain parameters from the provided checkpoint path')\n", (5337, 5405), False, 'from util import log\n'), ((5507, 5556), 'util.log.info', 'log.info', (['"""Checkpoint path: %s"""', 'self.ckpt_path_f'], {}), "('Checkpoint path: %s', self.ckpt_path_f)\n", (5515, 5556), False, 'from util import log\n'), ((5643, 5720), 'util.log.info', 'log.info', (['"""Loaded the pretrain Flow module from the 
provided checkpoint path"""'], {}), "('Loaded the pretrain Flow module from the provided checkpoint path')\n", (5651, 5720), False, 'from util import log\n'), ((5822, 5871), 'util.log.info', 'log.info', (['"""Checkpoint path: %s"""', 'self.ckpt_path_p'], {}), "('Checkpoint path: %s', self.ckpt_path_p)\n", (5830, 5871), False, 'from util import log\n'), ((5958, 6036), 'util.log.info', 'log.info', (['"""Loaded the pretrain Pixel module from the provided checkpoint path"""'], {}), "('Loaded the pretrain Pixel module from the provided checkpoint path')\n", (5966, 6036), False, 'from util import log\n'), ((6138, 6187), 'util.log.info', 'log.info', (['"""Checkpoint path: %s"""', 'self.ckpt_path_g'], {}), "('Checkpoint path: %s', self.ckpt_path_g)\n", (6146, 6187), False, 'from util import log\n'), ((6274, 6379), 'util.log.info', 'log.info', (['"""Loaded the pretrain Generator (Pixel&Flow) module from the provided checkpoint path"""'], {}), "(\n 'Loaded the pretrain Generator (Pixel&Flow) module from the provided checkpoint path'\n )\n", (6282, 6379), False, 'from util import log\n'), ((6471, 6520), 'util.log.info', 'log.info', (['"""Checkpoint path: %s"""', 'self.ckpt_path_d'], {}), "('Checkpoint path: %s', self.ckpt_path_d)\n", (6479, 6520), False, 'from util import log\n'), ((6607, 6703), 'util.log.info', 'log.info', (['"""Loaded the pretrain Discriminator module from the provided checkpoint path"""'], {}), "(\n 'Loaded the pretrain Discriminator module from the provided checkpoint path'\n )\n", (6615, 6703), False, 'from util import log\n'), ((16608, 16631), 'numpy.asarray', 'np.asarray', (['image.shape'], {}), '(image.shape)\n', (16618, 16631), True, 'import numpy as np\n'), ((16633, 16655), 'numpy.asarray', 'np.asarray', (['pose.shape'], {}), '(pose.shape)\n', (16643, 16655), True, 'import numpy as np\n'), ((940, 970), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (953, 970), False, 'import time\n'), ((2566, 2610), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate_f'], {}), '(self.learning_rate_f)\n', (2588, 2610), True, 'import tensorflow as tf\n'), ((2756, 2800), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate_p'], {}), '(self.learning_rate_p)\n', (2778, 2800), True, 'import tensorflow as tf\n'), ((2982, 3037), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate_p'], {'beta1': '(0.5)'}), '(self.learning_rate_p, beta1=0.5)\n', (3004, 3037), True, 'import tensorflow as tf\n'), ((3234, 3289), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate_d'], {'beta1': '(0.5)'}), '(self.learning_rate_d, beta1=0.5)\n', (3256, 3289), True, 'import tensorflow as tf\n'), ((4931, 4963), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (4944, 4963), True, 'import tensorflow as tf\n'), ((8003, 8041), 'util.log.infov', 'log.infov', (['"""Saved checkpoint at %d"""', 's'], {}), "('Saved checkpoint at %d', s)\n", (8012, 8041), False, 'from util import log\n'), ((8121, 8158), 'os.path.join', 'os.path.join', (['self.train_dir', '"""model"""'], {}), "(self.train_dir, 'model')\n", (8133, 8158), False, 'import os\n')]
|
import numpy as np
import scipy.linalg as la
from scipy.stats import multinomial
def random_multivar_normal(n, d, k, sigma=.1):
'''
Generate random samples from a random multivariate normal distribution
with covariance A A^T + sigma^2 I.
Input:
n: int, number of samples
d: int, dimension of samples
        k: int, number of components (columns of A0)
        sigma: optional float > 0, default .1, the standard deviation
            of the sample noise.
    Output:
        cov: d x d array, the covariance matrix for the distribution
        A0: d x k array, the eigenvectors we want to estimate (note the
            eigenvectors are columns of the array, in descending order)
X: n x d array of n d-dimensional samples.
'''
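    # Orthonormalize a random d x k matrix with an economy-size QR factorization;
    # the Q factor provides the k orthonormal directions of the low-rank component.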
A0 = la.qr(np.random.rand(d, k), mode='economic')[0]
cov = A0 @ A0.T + sigma**2 * np.eye(d)
X = np.random.multivariate_normal(np.zeros(d), cov, size=n)
return cov, A0, X
def spiked_covariance(n, d, k, sigma=.1):
'''
Generate random samples from a random multivariate normal distribution
with covariance A D A^T + sigma^2 I.
Here A is a set of k orthogonal vectors and D is a diagonal matrix with
random, uniform entries, sorted and scaled so that the first entry = 1.
Input:
n: int, number of samples
d: int, dimension of samples
        k: int, number of components (columns of A0)
sigma: optional float > 0, default .1, the standard deviation
of the sample noise.
Output:
        cov: d x d array, the true covariance matrix for the distribution
        w: k x 1 array whose squared entries form the diagonal of matrix D.
A0: d x k array, the eigenvectors we want to estimate (note the
eigenvectors are columns of the array, in descending order)
X: n x d array of n d-dimensional samples.
'''
A0 = la.qr(np.random.rand(d, k), mode='economic')[0]
w = np.sort(np.random.rand(k, 1), axis=0)[::-1]
w /= w.max()
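    # Spiked covariance model: A0 diag(w**2) A0^T plus isotropic noise sigma**2 * I.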
cov = A0 @ (w**2 * A0.T) + sigma**2 * np.eye(d)
X = np.random.multivariate_normal(np.zeros(d), cov, size=n)
return cov, w, A0, X
def random_multinomial(n, d, trials=100, mean0 = True, scale=1):
'''
Generate random samples from a random multinomial distribution with p_i ~ U(0,1).
Input:
n: int, number of samples
d: int, dimension of samples
trials: optional int, the number of trials for each sample from the
            multinomial distribution; the default is 100.
mean0: optional boolean, default True. Indicates whether to normalize
the samples so they are mean 0.
Output:
cov: d x d array, the true covariance matrix for the distribution
e: d-dimensional array, the eigenvalues of the covariance matrix
v: d x d array, the eigenvectors of the covariance matrix
        X: n x d array of n d-dimensional samples from the multinomial
            distribution with covariance cov.
'''
# Initialize p values
p = np.random.rand(d)
p /= p.sum()
# Calculate the covariance matrix for the multinomial distribution
    # For large d (>= 10000), use multinomial.cov(trials, p)
if d >= 10000:
cov = multinomial.cov(trials, p)
else:
cov = -np.outer(p, p) * trials
cov[np.diag_indices(d)] = trials * p * (1-p)
cov *= scale**2
# Obtain the eigenvectors of the covariance matrix.
e, v = la.eigh(cov)
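    # la.eigh returns eigenvalues in ascending order; reverse to descending so the leading components come first.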
e = e[::-1]
v = v[:,::-1]
# Obtain a sample from the multinomial distribution of size n
X = np.random.multinomial(trials, p, n).astype(float)
if mean0:
# Let X have mean 0
X -= trials * p
X *= scale
return cov, e, v, X
def random_dirichlet(n, d, mean0=True, scale=1):
'''
Generate random samples from a random dirichlet distribution with a_i ~ U(0,1).
Input:
n: int, number of samples
d: int, dimension of samples
mean0: optional boolean, default True. Indicates whether to normalize
the samples so they are mean 0.
Output:
cov: d x d array, the true covariance matrix for the distribution
e: d-dimensional array, the eigenvalues of the covariance matrix
v: d x d array, the eigenvectors of the covariance matrix
(note the eigenvectors are columns of the array, in descending order)
X: n x d array of n d-dimensional samples from the random_dirichlet
distribution with covariance cov.
'''
# Initialize a random set of parameters a drawn from the
# uniform distribution.
a = np.random.rand(d)
a0 = a.sum()
a_denom = a0**2 * (a0 + 1)
# Obtain the covariance matrix for the dirichlet distribution.
# Note that scipy doesn't currently have a builtin method for this
# (I may add one myself)
cov = -np.outer(a, a) / a_denom # i neq j case
cov[np.diag_indices(d)] = a * (a0 - a) / a_denom # i = j case
cov *= scale**2
# Obtain the eigenvectors of the covariance matrix.
e, v = la.eigh(cov)
e = e[::-1]
v = v[:,::-1]
X = np.random.dirichlet(a, n)
if mean0:
X -= a / a0
X *= scale
return cov, e, v, X
|
[
"scipy.linalg.eigh",
"numpy.eye",
"numpy.random.rand",
"numpy.diag_indices",
"scipy.stats.multinomial.cov",
"numpy.random.multinomial",
"numpy.random.dirichlet",
"numpy.zeros",
"numpy.outer"
] |
[((2998, 3015), 'numpy.random.rand', 'np.random.rand', (['d'], {}), '(d)\n', (3012, 3015), True, 'import numpy as np\n'), ((3408, 3420), 'scipy.linalg.eigh', 'la.eigh', (['cov'], {}), '(cov)\n', (3415, 3420), True, 'import scipy.linalg as la\n'), ((4563, 4580), 'numpy.random.rand', 'np.random.rand', (['d'], {}), '(d)\n', (4577, 4580), True, 'import numpy as np\n'), ((5002, 5014), 'scipy.linalg.eigh', 'la.eigh', (['cov'], {}), '(cov)\n', (5009, 5014), True, 'import scipy.linalg as la\n'), ((5058, 5083), 'numpy.random.dirichlet', 'np.random.dirichlet', (['a', 'n'], {}), '(a, n)\n', (5077, 5083), True, 'import numpy as np\n'), ((905, 916), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (913, 916), True, 'import numpy as np\n'), ((2068, 2079), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (2076, 2079), True, 'import numpy as np\n'), ((3190, 3216), 'scipy.stats.multinomial.cov', 'multinomial.cov', (['trials', 'p'], {}), '(trials, p)\n', (3205, 3216), False, 'from scipy.stats import multinomial\n'), ((4856, 4874), 'numpy.diag_indices', 'np.diag_indices', (['d'], {}), '(d)\n', (4871, 4874), True, 'import numpy as np\n'), ((782, 802), 'numpy.random.rand', 'np.random.rand', (['d', 'k'], {}), '(d, k)\n', (796, 802), True, 'import numpy as np\n'), ((857, 866), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (863, 866), True, 'import numpy as np\n'), ((1866, 1886), 'numpy.random.rand', 'np.random.rand', (['d', 'k'], {}), '(d, k)\n', (1880, 1886), True, 'import numpy as np\n'), ((1924, 1944), 'numpy.random.rand', 'np.random.rand', (['k', '(1)'], {}), '(k, 1)\n', (1938, 1944), True, 'import numpy as np\n'), ((2020, 2029), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (2026, 2029), True, 'import numpy as np\n'), ((3278, 3296), 'numpy.diag_indices', 'np.diag_indices', (['d'], {}), '(d)\n', (3293, 3296), True, 'import numpy as np\n'), ((3530, 3565), 'numpy.random.multinomial', 'np.random.multinomial', (['trials', 'p', 'n'], {}), '(trials, p, n)\n', (3551, 3565), True, 'import numpy as np\n'), ((4808, 4822), 'numpy.outer', 'np.outer', (['a', 'a'], {}), '(a, a)\n', (4816, 4822), True, 'import numpy as np\n'), ((3242, 3256), 'numpy.outer', 'np.outer', (['p', 'p'], {}), '(p, p)\n', (3250, 3256), True, 'import numpy as np\n')]
|
#!/usr/bin/python
import numpy as np
import os
import pymaster as nmt
import pytest
import tjpcov.main as cv
from tjpcov.parser import parse
import yaml
import sacc
root = "./tests/benchmarks/32_DES_tjpcov_bm/"
input_yml = os.path.join(root, "tjpcov_conf_minimal.yaml")
input_yml_no_nmtc = os.path.join(root, "tjpcov_conf_minimal_no_nmtconf.yaml")
xcell_yml = os.path.join(root, "desy1_tjpcov_bm.yml")
def get_xcell_yml():
with open(xcell_yml) as f:
config = yaml.safe_load(f)
return config
def get_nmt_bin():
bpw_edges = [0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84, 90, 96]
return nmt.NmtBin.from_edges(bpw_edges[:-1], bpw_edges[1:])
def get_pair_folder_name(tracer_comb):
bn = []
for tr in tracer_comb:
bn.append(tr.split('__')[0])
return '_'.join(bn)
def get_data_cl(tr1, tr2, remove_be=False):
bn = get_pair_folder_name((tr1, tr2))
fname = os.path.join(root, bn, f"cl_{tr1}_{tr2}.npz")
cl = np.load(fname)['cl']
# Remove redundant terms
if remove_be and (tr1 == tr2) and (cl.shape[0] == 4):
cl = np.delete(cl, 2, 0)
return cl
def get_fiducial_cl(s, tr1, tr2, binned=True, remove_be=False):
bn = get_pair_folder_name((tr1, tr2))
fname = os.path.join(root, 'fiducial', bn, f"cl_{tr1}_{tr2}.npz")
cl = np.load(fname)['cl']
if binned:
s = s.copy()
s.remove_selection(data_type='cl_0b')
s.remove_selection(data_type='cl_eb')
s.remove_selection(data_type='cl_be')
s.remove_selection(data_type='cl_bb')
ix = s.indices(tracers=(tr1, tr2))
bpw = s.get_bandpower_windows(ix)
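        # Bin the unbinned fiducial spectrum by convolving it with the bandpower window.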
cl0_bin = bpw.weight.T.dot(cl[0])
cl_bin = np.zeros((cl.shape[0], cl0_bin.size))
cl_bin[0] = cl0_bin
cl = cl_bin
# Remove redundant terms
if remove_be and (tr1 == tr2) and (cl.shape[0] == 4):
cl = np.delete(cl, 2, 0)
return cl
def get_tracer_noise(tr, cp=True):
bn = get_pair_folder_name((tr, tr))
fname = os.path.join(root, bn, f"cl_{tr}_{tr}.npz")
clfile = np.load(fname)
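    # 'nl_cp' stores the mask-coupled noise spectrum (used for the benchmark); 'nl' the decoupled one.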
if cp:
return clfile['nl_cp'][0, -1]
else:
return clfile['nl'][0, 0]
def get_benchmark_cov(tracer_comb1, tracer_comb2):
(tr1, tr2), (tr3, tr4) = tracer_comb1, tracer_comb2
fname = os.path.join(root, 'cov', f'cov_{tr1}_{tr2}_{tr3}_{tr4}.npz')
return np.load(fname)['cov']
def get_workspace(tr1, tr2):
config = get_xcell_yml()
w = nmt.NmtWorkspace()
bn = get_pair_folder_name((tr1, tr2))
m1 = config['tracers'][tr1]['mask_name']
m2 = config['tracers'][tr2]['mask_name']
fname = os.path.join(root, bn, f"w__{m1}__{m2}.fits")
w.read_from(fname)
return w
def get_covariance_workspace(tr1, tr2, tr3, tr4):
config = get_xcell_yml()
cw = nmt.NmtCovarianceWorkspace()
m1 = config['tracers'][tr1]['mask_name']
m2 = config['tracers'][tr2]['mask_name']
m3 = config['tracers'][tr3]['mask_name']
m4 = config['tracers'][tr4]['mask_name']
fname = os.path.join(root, 'cov', f"cw__{m1}__{m2}__{m3}__{m4}.fits")
cw.read_from(fname)
return cw
def assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, threshold):
cl1 = get_data_cl(*tracer_comb1, remove_be=True)
cl2 = get_data_cl(*tracer_comb2, remove_be=True)
clf1 = get_fiducial_cl(s, *tracer_comb1, remove_be=True)
clf2 = get_fiducial_cl(s, *tracer_comb2, remove_be=True)
ndim, nbpw = cl1.shape
# This only runs if tracer_comb1 = tracer_comb2 (when the block covariance
# is invertible)
if (tracer_comb1[0] == tracer_comb1[1]) and (ndim == 3):
cov = cov.reshape((nbpw, 4, nbpw, 4))
cov = np.delete(np.delete(cov, 2, 1), 2, 3).reshape(3 * nbpw, -1)
cov_bm = cov_bm.reshape((nbpw, 4, nbpw, 4))
cov_bm = np.delete(np.delete(cov_bm, 2, 1), 2, 3).reshape(3 * nbpw, -1)
delta1 = (clf1 - cl1).flatten()
delta2 = (clf2 - cl2).flatten()
chi2 = delta1.dot(np.linalg.inv(cov)).dot(delta2)
chi2_bm = delta1.dot(np.linalg.inv(cov_bm)).dot(delta2)
assert np.abs(chi2 / chi2_bm - 1) < threshold
def test_nmt_conf_missing():
"""
    Check that the input file may omit nmt_conf and the covariance computation still runs
"""
tjpcov_class = cv.CovarianceCalculator(input_yml_no_nmtc)
ccl_tracers, tracer_noise = tjpcov_class.get_tracer_info(tjpcov_class.cl_data)
tracer_comb1 = tracer_comb2 = ('DESgc__0', 'DESgc__0')
cache = {'bins': get_nmt_bin()}
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers, tracer_noise,
cache=cache)['final'] + 1e-100
@pytest.mark.parametrize('tracer_comb1,tracer_comb2',
[(('DESgc__0', 'DESgc__0'), ('DESgc__0', 'DESgc__0')),
(('DESgc__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')),
(('DESgc__0', 'DESgc__0'), ('DESwl__0', 'DESwl__0')),
(('DESwl__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')),
(('DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1')),
])
def test_nmt_gaussian_cov(tracer_comb1, tracer_comb2):
# tjpcov_class = cv.CovarianceCalculator(input_yml)
# cache = {'bins': get_nmt_bin()}
config, _= parse(input_yml)
bins = get_nmt_bin()
config['tjpcov']['binning_info'] = bins
tjpcov_class = cv.CovarianceCalculator(config)
cache = None
ccl_tracers, tracer_noise = tjpcov_class.get_tracer_info(tjpcov_class.cl_data)
for tr in tracer_comb1 + tracer_comb2:
tracer_noise[tr] = get_tracer_noise(tr)
# Test error with uncoupled and coupled noise provided
with pytest.raises(ValueError):
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise=tracer_noise,
tracer_Noise_coupled=tracer_noise,
cache=cache)['final']
# Cov with coupled noise (as in benchmark)
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise_coupled=tracer_noise,
cache=cache)['final'] + 1e-100
cov_bm = get_benchmark_cov(tracer_comb1, tracer_comb2) + 1e-100
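    # The tiny 1e-100 offset guards against division by zero in the relative comparisons below.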
assert np.max(np.abs(np.diag(cov) / np.diag(cov_bm) - 1)) < 1e-5
assert np.max(np.abs(cov / cov_bm - 1)) < 1e-5
# Test error with 'bins' in cache different to that at initialization
with pytest.raises(ValueError):
cache2 = {'bins': nmt.NmtBin.from_nside_linear(32, bins.get_n_bands())}
cov2 = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise=tracer_noise,
tracer_Noise_coupled=tracer_noise,
cache=cache2)['final']
# Test it runs with 'bins' in cache if they are the same
cache2 = {'bins': bins}
cov2 = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise_coupled=tracer_noise,
cache=cache2)['final'] + 1e-100
assert np.all(cov == cov2)
# Cov with uncoupled noise cannot be used for benchmark as tracer_noise is
# assumed to be flat but it is not when computed from the coupled due to
# edge effects
if tracer_comb1 == tracer_comb2:
s = tjpcov_class.cl_data
assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, 1e-5)
# Check that it runs if one of the masks does not overlap with the others
if tracer_comb1 != tracer_comb2:
os.system("rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*")
tjpcov_class.mask_fn[tracer_comb1[0]] = \
'./tests/benchmarks/32_DES_tjpcov_bm/catalogs/mask_nonoverlapping.fits.gz'
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers,
tracer_Noise_coupled=tracer_noise,
cache=cache)
os.system("rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*")
@pytest.mark.parametrize('tracer_comb1,tracer_comb2',
[(('DESgc__0', 'DESgc__0'), ('DESgc__0', 'DESgc__0')),
(('DESgc__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')),
(('DESgc__0', 'DESgc__0'), ('DESwl__0', 'DESwl__0')),
(('DESwl__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')),
(('DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1')),
])
def test_nmt_gaussian_cov_cache(tracer_comb1, tracer_comb2):
tjpcov_class = cv.CovarianceCalculator(input_yml)
ccl_tracers, tracer_noise = tjpcov_class.get_tracer_info(tjpcov_class.cl_data)
for tr in tracer_comb1 + tracer_comb2:
tracer_noise[tr] = get_tracer_noise(tr)
(tr1, tr2), (tr3, tr4) = tracer_comb1, tracer_comb2
s = None # Not needed if binned=False
cl13 = get_fiducial_cl(s, tr1, tr3, binned=False)
cl24 = get_fiducial_cl(s, tr2, tr4, binned=False)
cl14 = get_fiducial_cl(s, tr1, tr4, binned=False)
cl23 = get_fiducial_cl(s, tr2, tr3, binned=False)
cache = {
# 'f1': f1, 'f2': f2, 'f3': f3, 'f4': f4,
# 'm1': m1, 'm2': m2, 'm3': m3, 'm4': m4,
# 'w13': w13, 'w23': w23, 'w14': w14, 'w24': w24,
# 'w12': w12, 'w34': w34,
# 'cw': cw,
'cl13': cl13, 'cl24': cl24, 'cl14': cl14, 'cl23':cl23,
# 'SN13': SN13, 'SN24': SN24, 'SN14': SN14, 'SN23': SN23,
'bins': get_nmt_bin()
}
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers, tracer_Noise_coupled=tracer_noise,
cache=cache)['final'] + 1e-100
cov_bm = get_benchmark_cov(tracer_comb1, tracer_comb2) + 1e-100
assert np.max(np.abs(np.diag(cov) / np.diag(cov_bm) - 1)) < 1e-5
assert np.max(np.abs(cov / cov_bm - 1)) < 1e-5
if tracer_comb1 == tracer_comb2:
s = tjpcov_class.cl_data
assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, 1e-5)
w13 = get_workspace(tr1, tr3)
w23 = get_workspace(tr2, tr3)
w14 = get_workspace(tr1, tr4)
w24 = get_workspace(tr2, tr4)
w12 = get_workspace(tr1, tr2)
w34 = get_workspace(tr3, tr4)
cw = get_covariance_workspace(*tracer_comb1, *tracer_comb2)
cache = {
# 'f1': f1, 'f2': f2, 'f3': f3, 'f4': f4,
# 'm1': m1, 'm2': m2, 'm3': m3, 'm4': m4,
'w13': w13, 'w23': w23, 'w14': w14, 'w24': w24,
'w12': w12, 'w34': w34,
'cw': cw,
'cl13': cl13, 'cl24': cl24, 'cl14': cl14, 'cl23':cl23,
# 'SN13': SN13, 'SN24': SN24, 'SN14': SN14, 'SN23': SN23,
'bins': get_nmt_bin()
}
cov = tjpcov_class.nmt_gaussian_cov(tracer_comb1, tracer_comb2,
ccl_tracers, tracer_Noise_coupled=tracer_noise,
cache=cache)['final'] + 1e-100
assert np.max(np.abs(np.diag(cov) / np.diag(cov_bm) - 1)) < 1e-6
assert np.max(np.abs(cov / cov_bm - 1)) < 1e-6
if tracer_comb1 == tracer_comb2:
s = tjpcov_class.cl_data
assert_chi2(s, tracer_comb1, tracer_comb2, cov, cov_bm, 1e-6)
def test_get_all_cov_nmt():
tjpcov_class = cv.CovarianceCalculator(input_yml)
s = tjpcov_class.cl_data
bins = get_nmt_bin()
tracer_noise = {}
for tr in s.tracers:
tracer_noise[tr] = get_tracer_noise(tr)
# Test error with uncoupled and coupled noise provided
with pytest.raises(ValueError):
cov = tjpcov_class.get_all_cov_nmt(tracer_noise=tracer_noise,
tracer_noise_coupled=tracer_noise,
cache={'bins': bins})
cov = tjpcov_class.get_all_cov_nmt(tracer_noise_coupled=tracer_noise,
cache={'bins': bins}) + 1e-100
cov_bm = s.covariance.covmat + 1e-100
assert np.max(np.abs(np.diag(cov) / np.diag(cov_bm) - 1)) < 1e-5
assert np.max(np.abs(cov / cov_bm - 1)) < 1e-3
# Check chi2
clf = np.array([])
for trs in s.get_tracer_combinations():
cl_trs = get_fiducial_cl(s, *trs, remove_be=True)
clf = np.concatenate((clf, cl_trs.flatten()))
cl = s.mean
delta = clf - cl
chi2 = delta.dot(np.linalg.inv(cov)).dot(delta)
chi2_bm = delta.dot(np.linalg.inv(cov_bm)).dot(delta)
assert np.abs(chi2 / chi2_bm - 1) < 1e-5
# Check that it also works if they don't use concise data_types
s2 = s.copy()
for dp in s2.data:
dt = dp.data_type
if dt == 'cl_00':
dp.data_type = sacc.standard_types.galaxy_density_cl
elif dt == 'cl_0e':
dp.data_type = sacc.standard_types.galaxy_shearDensity_cl_e
elif dt == 'cl_0b':
dp.data_type = sacc.standard_types.galaxy_shearDensity_cl_b
elif dt == 'cl_ee':
dp.data_type = sacc.standard_types.galaxy_shear_cl_ee
elif dt == 'cl_eb':
dp.data_type = sacc.standard_types.galaxy_shear_cl_eb
elif dt == 'cl_be':
dp.data_type = sacc.standard_types.galaxy_shear_cl_be
elif dt == 'cl_bb':
dp.data_type = sacc.standard_types.galaxy_shear_cl_bb
else:
raise ValueError('Something went wrong. Data type not recognized')
tjpcov_class.cl_data = s2
cov2 = tjpcov_class.get_all_cov_nmt(tracer_noise_coupled=tracer_noise,
cache={'bins': bins}) + 1e-100
assert np.all(cov == cov2)
# Clean up after the tests
os.system("rm -rf ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/")
|
[
"pymaster.NmtWorkspace",
"numpy.abs",
"pymaster.NmtCovarianceWorkspace",
"numpy.delete",
"os.path.join",
"tjpcov.parser.parse",
"numpy.diag",
"pytest.mark.parametrize",
"numpy.array",
"yaml.safe_load",
"numpy.zeros",
"pytest.raises",
"numpy.linalg.inv",
"os.system",
"numpy.all",
"numpy.load",
"tjpcov.main.CovarianceCalculator",
"pymaster.NmtBin.from_edges"
] |
[((225, 271), 'os.path.join', 'os.path.join', (['root', '"""tjpcov_conf_minimal.yaml"""'], {}), "(root, 'tjpcov_conf_minimal.yaml')\n", (237, 271), False, 'import os\n'), ((292, 349), 'os.path.join', 'os.path.join', (['root', '"""tjpcov_conf_minimal_no_nmtconf.yaml"""'], {}), "(root, 'tjpcov_conf_minimal_no_nmtconf.yaml')\n", (304, 349), False, 'import os\n'), ((362, 403), 'os.path.join', 'os.path.join', (['root', '"""desy1_tjpcov_bm.yml"""'], {}), "(root, 'desy1_tjpcov_bm.yml')\n", (374, 403), False, 'import os\n'), ((4695, 5037), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tracer_comb1,tracer_comb2"""', "[(('DESgc__0', 'DESgc__0'), ('DESgc__0', 'DESgc__0')), (('DESgc__0',\n 'DESwl__0'), ('DESwl__0', 'DESwl__0')), (('DESgc__0', 'DESgc__0'), (\n 'DESwl__0', 'DESwl__0')), (('DESwl__0', 'DESwl__0'), ('DESwl__0',\n 'DESwl__0')), (('DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1'))]"], {}), "('tracer_comb1,tracer_comb2', [(('DESgc__0',\n 'DESgc__0'), ('DESgc__0', 'DESgc__0')), (('DESgc__0', 'DESwl__0'), (\n 'DESwl__0', 'DESwl__0')), (('DESgc__0', 'DESgc__0'), ('DESwl__0',\n 'DESwl__0')), (('DESwl__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')), ((\n 'DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1'))])\n", (4718, 5037), False, 'import pytest\n'), ((8540, 8882), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tracer_comb1,tracer_comb2"""', "[(('DESgc__0', 'DESgc__0'), ('DESgc__0', 'DESgc__0')), (('DESgc__0',\n 'DESwl__0'), ('DESwl__0', 'DESwl__0')), (('DESgc__0', 'DESgc__0'), (\n 'DESwl__0', 'DESwl__0')), (('DESwl__0', 'DESwl__0'), ('DESwl__0',\n 'DESwl__0')), (('DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1'))]"], {}), "('tracer_comb1,tracer_comb2', [(('DESgc__0',\n 'DESgc__0'), ('DESgc__0', 'DESgc__0')), (('DESgc__0', 'DESwl__0'), (\n 'DESwl__0', 'DESwl__0')), (('DESgc__0', 'DESgc__0'), ('DESwl__0',\n 'DESwl__0')), (('DESwl__0', 'DESwl__0'), ('DESwl__0', 'DESwl__0')), ((\n 'DESwl__0', 'DESwl__0'), ('DESwl__1', 'DESwl__1'))])\n", (8563, 8882), False, 'import pytest\n'), ((14178, 14245), 'os.system', 'os.system', (['"""rm -rf ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/"""'], {}), "('rm -rf ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/')\n", (14187, 14245), False, 'import os\n'), ((627, 679), 'pymaster.NmtBin.from_edges', 'nmt.NmtBin.from_edges', (['bpw_edges[:-1]', 'bpw_edges[1:]'], {}), '(bpw_edges[:-1], bpw_edges[1:])\n', (648, 679), True, 'import pymaster as nmt\n'), ((921, 966), 'os.path.join', 'os.path.join', (['root', 'bn', 'f"""cl_{tr1}_{tr2}.npz"""'], {}), "(root, bn, f'cl_{tr1}_{tr2}.npz')\n", (933, 966), False, 'import os\n'), ((1252, 1309), 'os.path.join', 'os.path.join', (['root', '"""fiducial"""', 'bn', 'f"""cl_{tr1}_{tr2}.npz"""'], {}), "(root, 'fiducial', bn, f'cl_{tr1}_{tr2}.npz')\n", (1264, 1309), False, 'import os\n'), ((2037, 2080), 'os.path.join', 'os.path.join', (['root', 'bn', 'f"""cl_{tr}_{tr}.npz"""'], {}), "(root, bn, f'cl_{tr}_{tr}.npz')\n", (2049, 2080), False, 'import os\n'), ((2094, 2108), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (2101, 2108), True, 'import numpy as np\n'), ((2323, 2384), 'os.path.join', 'os.path.join', (['root', '"""cov"""', 'f"""cov_{tr1}_{tr2}_{tr3}_{tr4}.npz"""'], {}), "(root, 'cov', f'cov_{tr1}_{tr2}_{tr3}_{tr4}.npz')\n", (2335, 2384), False, 'import os\n'), ((2486, 2504), 'pymaster.NmtWorkspace', 'nmt.NmtWorkspace', ([], {}), '()\n', (2502, 2504), True, 'import pymaster as nmt\n'), ((2649, 2694), 'os.path.join', 'os.path.join', (['root', 'bn', 'f"""w__{m1}__{m2}.fits"""'], {}), "(root, bn, 
f'w__{m1}__{m2}.fits')\n", (2661, 2694), False, 'import os\n'), ((2821, 2849), 'pymaster.NmtCovarianceWorkspace', 'nmt.NmtCovarianceWorkspace', ([], {}), '()\n', (2847, 2849), True, 'import pymaster as nmt\n'), ((3042, 3103), 'os.path.join', 'os.path.join', (['root', '"""cov"""', 'f"""cw__{m1}__{m2}__{m3}__{m4}.fits"""'], {}), "(root, 'cov', f'cw__{m1}__{m2}__{m3}__{m4}.fits')\n", (3054, 3103), False, 'import os\n'), ((4260, 4302), 'tjpcov.main.CovarianceCalculator', 'cv.CovarianceCalculator', (['input_yml_no_nmtc'], {}), '(input_yml_no_nmtc)\n', (4283, 4302), True, 'import tjpcov.main as cv\n'), ((5342, 5358), 'tjpcov.parser.parse', 'parse', (['input_yml'], {}), '(input_yml)\n', (5347, 5358), False, 'from tjpcov.parser import parse\n'), ((5447, 5478), 'tjpcov.main.CovarianceCalculator', 'cv.CovarianceCalculator', (['config'], {}), '(config)\n', (5470, 5478), True, 'import tjpcov.main as cv\n'), ((7534, 7553), 'numpy.all', 'np.all', (['(cov == cov2)'], {}), '(cov == cov2)\n', (7540, 7553), True, 'import numpy as np\n'), ((9102, 9136), 'tjpcov.main.CovarianceCalculator', 'cv.CovarianceCalculator', (['input_yml'], {}), '(input_yml)\n', (9125, 9136), True, 'import tjpcov.main as cv\n'), ((11853, 11887), 'tjpcov.main.CovarianceCalculator', 'cv.CovarianceCalculator', (['input_yml'], {}), '(input_yml)\n', (11876, 11887), True, 'import tjpcov.main as cv\n'), ((12683, 12695), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12691, 12695), True, 'import numpy as np\n'), ((14130, 14149), 'numpy.all', 'np.all', (['(cov == cov2)'], {}), '(cov == cov2)\n', (14136, 14149), True, 'import numpy as np\n'), ((475, 492), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (489, 492), False, 'import yaml\n'), ((976, 990), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (983, 990), True, 'import numpy as np\n'), ((1098, 1117), 'numpy.delete', 'np.delete', (['cl', '(2)', '(0)'], {}), '(cl, 2, 0)\n', (1107, 1117), True, 'import numpy as np\n'), ((1319, 1333), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (1326, 1333), True, 'import numpy as np\n'), ((1706, 1743), 'numpy.zeros', 'np.zeros', (['(cl.shape[0], cl0_bin.size)'], {}), '((cl.shape[0], cl0_bin.size))\n', (1714, 1743), True, 'import numpy as np\n'), ((1914, 1933), 'numpy.delete', 'np.delete', (['cl', '(2)', '(0)'], {}), '(cl, 2, 0)\n', (1923, 1933), True, 'import numpy as np\n'), ((2396, 2410), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (2403, 2410), True, 'import numpy as np\n'), ((4086, 4112), 'numpy.abs', 'np.abs', (['(chi2 / chi2_bm - 1)'], {}), '(chi2 / chi2_bm - 1)\n', (4092, 4112), True, 'import numpy as np\n'), ((5741, 5766), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5754, 5766), False, 'import pytest\n'), ((6702, 6727), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6715, 6727), False, 'import pytest\n'), ((7995, 8062), 'os.system', 'os.system', (['"""rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*"""'], {}), "('rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*')\n", (8004, 8062), False, 'import os\n'), ((8469, 8536), 'os.system', 'os.system', (['"""rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*"""'], {}), "('rm -f ./tests/benchmarks/32_DES_tjpcov_bm/tjpcov_tmp/*')\n", (8478, 8536), False, 'import os\n'), ((12107, 12132), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12120, 12132), False, 'import pytest\n'), ((13011, 13037), 'numpy.abs', 'np.abs', (['(chi2 / chi2_bm - 1)'], {}), '(chi2 / chi2_bm - 
1)\n', (13017, 13037), True, 'import numpy as np\n'), ((6585, 6609), 'numpy.abs', 'np.abs', (['(cov / cov_bm - 1)'], {}), '(cov / cov_bm - 1)\n', (6591, 6609), True, 'import numpy as np\n'), ((10447, 10471), 'numpy.abs', 'np.abs', (['(cov / cov_bm - 1)'], {}), '(cov / cov_bm - 1)\n', (10453, 10471), True, 'import numpy as np\n'), ((11632, 11656), 'numpy.abs', 'np.abs', (['(cov / cov_bm - 1)'], {}), '(cov / cov_bm - 1)\n', (11638, 11656), True, 'import numpy as np\n'), ((12622, 12646), 'numpy.abs', 'np.abs', (['(cov / cov_bm - 1)'], {}), '(cov / cov_bm - 1)\n', (12628, 12646), True, 'import numpy as np\n'), ((3981, 3999), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (3994, 3999), True, 'import numpy as np\n'), ((4038, 4059), 'numpy.linalg.inv', 'np.linalg.inv', (['cov_bm'], {}), '(cov_bm)\n', (4051, 4059), True, 'import numpy as np\n'), ((12911, 12929), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (12924, 12929), True, 'import numpy as np\n'), ((12966, 12987), 'numpy.linalg.inv', 'np.linalg.inv', (['cov_bm'], {}), '(cov_bm)\n', (12979, 12987), True, 'import numpy as np\n'), ((3704, 3724), 'numpy.delete', 'np.delete', (['cov', '(2)', '(1)'], {}), '(cov, 2, 1)\n', (3713, 3724), True, 'import numpy as np\n'), ((3833, 3856), 'numpy.delete', 'np.delete', (['cov_bm', '(2)', '(1)'], {}), '(cov_bm, 2, 1)\n', (3842, 3856), True, 'import numpy as np\n'), ((6523, 6535), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (6530, 6535), True, 'import numpy as np\n'), ((6538, 6553), 'numpy.diag', 'np.diag', (['cov_bm'], {}), '(cov_bm)\n', (6545, 6553), True, 'import numpy as np\n'), ((10385, 10397), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (10392, 10397), True, 'import numpy as np\n'), ((10400, 10415), 'numpy.diag', 'np.diag', (['cov_bm'], {}), '(cov_bm)\n', (10407, 10415), True, 'import numpy as np\n'), ((11570, 11582), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (11577, 11582), True, 'import numpy as np\n'), ((11585, 11600), 'numpy.diag', 'np.diag', (['cov_bm'], {}), '(cov_bm)\n', (11592, 11600), True, 'import numpy as np\n'), ((12560, 12572), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (12567, 12572), True, 'import numpy as np\n'), ((12575, 12590), 'numpy.diag', 'np.diag', (['cov_bm'], {}), '(cov_bm)\n', (12582, 12590), True, 'import numpy as np\n')]
|
"""
Driver program for training and evaluation.
"""
import argparse
import logging
import numpy as np
import random
import torch
import torch.optim as O
from datasets import get_dataset, get_dataset_configurations
from models import get_model
from runners import Runner
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sentence similarity models')
parser.add_argument('--model', default='sif', choices=['sif', 'mpcnn', 'mpcnn-lite', 'bimpm'], help='Model to use')
parser.add_argument('--dataset', default='sick', choices=['sick', 'wikiqa'], help='Dataset to use')
parser.add_argument('--batch-size', type=int, default=64, help='Batch size')
parser.add_argument('--epochs', type=int, default=15, help='Number of epochs')
parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate')
parser.add_argument('--regularization', type=float, default=3e-4, help='Regularization')
parser.add_argument('--seed', type=int, default=1234, help='Seed for reproducibility')
parser.add_argument('--device', type=int, default=0, help='Device, -1 for CPU')
    parser.add_argument('--log-interval', type=int, default=50, help='Number of batches between training log messages')
# Special options for SIF model
parser.add_argument('--unsupervised', action='store_true', default=False, help='Set this flag to use unsupervised mode.')
parser.add_argument('--alpha', type=float, default=1e-3, help='Smoothing term for smooth inverse frequency baseline model')
parser.add_argument('--no-remove-special-direction', action='store_true', default=False, help='Set to not remove projection onto first principal component')
parser.add_argument('--frequency-dataset', default='enwiki', choices=['train', 'enwiki'])
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.device != -1:
torch.cuda.manual_seed(args.seed)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
dataset_cls, train_loader, dev_loader, test_loader, embedding = get_dataset(args)
model = get_model(args, dataset_cls, embedding)
if args.model == 'sif':
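        # The SIF baseline weights word vectors by smoothed inverse frequency, so it needs word-frequency estimates from the training set.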
model.populate_word_frequency_estimation(train_loader)
total_params = 0
for param in model.parameters():
size = [s for s in param.size()]
total_params += np.prod(size)
logger.info('Total number of parameters: %s', total_params)
loss_fn, metrics, y_to_score, resolved_pred_to_score = get_dataset_configurations(args)
optimizer = O.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.regularization)
runner = Runner(model, loss_fn, metrics, optimizer, y_to_score, resolved_pred_to_score, args.device, None)
runner.run(args.epochs, train_loader, dev_loader, test_loader, args.log_interval)
|
[
"logging.getLogger",
"torch.manual_seed",
"numpy.prod",
"logging.StreamHandler",
"argparse.ArgumentParser",
"runners.Runner",
"logging.Formatter",
"models.get_model",
"random.seed",
"torch.cuda.manual_seed",
"numpy.random.seed",
"datasets.get_dataset_configurations",
"datasets.get_dataset"
] |
[((314, 379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sentence similarity models"""'}), "(description='Sentence similarity models')\n", (337, 379), False, 'import argparse\n'), ((1789, 1811), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (1800, 1811), False, 'import random\n'), ((1816, 1841), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1830, 1841), True, 'import numpy as np\n'), ((1846, 1874), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1863, 1874), False, 'import torch\n'), ((1957, 1984), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1974, 1984), False, 'import logging\n'), ((2028, 2051), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2049, 2051), False, 'import logging\n'), ((2099, 2147), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s - %(message)s"""'], {}), "('%(levelname)s - %(message)s')\n", (2116, 2147), False, 'import logging\n'), ((2274, 2291), 'datasets.get_dataset', 'get_dataset', (['args'], {}), '(args)\n', (2285, 2291), False, 'from datasets import get_dataset, get_dataset_configurations\n'), ((2304, 2343), 'models.get_model', 'get_model', (['args', 'dataset_cls', 'embedding'], {}), '(args, dataset_cls, embedding)\n', (2313, 2343), False, 'from models import get_model\n'), ((2698, 2730), 'datasets.get_dataset_configurations', 'get_dataset_configurations', (['args'], {}), '(args)\n', (2724, 2730), False, 'from datasets import get_dataset, get_dataset_configurations\n'), ((2869, 2970), 'runners.Runner', 'Runner', (['model', 'loss_fn', 'metrics', 'optimizer', 'y_to_score', 'resolved_pred_to_score', 'args.device', 'None'], {}), '(model, loss_fn, metrics, optimizer, y_to_score,\n resolved_pred_to_score, args.device, None)\n', (2875, 2970), False, 'from runners import Runner\n'), ((1909, 1942), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1931, 1942), False, 'import torch\n'), ((2560, 2573), 'numpy.prod', 'np.prod', (['size'], {}), '(size)\n', (2567, 2573), True, 'import numpy as np\n')]
|
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
names = locals()
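# locals() is kept so images can be looked up by variable name (img0..img7, shard_r..shard_a) in the plotting loops below.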
img0 = Image.open("./assets/pyCharm.png")
# print image info:
print(img0.size, img0.format, img0.mode, np.array(img0))
# save other format
# img0.save('./assets/pyCharm.tiff')
# img0.convert('RGB').save('./assets/pyCharm.jpeg')
# img0.convert('L').save('./assets/pyCharm.bmp') # grayscale image
img1 = Image.open('./assets/pyCharm.tiff') # 3-channel image
img2 = Image.open('./assets/pyCharm.jpeg')
img3 = Image.open('./assets/pyCharm.bmp')
# A 3-channel image can be split into its bands
img4, img5, img6 = img2.split()
img7 = Image.merge('RGB', [img5, img6, img4])
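# Merging the bands in a different order produces a colour-swapped copy of img2.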
plt.figure(figsize=(15, 15))
for i in range(8):
plt.subplot(4, 3, i + 1)
plt.axis('off') # hide axis
plt.imshow(names.get('img' + str(i)))
plt.title(names.get('img' + str(i)).format)
# Remove the white border of the png
img_dir = '/Users/carl/Pictures/logos/'
logo = Image.open(img_dir + 'google.png')
# Convert the palette-based 8-bit image to RGBA
logo = logo.convert('RGBA')
# Split the channels
(logo_r, logo_g, logo_b, logo_a) = logo.split()
# Convert each channel to a numpy array
arr_r = np.array(logo_r)
arr_g = np.array(logo_g)
arr_b = np.array(logo_b)
arr_a = np.array(logo_a)
# Select the coordinates of the light-grey background pixels (R=245, G=247, B=247)
idx = (arr_r == 245) & (arr_g == 247) & (arr_b == 247)
# Set them to transparent pixels
arr_r[idx] = 0
arr_g[idx] = 0
arr_b[idx] = 0
arr_a[idx] = 0
# Convert the numpy arrays back to image objects
shard_r = Image.fromarray(arr_r)
shard_g = Image.fromarray(arr_g)
shard_b = Image.fromarray(arr_b)
shard_a = Image.fromarray(arr_a)
rgb_dict = 'rgba'
for i in range(4):
plt.subplot(4, 3, i+9)
plt.axis('off') # hide axis
plt.imshow(names.get('shard_' + rgb_dict[i]))
plt.title(names.get('shard_' + rgb_dict[i]).format)
# Merge the channels and save (Image.save has no overWrite option; an existing file is overwritten by default)
Image.merge('RGBA', [shard_r, shard_g, shard_b, shard_a]).save(img_dir + 'logo-1.png')
plt.tight_layout()
plt.show()
|
[
"PIL.Image.fromarray",
"PIL.Image.open",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplot",
"PIL.Image.merge",
"matplotlib.pyplot.show"
] |
[((104, 138), 'PIL.Image.open', 'Image.open', (['"""./assets/pyCharm.png"""'], {}), "('./assets/pyCharm.png')\n", (114, 138), False, 'from PIL import Image\n'), ((389, 424), 'PIL.Image.open', 'Image.open', (['"""./assets/pyCharm.tiff"""'], {}), "('./assets/pyCharm.tiff')\n", (399, 424), False, 'from PIL import Image\n'), ((440, 475), 'PIL.Image.open', 'Image.open', (['"""./assets/pyCharm.jpeg"""'], {}), "('./assets/pyCharm.jpeg')\n", (450, 475), False, 'from PIL import Image\n'), ((483, 517), 'PIL.Image.open', 'Image.open', (['"""./assets/pyCharm.bmp"""'], {}), "('./assets/pyCharm.bmp')\n", (493, 517), False, 'from PIL import Image\n'), ((568, 606), 'PIL.Image.merge', 'Image.merge', (['"""RGB"""', '[img5, img6, img4]'], {}), "('RGB', [img5, img6, img4])\n", (579, 606), False, 'from PIL import Image\n'), ((608, 636), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (618, 636), True, 'from matplotlib import pyplot as plt\n'), ((871, 905), 'PIL.Image.open', 'Image.open', (["(img_dir + 'google.png')"], {}), "(img_dir + 'google.png')\n", (881, 905), False, 'from PIL import Image\n'), ((1027, 1043), 'numpy.array', 'np.array', (['logo_r'], {}), '(logo_r)\n', (1035, 1043), True, 'import numpy as np\n'), ((1052, 1068), 'numpy.array', 'np.array', (['logo_g'], {}), '(logo_g)\n', (1060, 1068), True, 'import numpy as np\n'), ((1077, 1093), 'numpy.array', 'np.array', (['logo_b'], {}), '(logo_b)\n', (1085, 1093), True, 'import numpy as np\n'), ((1102, 1118), 'numpy.array', 'np.array', (['logo_a'], {}), '(logo_a)\n', (1110, 1118), True, 'import numpy as np\n'), ((1281, 1303), 'PIL.Image.fromarray', 'Image.fromarray', (['arr_r'], {}), '(arr_r)\n', (1296, 1303), False, 'from PIL import Image\n'), ((1314, 1336), 'PIL.Image.fromarray', 'Image.fromarray', (['arr_g'], {}), '(arr_g)\n', (1329, 1336), False, 'from PIL import Image\n'), ((1347, 1369), 'PIL.Image.fromarray', 'Image.fromarray', (['arr_b'], {}), '(arr_b)\n', (1362, 1369), False, 'from PIL import Image\n'), ((1380, 1402), 'PIL.Image.fromarray', 'Image.fromarray', (['arr_a'], {}), '(arr_a)\n', (1395, 1402), False, 'from PIL import Image\n'), ((1722, 1740), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1738, 1740), True, 'from matplotlib import pyplot as plt\n'), ((1741, 1751), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1749, 1751), True, 'from matplotlib import pyplot as plt\n'), ((200, 214), 'numpy.array', 'np.array', (['img0'], {}), '(img0)\n', (208, 214), True, 'import numpy as np\n'), ((661, 685), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(3)', '(i + 1)'], {}), '(4, 3, i + 1)\n', (672, 685), True, 'from matplotlib import pyplot as plt\n'), ((690, 705), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (698, 705), True, 'from matplotlib import pyplot as plt\n'), ((1445, 1469), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(3)', '(i + 9)'], {}), '(4, 3, i + 9)\n', (1456, 1469), True, 'from matplotlib import pyplot as plt\n'), ((1472, 1487), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1480, 1487), True, 'from matplotlib import pyplot as plt\n'), ((1618, 1675), 'PIL.Image.merge', 'Image.merge', (['"""RGBA"""', '[shard_r, shard_g, shard_b, shard_a]'], {}), "('RGBA', [shard_r, shard_g, shard_b, shard_a])\n", (1629, 1675), False, 'from PIL import Image\n')]
|
import numpy as np
import pandas as pd
import pytest
from sklearn.feature_selection import SelectKBest, chi2 as sk_chi2
from inz.utils import chi2, select_k_best, split, train_test_split
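# These tests exercise the inz.utils helpers; chi2 and select_k_best are checked against scikit-learn's implementations.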
def test_split_list_int():
ints = list(range(7))
want = [[0, 1, 2], [3, 4, 5], [6]]
get = list(split(ints, 3))
assert len(get) == len(want)
assert get == want
def test_split_int():
ints = range(7)
want = [[0, 1, 2], [3, 4, 5], [6]]
get = list(split(ints, 3))
assert len(get) == len(want)
assert get == want
def test_split_list_int_greater_width():
ints = list(range(3))
want = [[0, 1, 2]]
get = list(split(ints, 4))
assert len(get) == len(want)
assert get == want
def test_split_list_str():
strings = list(map(str, range(6)))
want = [['0', '1'], ['2', '3'], ['4', '5']]
get = list(split(strings, 2))
assert len(get) == len(want)
assert get == want
def test_str():
string = ''.join(map(str, range(6)))
want = [['0', '1'], ['2', '3'], ['4', '5']]
get = list(split(string, 2))
assert len(get) == len(want)
assert get == want
def test_split_ndarray_int():
array = np.arange(10, dtype=int).reshape(-1, 2)
want = [np.array([[0, 1], [2, 3]]),
np.array([[4, 5], [6, 7]]),
np.array([[8, 9]])]
get = list(split(array, 2))
assert len(get) == len(want)
for i, j in zip(get, want):
assert type(i) == type(j)
assert np.array_equal(i, j)
def test_split_generator_str():
strings = map(str, range(6))
want = [['0', '1'], ['2', '3'], ['4', '5']]
get = list(split(strings, 2))
assert len(get) == len(want)
assert get == want
def test_split_list_int_not_allow():
ints = list(range(7))
want = [[0, 1, 2], [3, 4, 5]]
get = list(split(ints, 3, False))
assert len(get) == len(want)
assert get == want
def test_split_list_int_greater_width_not_allow():
ints = list(range(3))
want = []
get = list(split(ints, 4, False))
assert len(get) == len(want)
assert get == want
def test_split_list_str_not_allow():
strings = list(map(str, range(6)))
want = [['0', '1'], ['2', '3'], ['4', '5']]
get = list(split(strings, 2, False))
assert len(get) == len(want)
assert get == want
def test_split_ndarray_int_not_allow():
array = np.arange(10, dtype=int).reshape(-1, 2)
want = [np.array([[0, 1], [2, 3]]),
np.array([[4, 5], [6, 7]])]
get = list(split(array, 2, False))
assert len(get) == len(want)
for i, j in zip(get, want):
assert type(i) == type(j)
assert np.array_equal(i, j)
def test_split_generator_str_not_allow():
strings = map(str, range(6))
want = [['0', '1'], ['2', '3'], ['4', '5']]
get = list(split(strings, 2, False))
assert len(get) == len(want)
assert get == want
@pytest.fixture
def data():
X = pd.read_csv('../../data/data.csv')
y = X.pop('Choroba')
return X.values, y.values
def test_chi2(data):
X, y = data
sk_val, _ = sk_chi2(X, y)
my_val = chi2(X, y)
np.testing.assert_equal(sk_val, my_val)
def test_select_k_best(data):
X, y = data
for i in range(1, 31):
sk_sup1 = SelectKBest(sk_chi2, i).fit(X, y).get_support()
sk_sup2 = SelectKBest(sk_chi2, i).fit(X, y).get_support(True)
my_sup1 = select_k_best(X, y, k=i)
my_sup2 = select_k_best(X, y, k=i, indices=True)
np.testing.assert_equal(sk_sup1, my_sup1, str(i))
np.testing.assert_equal(sk_sup2, sorted(my_sup2), str(i))
def test_train_test_split():
x = np.arange(10)
get = train_test_split(x, shuffle=False)
want = [np.arange(7), np.arange(7, 10)]
for i in zip(get, want):
np.testing.assert_equal(*i)
def test_train_test_split5():
x = np.arange(10)
get = train_test_split(x, test_size=.5, shuffle=False)
want = [np.arange(5), np.arange(5, 10)]
for i in zip(get, want):
np.testing.assert_equal(*i)
if __name__ == '__main__':
pytest.main()
|
[
"inz.utils.train_test_split",
"inz.utils.split",
"numpy.testing.assert_equal",
"pandas.read_csv",
"inz.utils.select_k_best",
"pytest.main",
"sklearn.feature_selection.SelectKBest",
"numpy.array",
"inz.utils.chi2",
"numpy.array_equal",
"sklearn.feature_selection.chi2",
"numpy.arange"
] |
[((2899, 2933), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/data.csv"""'], {}), "('../../data/data.csv')\n", (2910, 2933), True, 'import pandas as pd\n'), ((3044, 3057), 'sklearn.feature_selection.chi2', 'sk_chi2', (['X', 'y'], {}), '(X, y)\n', (3051, 3057), True, 'from sklearn.feature_selection import SelectKBest, chi2 as sk_chi2\n'), ((3071, 3081), 'inz.utils.chi2', 'chi2', (['X', 'y'], {}), '(X, y)\n', (3075, 3081), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((3087, 3126), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['sk_val', 'my_val'], {}), '(sk_val, my_val)\n', (3110, 3126), True, 'import numpy as np\n'), ((3603, 3616), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3612, 3616), True, 'import numpy as np\n'), ((3627, 3661), 'inz.utils.train_test_split', 'train_test_split', (['x'], {'shuffle': '(False)'}), '(x, shuffle=False)\n', (3643, 3661), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((3811, 3824), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3820, 3824), True, 'import numpy as np\n'), ((3835, 3884), 'inz.utils.train_test_split', 'train_test_split', (['x'], {'test_size': '(0.5)', 'shuffle': '(False)'}), '(x, test_size=0.5, shuffle=False)\n', (3851, 3884), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((4026, 4039), 'pytest.main', 'pytest.main', ([], {}), '()\n', (4037, 4039), False, 'import pytest\n'), ((297, 311), 'inz.utils.split', 'split', (['ints', '(3)'], {}), '(ints, 3)\n', (302, 311), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((467, 481), 'inz.utils.split', 'split', (['ints', '(3)'], {}), '(ints, 3)\n', (472, 481), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((646, 660), 'inz.utils.split', 'split', (['ints', '(4)'], {}), '(ints, 4)\n', (651, 660), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((849, 866), 'inz.utils.split', 'split', (['strings', '(2)'], {}), '(strings, 2)\n', (854, 866), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((1046, 1062), 'inz.utils.split', 'split', (['string', '(2)'], {}), '(string, 2)\n', (1051, 1062), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((1216, 1242), 'numpy.array', 'np.array', (['[[0, 1], [2, 3]]'], {}), '([[0, 1], [2, 3]])\n', (1224, 1242), True, 'import numpy as np\n'), ((1256, 1282), 'numpy.array', 'np.array', (['[[4, 5], [6, 7]]'], {}), '([[4, 5], [6, 7]])\n', (1264, 1282), True, 'import numpy as np\n'), ((1296, 1314), 'numpy.array', 'np.array', (['[[8, 9]]'], {}), '([[8, 9]])\n', (1304, 1314), True, 'import numpy as np\n'), ((1331, 1346), 'inz.utils.split', 'split', (['array', '(2)'], {}), '(array, 2)\n', (1336, 1346), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((1462, 1482), 'numpy.array_equal', 'np.array_equal', (['i', 'j'], {}), '(i, j)\n', (1476, 1482), True, 'import numpy as np\n'), ((1613, 1630), 'inz.utils.split', 'split', (['strings', '(2)'], {}), '(strings, 2)\n', (1618, 1630), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((1802, 1823), 'inz.utils.split', 'split', (['ints', '(3)', '(False)'], {}), '(ints, 3, False)\n', (1807, 1823), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((1989, 2010), 'inz.utils.split', 'split', (['ints', '(4)', '(False)'], {}), '(ints, 4, False)\n', (1994, 2010), False, 
'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((2209, 2233), 'inz.utils.split', 'split', (['strings', '(2)', '(False)'], {}), '(strings, 2, False)\n', (2214, 2233), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((2397, 2423), 'numpy.array', 'np.array', (['[[0, 1], [2, 3]]'], {}), '([[0, 1], [2, 3]])\n', (2405, 2423), True, 'import numpy as np\n'), ((2437, 2463), 'numpy.array', 'np.array', (['[[4, 5], [6, 7]]'], {}), '([[4, 5], [6, 7]])\n', (2445, 2463), True, 'import numpy as np\n'), ((2480, 2502), 'inz.utils.split', 'split', (['array', '(2)', '(False)'], {}), '(array, 2, False)\n', (2485, 2502), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((2618, 2638), 'numpy.array_equal', 'np.array_equal', (['i', 'j'], {}), '(i, j)\n', (2632, 2638), True, 'import numpy as np\n'), ((2779, 2803), 'inz.utils.split', 'split', (['strings', '(2)', '(False)'], {}), '(strings, 2, False)\n', (2784, 2803), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((3357, 3381), 'inz.utils.select_k_best', 'select_k_best', (['X', 'y'], {'k': 'i'}), '(X, y, k=i)\n', (3370, 3381), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((3400, 3438), 'inz.utils.select_k_best', 'select_k_best', (['X', 'y'], {'k': 'i', 'indices': '(True)'}), '(X, y, k=i, indices=True)\n', (3413, 3438), False, 'from inz.utils import chi2, select_k_best, split, train_test_split\n'), ((3674, 3686), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (3683, 3686), True, 'import numpy as np\n'), ((3688, 3704), 'numpy.arange', 'np.arange', (['(7)', '(10)'], {}), '(7, 10)\n', (3697, 3704), True, 'import numpy as np\n'), ((3743, 3770), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['*i'], {}), '(*i)\n', (3766, 3770), True, 'import numpy as np\n'), ((3896, 3908), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (3905, 3908), True, 'import numpy as np\n'), ((3910, 3926), 'numpy.arange', 'np.arange', (['(5)', '(10)'], {}), '(5, 10)\n', (3919, 3926), True, 'import numpy as np\n'), ((3965, 3992), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['*i'], {}), '(*i)\n', (3988, 3992), True, 'import numpy as np\n'), ((1164, 1188), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'int'}), '(10, dtype=int)\n', (1173, 1188), True, 'import numpy as np\n'), ((2345, 2369), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'int'}), '(10, dtype=int)\n', (2354, 2369), True, 'import numpy as np\n'), ((3220, 3243), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', (['sk_chi2', 'i'], {}), '(sk_chi2, i)\n', (3231, 3243), False, 'from sklearn.feature_selection import SelectKBest, chi2 as sk_chi2\n'), ((3286, 3309), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', (['sk_chi2', 'i'], {}), '(sk_chi2, i)\n', (3297, 3309), False, 'from sklearn.feature_selection import SelectKBest, chi2 as sk_chi2\n')]
|
#!/usr/bin/env python3
import pvml
import numpy as np
import matplotlib.pyplot as plt
import argparse
from itertools import zip_longest
_NORMALIZATION = {
"none": lambda *X: (X[0] if len(X) == 1 else X),
"meanvar": pvml.meanvar_normalization,
"minmax": pvml.minmax_normalization,
"maxabs": pvml.maxabs_normalization,
"l2": pvml.l2_normalization,
"l1": pvml.l1_normalization,
"whitening": pvml.whitening,
"pca": pvml.pca
}
def parse_args():
parser = argparse.ArgumentParser("Classification demo")
a = parser.add_argument
a("-r", "--lr", type=float, default=0.01,
help="learning rate (%(default)g)")
a("-l", "--lambda", type=float, dest="lambda_", default=0,
help="regularization coefficient (%(default)g)")
a("-s", "--steps", type=int, default=10000,
help="maximum number of training iterations (%(default)d)")
a("-p", "--plot-every", type=int, default=100,
help="frequency of plotting training data (%(default)d)")
a("-t", "--test", help="test set")
a("-f", "--features", help="Comma-separated feature columns")
a("-n", "--normalization", choices=_NORMALIZATION.keys(),
default="none", help="Feature normalization")
a("-c", "--class", type=int, default=-1, dest="class_",
help="Class column")
a("--seed", type=int, default=171956,
help="Random seed")
a("--confusion-matrix", "-C", action="store_true",
help="Show the confusion matrix.")
a("--dump", action="store_true",
help="Save the decision boundary and other data")
a("--nodraw", action="store_true",
help="Skip drawing the plots")
a("-m", "--model", choices=_MODELS.keys(), default="logreg",
help="Classification model")
a("-k", "--kernel", choices=["rbf", "polynomial"], default="rbf",
help="Kernel function")
a("--kernel-param", type=float, default=2,
help="Parameter of the kernel")
a("--knn-k", type=int, default=0, help="KNN neighbors (default auto)")
a("--classtree-minsize", type=int, default=1,
help="Classification tree minimum node size (%(default)d)")
a("--classtree-diversity", default="gini",
choices=["gini", "entropy", "error"],
help="Classification tree diversity function (%(default)s)")
a("--classtree-cv", type=int, default=5,
help="Cross-validation folds used for pruning (%(default)d)")
a("--mlp-hidden", default="",
help="Comma-separated list of number of hidden neurons")
a("--mlp-momentum", type=float, default=0.99,
help="Momentum term (%(default)g)")
a("--mlp-batch", type=int,
help="Batch size (default: use all training data)")
a("train", help="training set")
return parser.parse_args()
class DemoModel:
def __init__(self, args, binary, iterative=True):
self.lr = args.lr
self.lambda_ = args.lambda_
self.binary = binary
self.iterative = iterative
self.plot_every = args.plot_every
self.draw = not args.nodraw
self.confusion_matrix = args.confusion_matrix
self.dump = args.dump
def train(self, X, Y, Xtest, Ytest, steps):
st = self.plot_every
iterations = []
train_acc = []
test_acc = []
train_loss = []
test_loss = []
print("Step Train", "" if Xtest is None else "Test")
for step in range(st, steps + st, st):
self.train_step(X, Y, st)
iterations.append(step)
Z, P = self.inference(X)
train_acc.append(100 * (Z == Y).mean())
train_loss.append(self.loss(Y, P))
if Xtest is not None:
Ztest, Ptest = self.inference(Xtest)
test_acc.append(100 * (Ztest == Ytest).mean())
test_loss.append(self.loss(Ytest, Ptest))
self.plot_curves(0, "Accuracy (%)", iterations, train_acc,
test_acc)
self.plot_curves(1, "Loss", iterations, train_loss, test_loss)
self.plot_confusion(4, "Confusion matrix (train)", Z, Y)
if X.shape[1] == 2:
self.plot_data(2, "Training set", X, Y)
if Xtest is not None:
self.plot_data(3, "Test set", Xtest, Ytest)
if Xtest is None:
print("{} {:.2f}%".format(step, train_acc[-1]))
else:
self.plot_confusion(5, "Confusion matrix (test)", Ztest, Ytest)
print("{} {:.2f}% {:.2f}%".format(step, train_acc[-1],
test_acc[-1]))
plt.pause(0.0001)
if not self.iterative or (self.draw and not plt.fignum_exists(0)):
break
if self.dump:
with open("dump.txt", "wt") as f:
for t in zip_longest(iterations, train_acc, test_acc,
train_loss, test_loss):
row = (x if x is not None else "" for x in t)
print("{} {} {} {} {}".format(*row), file=f)
def plot_curves(self, fignum, title, iters, train, test):
train = [x for x in train if x is not None]
test = [x for x in test if x is not None]
if not self.draw or (not train and not test):
return
plt.figure(fignum)
plt.clf()
plt.title(title)
plt.xlabel("Iterations")
if train:
plt.plot(iters, train)
if test:
plt.plot(iters, test)
plt.legend(["train", "test"])
def plot_data(self, fignum, title, X, Y, resolution=200):
if not self.draw:
return
plt.figure(fignum)
plt.clf()
plt.title(title)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.coolwarm)
xmin, xmax = plt.gca().get_xlim()
ymin, ymax = plt.gca().get_ylim()
ax = np.linspace(xmin, xmax, resolution)
ay = np.linspace(ymin, ymax, resolution)
gx, gy = np.meshgrid(ax, ay)
data = np.vstack((gx.reshape(-1), gy.reshape(-1))).T
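        # Evaluate the model on every grid point to trace the decision boundary.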
v = self.inference(data)[1]
if v.ndim == 1:
v = v.reshape(gx.shape)
plt.contour(gx, gy, v, [0.5], cmap=plt.cm.coolwarm)
self.dump_contour(gx, gy, v - 0.5, title)
elif v.shape[1] == 2:
v = v[:, 0] - v[:, 1]
v = v.reshape(gx.shape)
plt.contour(gx, gy, v, [0.0], cmap=plt.cm.coolwarm)
self.dump_contour(gx, gy, v, title)
else:
values = np.arange(v.shape[1] - 1) + 0.5
v = v.argmax(1)
v = v.reshape(gx.shape)
plt.contour(gx, gy, v, values, cmap=plt.cm.coolwarm)
def dump_contour(self, gx, gy, v, title):
if self.dump:
with open(f"contour-{title}.txt".replace(" ", "_"), "w") as f:
for i in range(v.shape[0]):
for j in range(v.shape[1]):
print(gx[i, j], gy[i, j], v[i, j], file=f)
print(file=f)
def plot_confusion(self, fignum, title, predictions, labels):
if not self.draw or not self.confusion_matrix:
return
klasses = max(predictions.max(), labels.max()) + 1
plt.figure(fignum)
plt.clf()
plt.title(title)
cmat = np.bincount(klasses * labels + predictions,
minlength=klasses ** 2)
cmat = cmat.reshape(klasses, klasses)
cmat = 100 * cmat / np.maximum(1, cmat.sum(1, keepdims=True))
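        # Each row of cmat now holds percentages over the samples of that true class.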
im = plt.imshow(cmat, vmin=0, vmax=100, cmap="OrRd")
plt.gca().set_xticks(np.arange(klasses))
plt.gca().set_yticks(np.arange(klasses))
colors = ("black", "white")
for i in range(klasses):
for j in range(klasses):
val = cmat[i, j]
color = (colors[0] if val < 50 else colors[1])
im.axes.text(j, i, "%.1f" % val, color=color,
horizontalalignment="center",
verticalalignment="center")
def train_step(self, X, Y, steps):
pass
def inference(self, X):
pass
def loss(self, Y, P):
pass
_MODELS = {}
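# Registry mapping --model names to DemoModel subclasses; it is filled by the decorator below.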
def _register_model(name):
def f(cls):
_MODELS[name] = cls
return cls
return f
@_register_model("logreg")
class LogisticRegressionModel(DemoModel):
def __init__(self, args):
super().__init__(args, True)
self.w = None
self.b = 0
def train_step(self, X, Y, steps):
ret = pvml.logreg_train(X, Y, lr=self.lr,
lambda_=self.lambda_, steps=steps,
init_w=self.w, init_b=self.b)
self.w, self.b = ret
def inference(self, X):
P = pvml.logreg_inference(X, self.w, self.b)
return (P > 0.5).astype(int), P
def loss(self, Y, P):
return pvml.binary_cross_entropy(Y, P)
@_register_model("logreg_l1")
class LogisticRegressionL1Model(LogisticRegressionModel):
def train_step(self, X, Y, steps):
ret = pvml.logreg_l1_train(X, Y, lr=self.lr,
lambda_=self.lambda_, steps=steps,
init_w=self.w, init_b=self.b)
self.w, self.b = ret
@_register_model("ksvm")
class KernelSVMModel(DemoModel):
def __init__(self, args):
super().__init__(args, True)
self.alpha = None
self.b = 0
self.Xtrain = None
self.kfun = args.kernel
self.kparam = args.kernel_param
def train_step(self, X, Y, steps):
self.Xtrain = X
ret = pvml.ksvm_train(X, Y, self.kfun, self.kparam,
lr=self.lr, lambda_=self.lambda_,
steps=steps, init_alpha=self.alpha,
init_b=self.b)
self.alpha, self.b = ret
def inference(self, X):
ret = pvml.ksvm_inference(X, self.Xtrain, self.alpha, self.b,
self.kfun, self.kparam)
labels, logits = ret
return labels, logits + 0.5
def loss(self, Y, P):
return pvml.hinge_loss(Y, P - 0.5)
@_register_model("svm")
class LinearSVMModel(DemoModel):
def __init__(self, args):
super().__init__(args, True)
self.w = None
self.b = 0
def train_step(self, X, Y, steps):
ret = pvml.svm_train(X, Y, lr=self.lr, lambda_=self.lambda_,
steps=steps, init_w=self.w,
init_b=self.b)
self.w, self.b = ret
def inference(self, X):
labels, logits = pvml.svm_inference(X, self.w, self.b)
return labels, logits + 0.5
def loss(self, Y, P):
return pvml.hinge_loss(Y, P - 0.5)
@_register_model("multinomial")
class MultinomialLogisticRegressionModel(DemoModel):
def __init__(self, args):
super().__init__(args, False)
self.w = None
self.b = None
def train_step(self, X, Y, steps):
self.w, self.b = pvml.multinomial_logreg_train(
X, Y, lr=self.lr,
lambda_=self.lambda_,
steps=steps, init_w=self.w,
init_b=self.b)
def inference(self, X):
P = pvml.multinomial_logreg_inference(X, self.w, self.b)
Z = np.argmax(P, 1)
return Z, P
def loss(self, Y, P):
return pvml.cross_entropy(Y, P)
@_register_model("ovo_svm")
class OvoSVMModel(DemoModel):
def __init__(self, args):
super().__init__(args, False)
self.W = None
self.b = None
def train_step(self, X, Y, steps):
ret = pvml.one_vs_one_svm_train(X, Y, lr=self.lr, lambda_=self.lambda_,
steps=steps, init_w=self.W,
init_b=self.b)
self.W, self.b = ret
def inference(self, X):
return pvml.one_vs_one_svm_inference(X, self.W, self.b)
@_register_model("ovr_svm")
class OvrSVMModel(DemoModel):
def __init__(self, args):
super().__init__(args, False)
self.W = None
self.b = None
def train_step(self, X, Y, steps):
ret = pvml.one_vs_rest_svm_train(X, Y, lr=self.lr, lambda_=self.lambda_,
steps=steps, init_w=self.W,
init_b=self.b)
self.W, self.b = ret
def inference(self, X):
return pvml.one_vs_rest_svm_inference(X, self.W, self.b)
@_register_model("ovo_ksvm")
class OvoKSVMModel(DemoModel):
def __init__(self, args):
super().__init__(args, False)
self.Xtrain = None
self.alpha = None
self.b = None
self.kfun = args.kernel
self.kparam = args.kernel_param
def train_step(self, X, Y, steps):
self.Xtrain = X.copy()
ret = pvml.one_vs_one_ksvm_train(X, Y, self.kfun, self.kparam,
lr=self.lr, lambda_=self.lambda_,
steps=steps, init_alpha=self.alpha,
init_b=self.b)
self.alpha, self.b = ret
def inference(self, X):
return pvml.one_vs_one_ksvm_inference(X, self.Xtrain, self.alpha, self.b,
self.kfun, self.kparam)
@_register_model("ovr_ksvm")
class OvrKSVMModel(DemoModel):
def __init__(self, args):
super().__init__(args, False)
self.Xtrain = None
self.alpha = None
self.b = None
self.kfun = args.kernel
self.kparam = args.kernel_param
def train_step(self, X, Y, steps):
self.Xtrain = X.copy()
ret = pvml.one_vs_rest_ksvm_train(X, Y, self.kfun, self.kparam,
lr=self.lr, lambda_=self.lambda_,
steps=steps, init_alpha=self.alpha,
init_b=self.b)
self.alpha, self.b = ret
def inference(self, X):
return pvml.one_vs_rest_ksvm_inference(X, self.Xtrain, self.alpha, self.b,
self.kfun, self.kparam)
@_register_model("hgda")
class HeteroscedasticGDA(DemoModel):
def __init__(self, args):
super().__init__(args, False, False)
self.means = None
        self.invcovs = None  # name matches the attribute set in train_step and read in inference
self.priors = None
def train_step(self, X, Y, steps):
ret = pvml.hgda_train(X, Y)
self.means, self.invcovs, self.priors = ret
def inference(self, X):
ret = pvml.hgda_inference(X, self.means, self.invcovs,
self.priors)
labels, scores = ret
return labels, scores
@_register_model("ogda")
class OmoscedasticGDA(DemoModel):
def __init__(self, args):
super().__init__(args, False, False)
self.w = None
self.b = None
def train_step(self, X, Y, steps):
self.w, self.b = pvml.ogda_train(X, Y)
def inference(self, X):
labels, scores = pvml.ogda_inference(X, self.w, self.b)
return labels, scores
@_register_model("mindist")
class MinimumDistanceClassifier(DemoModel):
def __init__(self, args):
super().__init__(args, False, False)
self.means = None
def train_step(self, X, Y, steps):
self.means = pvml.mindist_train(X, Y)
def inference(self, X):
labels, scores = pvml.mindist_inference(X, self.means)
return labels, scores
@_register_model("categorical_nb")
class CategoricalNaiveBayes(DemoModel):
def __init__(self, args):
super().__init__(args, False, False)
self.probs = None
self.priors = None
def train_step(self, X, Y, steps):
ret = pvml.categorical_naive_bayes_train(X, Y)
self.probs, self.priors = ret
def inference(self, X):
ret = pvml.categorical_naive_bayes_inference(X, self.probs,
self.priors)
labels, scores = ret
return ret
@_register_model("multinomial_nb")
class MultinomialNaiveBayes(DemoModel):
def __init__(self, args):
super().__init__(args, False, False)
self.w = None
self.b = None
def train_step(self, X, Y, steps):
ret = pvml.multinomial_naive_bayes_train(X, Y)
self.w, self.b = ret
def inference(self, X):
ret = pvml.multinomial_naive_bayes_inference(X, self.w,
self.b)
labels, scores = ret
return ret
@_register_model("gaussian_nb")
class GaussianNaiveBayes(DemoModel):
def __init__(self, args):
super().__init__(args, False, False)
self.means = None
self.vars = None
self.priors = None
def train_step(self, X, Y, steps):
ret = pvml.gaussian_naive_bayes_train(X, Y)
self.means, self.vars, self.priors = ret
def inference(self, X):
ret = pvml.gaussian_naive_bayes_inference(X, self.means,
self.vars,
self.priors)
return ret
@_register_model("classtree")
class ClassificationTree(DemoModel):
def __init__(self, args):
super().__init__(args, False, False)
self.tree = pvml.ClassificationTree()
self.minsize = args.classtree_minsize
self.cv = args.classtree_cv
self.diversity = args.classtree_diversity
def train_step(self, X, Y, steps):
self.tree.train(X, Y, minsize=self.minsize,
diversity=self.diversity, pruning_cv=self.cv)
def inference(self, X):
ret = self.tree.inference(X)
return ret
@_register_model("perceptron")
class Perceptron(DemoModel):
def __init__(self, args):
super().__init__(args, True)
self.w = None
self.b = 0
def train_step(self, X, Y, steps):
ret = pvml.perceptron_train(X, Y, steps, init_w=self.w,
init_b=self.b)
self.w, self.b = ret
def inference(self, X):
ret = pvml.perceptron_inference(X, self.w, self.b)
return ret
@_register_model("knn")
class KNN(DemoModel):
def __init__(self, args):
super().__init__(args, False, False)
self.X = None
self.Y = None
self.k = args.knn_k
def train_step(self, X, Y, steps):
self.X = X.copy()
self.Y = Y.copy()
if self.k < 1:
print("Select K... ", end="", flush=True)
self.k, acc = pvml.knn_select_k(X, Y)
print("{} ({:.3f}%)".format(self.k, acc * 100))
def inference(self, X):
ret = pvml.knn_inference(X, self.X, self.Y, self.k)
return ret
@_register_model("kmeans")
class KMeans(DemoModel):
def __init__(self, args):
super().__init__(args, False)
self.k = 2
self.centroids = None
def train_step(self, X, Y, steps):
new_k = Y.max() + 1
if new_k > self.k:
# If the classes change centroids are reset
self.centroids = None
self.k = new_k
self.centroids = pvml.kmeans_train(X, self.k, steps=steps,
init_centroids=self.centroids)
self._sort_centroids(X, Y)
def inference(self, X):
ret = pvml.kmeans_inference(X, self.centroids)
return ret
def _sort_centroids(self, X, Y):
# K-means labels do not correspond to training labels. A
# categorical classifier is used to reorder the centroids to
# minimize the error.
P, _ = pvml.kmeans_inference(X, self.centroids)
probs, priors = pvml.categorical_naive_bayes_train(P[:, None], Y)
YK = np.arange(self.k)[:, None]
Q, _ = pvml.categorical_naive_bayes_inference(YK, probs, priors)
ii = np.argsort(Q)
self.centroids = self.centroids[ii, :]
@_register_model("mlp")
class MultiLayerPerceptron(DemoModel):
def __init__(self, args):
super().__init__(args, False)
self.net = None
self.hidden = [int(x) for x in args.mlp_hidden.split(",") if x.strip()]
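        # An empty --mlp-hidden string yields a network with no hidden layers.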
self.momentum = args.mlp_momentum
self.batch = args.mlp_batch
def train_step(self, X, Y, steps):
if self.net is None:
counts = [X.shape[1]] + self.hidden + [Y.max() + 1]
self.net = pvml.MLP(counts)
self.net.train(X, Y, lr=self.lr, lambda_=self.lambda_,
momentum=self.momentum, steps=steps,
batch=self.batch)
def inference(self, X):
labels, scores = self.net.inference(X)
return labels, scores
def loss(self, Y, P):
return self.net.loss(Y, P)
def select_features(X, Y, features, class_):
if features is None and class_ == -1:
return X, Y
if features is None:
features = np.arange(X.shape[1] - 1)
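        # Default when only a class column is given: use every column except the last as features.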
else:
features = np.array(list(map(int, features.split(","))))
data = np.concatenate((X, Y[:, None]), 1)
X = data[:, features]
Y = data[:, class_]
return X, Y
def normalization(X, Xtest, fun):
if Xtest is None:
return _NORMALIZATION[fun](X), None
else:
return _NORMALIZATION[fun](X, Xtest)
def main():
args = parse_args()
np.random.seed(args.seed)
X, Y = pvml.load_dataset(args.train)
print("Training set loaded: {} samples, {} features, {} classes".format(
X.shape[0], X.shape[1], Y.max() + 1))
X, Y = select_features(X, Y, args.features, args.class_)
if args.test:
Xtest, Ytest = pvml.load_dataset(args.test)
print("Test set loaded: {} samples, {} features, {} classes".format(
Xtest.shape[0], Xtest.shape[1], Ytest.max() + 1))
Xtest, Ytest = select_features(Xtest, Ytest, args.features,
args.class_)
else:
Xtest, Ytest = None, None
X, Xtest = normalization(X, Xtest, args.normalization)
model = _MODELS[args.model](args)
if model.binary:
Y = (Y > 0).astype(int)
if Ytest is not None:
Ytest = (Ytest > 0).astype(int)
plt.ion()
model.train(X, Y, Xtest, Ytest, args.steps)
plt.ioff()
print("TRAINING COMPLETED")
plt.show()
if __name__ == "__main__":
main()
|
[
"pvml.logreg_l1_train",
"pvml.kmeans_train",
"pvml.svm_inference",
"pvml.ksvm_train",
"numpy.argsort",
"pvml.ClassificationTree",
"pvml.binary_cross_entropy",
"pvml.ogda_inference",
"pvml.svm_train",
"pvml.perceptron_inference",
"pvml.hgda_train",
"numpy.arange",
"matplotlib.pyplot.imshow",
"pvml.one_vs_one_ksvm_inference",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"pvml.one_vs_one_ksvm_train",
"pvml.categorical_naive_bayes_inference",
"pvml.hinge_loss",
"pvml.multinomial_logreg_train",
"pvml.one_vs_one_svm_train",
"pvml.one_vs_rest_ksvm_inference",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.contour",
"pvml.categorical_naive_bayes_train",
"numpy.random.seed",
"numpy.concatenate",
"matplotlib.pyplot.scatter",
"pvml.knn_select_k",
"numpy.meshgrid",
"pvml.knn_inference",
"pvml.one_vs_rest_svm_inference",
"pvml.mindist_inference",
"pvml.kmeans_inference",
"pvml.load_dataset",
"pvml.one_vs_one_svm_inference",
"pvml.gaussian_naive_bayes_train",
"matplotlib.pyplot.gca",
"itertools.zip_longest",
"matplotlib.pyplot.ioff",
"numpy.argmax",
"pvml.gaussian_naive_bayes_inference",
"pvml.ksvm_inference",
"pvml.logreg_train",
"pvml.MLP",
"pvml.one_vs_rest_ksvm_train",
"pvml.multinomial_logreg_inference",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.title",
"matplotlib.pyplot.pause",
"numpy.bincount",
"pvml.mindist_train",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"pvml.perceptron_train",
"matplotlib.pyplot.fignum_exists",
"pvml.multinomial_naive_bayes_train",
"matplotlib.pyplot.clf",
"pvml.logreg_inference",
"pvml.hgda_inference",
"matplotlib.pyplot.figure",
"pvml.multinomial_naive_bayes_inference",
"pvml.one_vs_rest_svm_train",
"pvml.ogda_train",
"pvml.cross_entropy"
] |
[((490, 536), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Classification demo"""'], {}), "('Classification demo')\n", (513, 536), False, 'import argparse\n'), ((21042, 21076), 'numpy.concatenate', 'np.concatenate', (['(X, Y[:, None])', '(1)'], {}), '((X, Y[:, None]), 1)\n', (21056, 21076), True, 'import numpy as np\n'), ((21342, 21367), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (21356, 21367), True, 'import numpy as np\n'), ((21379, 21408), 'pvml.load_dataset', 'pvml.load_dataset', (['args.train'], {}), '(args.train)\n', (21396, 21408), False, 'import pvml\n'), ((22198, 22207), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (22205, 22207), True, 'import matplotlib.pyplot as plt\n'), ((22260, 22270), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (22268, 22270), True, 'import matplotlib.pyplot as plt\n'), ((22307, 22317), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22315, 22317), True, 'import matplotlib.pyplot as plt\n'), ((5291, 5309), 'matplotlib.pyplot.figure', 'plt.figure', (['fignum'], {}), '(fignum)\n', (5301, 5309), True, 'import matplotlib.pyplot as plt\n'), ((5318, 5327), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5325, 5327), True, 'import matplotlib.pyplot as plt\n'), ((5336, 5352), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5345, 5352), True, 'import matplotlib.pyplot as plt\n'), ((5361, 5385), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {}), "('Iterations')\n", (5371, 5385), True, 'import matplotlib.pyplot as plt\n'), ((5648, 5666), 'matplotlib.pyplot.figure', 'plt.figure', (['fignum'], {}), '(fignum)\n', (5658, 5666), True, 'import matplotlib.pyplot as plt\n'), ((5675, 5684), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5682, 5684), True, 'import matplotlib.pyplot as plt\n'), ((5693, 5709), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5702, 5709), True, 'import matplotlib.pyplot as plt\n'), ((5718, 5774), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': 'Y', 'cmap': 'plt.cm.coolwarm'}), '(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.coolwarm)\n', (5729, 5774), True, 'import matplotlib.pyplot as plt\n'), ((5872, 5907), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'resolution'], {}), '(xmin, xmax, resolution)\n', (5883, 5907), True, 'import numpy as np\n'), ((5921, 5956), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'resolution'], {}), '(ymin, ymax, resolution)\n', (5932, 5956), True, 'import numpy as np\n'), ((5974, 5993), 'numpy.meshgrid', 'np.meshgrid', (['ax', 'ay'], {}), '(ax, ay)\n', (5985, 5993), True, 'import numpy as np\n'), ((7222, 7240), 'matplotlib.pyplot.figure', 'plt.figure', (['fignum'], {}), '(fignum)\n', (7232, 7240), True, 'import matplotlib.pyplot as plt\n'), ((7249, 7258), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7256, 7258), True, 'import matplotlib.pyplot as plt\n'), ((7267, 7283), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7276, 7283), True, 'import matplotlib.pyplot as plt\n'), ((7299, 7366), 'numpy.bincount', 'np.bincount', (['(klasses * labels + predictions)'], {'minlength': '(klasses ** 2)'}), '(klasses * labels + predictions, minlength=klasses ** 2)\n', (7310, 7366), True, 'import numpy as np\n'), ((7523, 7570), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cmat'], {'vmin': '(0)', 'vmax': '(100)', 'cmap': '"""OrRd"""'}), "(cmat, vmin=0, vmax=100, cmap='OrRd')\n", (7533, 7570), True, 'import matplotlib.pyplot 
as plt\n'), ((8537, 8641), 'pvml.logreg_train', 'pvml.logreg_train', (['X', 'Y'], {'lr': 'self.lr', 'lambda_': 'self.lambda_', 'steps': 'steps', 'init_w': 'self.w', 'init_b': 'self.b'}), '(X, Y, lr=self.lr, lambda_=self.lambda_, steps=steps,\n init_w=self.w, init_b=self.b)\n', (8554, 8641), False, 'import pvml\n'), ((8772, 8812), 'pvml.logreg_inference', 'pvml.logreg_inference', (['X', 'self.w', 'self.b'], {}), '(X, self.w, self.b)\n', (8793, 8812), False, 'import pvml\n'), ((8895, 8926), 'pvml.binary_cross_entropy', 'pvml.binary_cross_entropy', (['Y', 'P'], {}), '(Y, P)\n', (8920, 8926), False, 'import pvml\n'), ((9070, 9177), 'pvml.logreg_l1_train', 'pvml.logreg_l1_train', (['X', 'Y'], {'lr': 'self.lr', 'lambda_': 'self.lambda_', 'steps': 'steps', 'init_w': 'self.w', 'init_b': 'self.b'}), '(X, Y, lr=self.lr, lambda_=self.lambda_, steps=steps,\n init_w=self.w, init_b=self.b)\n', (9090, 9177), False, 'import pvml\n'), ((9622, 9757), 'pvml.ksvm_train', 'pvml.ksvm_train', (['X', 'Y', 'self.kfun', 'self.kparam'], {'lr': 'self.lr', 'lambda_': 'self.lambda_', 'steps': 'steps', 'init_alpha': 'self.alpha', 'init_b': 'self.b'}), '(X, Y, self.kfun, self.kparam, lr=self.lr, lambda_=self.\n lambda_, steps=steps, init_alpha=self.alpha, init_b=self.b)\n', (9637, 9757), False, 'import pvml\n'), ((9919, 9998), 'pvml.ksvm_inference', 'pvml.ksvm_inference', (['X', 'self.Xtrain', 'self.alpha', 'self.b', 'self.kfun', 'self.kparam'], {}), '(X, self.Xtrain, self.alpha, self.b, self.kfun, self.kparam)\n', (9938, 9998), False, 'import pvml\n'), ((10140, 10167), 'pvml.hinge_loss', 'pvml.hinge_loss', (['Y', '(P - 0.5)'], {}), '(Y, P - 0.5)\n', (10155, 10167), False, 'import pvml\n'), ((10389, 10491), 'pvml.svm_train', 'pvml.svm_train', (['X', 'Y'], {'lr': 'self.lr', 'lambda_': 'self.lambda_', 'steps': 'steps', 'init_w': 'self.w', 'init_b': 'self.b'}), '(X, Y, lr=self.lr, lambda_=self.lambda_, steps=steps, init_w=\n self.w, init_b=self.b)\n', (10403, 10491), False, 'import pvml\n'), ((10628, 10665), 'pvml.svm_inference', 'pvml.svm_inference', (['X', 'self.w', 'self.b'], {}), '(X, self.w, self.b)\n', (10646, 10665), False, 'import pvml\n'), ((10744, 10771), 'pvml.hinge_loss', 'pvml.hinge_loss', (['Y', '(P - 0.5)'], {}), '(Y, P - 0.5)\n', (10759, 10771), False, 'import pvml\n'), ((11036, 11153), 'pvml.multinomial_logreg_train', 'pvml.multinomial_logreg_train', (['X', 'Y'], {'lr': 'self.lr', 'lambda_': 'self.lambda_', 'steps': 'steps', 'init_w': 'self.w', 'init_b': 'self.b'}), '(X, Y, lr=self.lr, lambda_=self.lambda_, steps\n =steps, init_w=self.w, init_b=self.b)\n', (11065, 11153), False, 'import pvml\n'), ((11239, 11291), 'pvml.multinomial_logreg_inference', 'pvml.multinomial_logreg_inference', (['X', 'self.w', 'self.b'], {}), '(X, self.w, self.b)\n', (11272, 11291), False, 'import pvml\n'), ((11304, 11319), 'numpy.argmax', 'np.argmax', (['P', '(1)'], {}), '(P, 1)\n', (11313, 11319), True, 'import numpy as np\n'), ((11382, 11406), 'pvml.cross_entropy', 'pvml.cross_entropy', (['Y', 'P'], {}), '(Y, P)\n', (11400, 11406), False, 'import pvml\n'), ((11633, 11746), 'pvml.one_vs_one_svm_train', 'pvml.one_vs_one_svm_train', (['X', 'Y'], {'lr': 'self.lr', 'lambda_': 'self.lambda_', 'steps': 'steps', 'init_w': 'self.W', 'init_b': 'self.b'}), '(X, Y, lr=self.lr, lambda_=self.lambda_, steps=\n steps, init_w=self.W, init_b=self.b)\n', (11658, 11746), False, 'import pvml\n'), ((11895, 11943), 'pvml.one_vs_one_svm_inference', 'pvml.one_vs_one_svm_inference', (['X', 'self.W', 'self.b'], {}), '(X, self.W, self.b)\n', (11924, 
11943), False, 'import pvml\n'), ((12170, 12284), 'pvml.one_vs_rest_svm_train', 'pvml.one_vs_rest_svm_train', (['X', 'Y'], {'lr': 'self.lr', 'lambda_': 'self.lambda_', 'steps': 'steps', 'init_w': 'self.W', 'init_b': 'self.b'}), '(X, Y, lr=self.lr, lambda_=self.lambda_, steps=\n steps, init_w=self.W, init_b=self.b)\n', (12196, 12284), False, 'import pvml\n'), ((12435, 12484), 'pvml.one_vs_rest_svm_inference', 'pvml.one_vs_rest_svm_inference', (['X', 'self.W', 'self.b'], {}), '(X, self.W, self.b)\n', (12465, 12484), False, 'import pvml\n'), ((12847, 12992), 'pvml.one_vs_one_ksvm_train', 'pvml.one_vs_one_ksvm_train', (['X', 'Y', 'self.kfun', 'self.kparam'], {'lr': 'self.lr', 'lambda_': 'self.lambda_', 'steps': 'steps', 'init_alpha': 'self.alpha', 'init_b': 'self.b'}), '(X, Y, self.kfun, self.kparam, lr=self.lr,\n lambda_=self.lambda_, steps=steps, init_alpha=self.alpha, init_b=self.b)\n', (12873, 12992), False, 'import pvml\n'), ((13189, 13284), 'pvml.one_vs_one_ksvm_inference', 'pvml.one_vs_one_ksvm_inference', (['X', 'self.Xtrain', 'self.alpha', 'self.b', 'self.kfun', 'self.kparam'], {}), '(X, self.Xtrain, self.alpha, self.b, self.\n kfun, self.kparam)\n', (13219, 13284), False, 'import pvml\n'), ((13688, 13834), 'pvml.one_vs_rest_ksvm_train', 'pvml.one_vs_rest_ksvm_train', (['X', 'Y', 'self.kfun', 'self.kparam'], {'lr': 'self.lr', 'lambda_': 'self.lambda_', 'steps': 'steps', 'init_alpha': 'self.alpha', 'init_b': 'self.b'}), '(X, Y, self.kfun, self.kparam, lr=self.lr,\n lambda_=self.lambda_, steps=steps, init_alpha=self.alpha, init_b=self.b)\n', (13715, 13834), False, 'import pvml\n'), ((14034, 14130), 'pvml.one_vs_rest_ksvm_inference', 'pvml.one_vs_rest_ksvm_inference', (['X', 'self.Xtrain', 'self.alpha', 'self.b', 'self.kfun', 'self.kparam'], {}), '(X, self.Xtrain, self.alpha, self.b, self.\n kfun, self.kparam)\n', (14065, 14130), False, 'import pvml\n'), ((14445, 14466), 'pvml.hgda_train', 'pvml.hgda_train', (['X', 'Y'], {}), '(X, Y)\n', (14460, 14466), False, 'import pvml\n'), ((14562, 14623), 'pvml.hgda_inference', 'pvml.hgda_inference', (['X', 'self.means', 'self.invcovs', 'self.priors'], {}), '(X, self.means, self.invcovs, self.priors)\n', (14581, 14623), False, 'import pvml\n'), ((14962, 14983), 'pvml.ogda_train', 'pvml.ogda_train', (['X', 'Y'], {}), '(X, Y)\n', (14977, 14983), False, 'import pvml\n'), ((15038, 15076), 'pvml.ogda_inference', 'pvml.ogda_inference', (['X', 'self.w', 'self.b'], {}), '(X, self.w, self.b)\n', (15057, 15076), False, 'import pvml\n'), ((15343, 15367), 'pvml.mindist_train', 'pvml.mindist_train', (['X', 'Y'], {}), '(X, Y)\n', (15361, 15367), False, 'import pvml\n'), ((15422, 15459), 'pvml.mindist_inference', 'pvml.mindist_inference', (['X', 'self.means'], {}), '(X, self.means)\n', (15444, 15459), False, 'import pvml\n'), ((15749, 15789), 'pvml.categorical_naive_bayes_train', 'pvml.categorical_naive_bayes_train', (['X', 'Y'], {}), '(X, Y)\n', (15783, 15789), False, 'import pvml\n'), ((15871, 15937), 'pvml.categorical_naive_bayes_inference', 'pvml.categorical_naive_bayes_inference', (['X', 'self.probs', 'self.priors'], {}), '(X, self.probs, self.priors)\n', (15909, 15937), False, 'import pvml\n'), ((16289, 16329), 'pvml.multinomial_naive_bayes_train', 'pvml.multinomial_naive_bayes_train', (['X', 'Y'], {}), '(X, Y)\n', (16323, 16329), False, 'import pvml\n'), ((16402, 16459), 'pvml.multinomial_naive_bayes_inference', 'pvml.multinomial_naive_bayes_inference', (['X', 'self.w', 'self.b'], {}), '(X, self.w, self.b)\n', (16440, 16459), False, 'import pvml\n'), 
((16839, 16876), 'pvml.gaussian_naive_bayes_train', 'pvml.gaussian_naive_bayes_train', (['X', 'Y'], {}), '(X, Y)\n', (16870, 16876), False, 'import pvml\n'), ((16969, 17043), 'pvml.gaussian_naive_bayes_inference', 'pvml.gaussian_naive_bayes_inference', (['X', 'self.means', 'self.vars', 'self.priors'], {}), '(X, self.means, self.vars, self.priors)\n', (17004, 17043), False, 'import pvml\n'), ((17327, 17352), 'pvml.ClassificationTree', 'pvml.ClassificationTree', ([], {}), '()\n', (17350, 17352), False, 'import pvml\n'), ((17956, 18020), 'pvml.perceptron_train', 'pvml.perceptron_train', (['X', 'Y', 'steps'], {'init_w': 'self.w', 'init_b': 'self.b'}), '(X, Y, steps, init_w=self.w, init_b=self.b)\n', (17977, 18020), False, 'import pvml\n'), ((18129, 18173), 'pvml.perceptron_inference', 'pvml.perceptron_inference', (['X', 'self.w', 'self.b'], {}), '(X, self.w, self.b)\n', (18154, 18173), False, 'import pvml\n'), ((18710, 18755), 'pvml.knn_inference', 'pvml.knn_inference', (['X', 'self.X', 'self.Y', 'self.k'], {}), '(X, self.X, self.Y, self.k)\n', (18728, 18755), False, 'import pvml\n'), ((19183, 19255), 'pvml.kmeans_train', 'pvml.kmeans_train', (['X', 'self.k'], {'steps': 'steps', 'init_centroids': 'self.centroids'}), '(X, self.k, steps=steps, init_centroids=self.centroids)\n', (19200, 19255), False, 'import pvml\n'), ((19377, 19417), 'pvml.kmeans_inference', 'pvml.kmeans_inference', (['X', 'self.centroids'], {}), '(X, self.centroids)\n', (19398, 19417), False, 'import pvml\n'), ((19655, 19695), 'pvml.kmeans_inference', 'pvml.kmeans_inference', (['X', 'self.centroids'], {}), '(X, self.centroids)\n', (19676, 19695), False, 'import pvml\n'), ((19720, 19769), 'pvml.categorical_naive_bayes_train', 'pvml.categorical_naive_bayes_train', (['P[:, None]', 'Y'], {}), '(P[:, None], Y)\n', (19754, 19769), False, 'import pvml\n'), ((19825, 19882), 'pvml.categorical_naive_bayes_inference', 'pvml.categorical_naive_bayes_inference', (['YK', 'probs', 'priors'], {}), '(YK, probs, priors)\n', (19863, 19882), False, 'import pvml\n'), ((19896, 19909), 'numpy.argsort', 'np.argsort', (['Q'], {}), '(Q)\n', (19906, 19909), True, 'import numpy as np\n'), ((20930, 20955), 'numpy.arange', 'np.arange', (['(X.shape[1] - 1)'], {}), '(X.shape[1] - 1)\n', (20939, 20955), True, 'import numpy as np\n'), ((21636, 21664), 'pvml.load_dataset', 'pvml.load_dataset', (['args.test'], {}), '(args.test)\n', (21653, 21664), False, 'import pvml\n'), ((4596, 4613), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (4605, 4613), True, 'import matplotlib.pyplot as plt\n'), ((5416, 5438), 'matplotlib.pyplot.plot', 'plt.plot', (['iters', 'train'], {}), '(iters, train)\n', (5424, 5438), True, 'import matplotlib.pyplot as plt\n'), ((5468, 5489), 'matplotlib.pyplot.plot', 'plt.plot', (['iters', 'test'], {}), '(iters, test)\n', (5476, 5489), True, 'import matplotlib.pyplot as plt\n'), ((5502, 5531), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {}), "(['train', 'test'])\n", (5512, 5531), True, 'import matplotlib.pyplot as plt\n'), ((6163, 6214), 'matplotlib.pyplot.contour', 'plt.contour', (['gx', 'gy', 'v', '[0.5]'], {'cmap': 'plt.cm.coolwarm'}), '(gx, gy, v, [0.5], cmap=plt.cm.coolwarm)\n', (6174, 6214), True, 'import matplotlib.pyplot as plt\n'), ((7600, 7618), 'numpy.arange', 'np.arange', (['klasses'], {}), '(klasses)\n', (7609, 7618), True, 'import numpy as np\n'), ((7649, 7667), 'numpy.arange', 'np.arange', (['klasses'], {}), '(klasses)\n', (7658, 7667), True, 'import numpy as np\n'), ((18583, 18606), 
'pvml.knn_select_k', 'pvml.knn_select_k', (['X', 'Y'], {}), '(X, Y)\n', (18600, 18606), False, 'import pvml\n'), ((19783, 19800), 'numpy.arange', 'np.arange', (['self.k'], {}), '(self.k)\n', (19792, 19800), True, 'import numpy as np\n'), ((20428, 20444), 'pvml.MLP', 'pvml.MLP', (['counts'], {}), '(counts)\n', (20436, 20444), False, 'import pvml\n'), ((4808, 4875), 'itertools.zip_longest', 'zip_longest', (['iterations', 'train_acc', 'test_acc', 'train_loss', 'test_loss'], {}), '(iterations, train_acc, test_acc, train_loss, test_loss)\n', (4819, 4875), False, 'from itertools import zip_longest\n'), ((5796, 5805), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5803, 5805), True, 'import matplotlib.pyplot as plt\n'), ((5838, 5847), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5845, 5847), True, 'import matplotlib.pyplot as plt\n'), ((6381, 6432), 'matplotlib.pyplot.contour', 'plt.contour', (['gx', 'gy', 'v', '[0.0]'], {'cmap': 'plt.cm.coolwarm'}), '(gx, gy, v, [0.0], cmap=plt.cm.coolwarm)\n', (6392, 6432), True, 'import matplotlib.pyplot as plt\n'), ((6624, 6676), 'matplotlib.pyplot.contour', 'plt.contour', (['gx', 'gy', 'v', 'values'], {'cmap': 'plt.cm.coolwarm'}), '(gx, gy, v, values, cmap=plt.cm.coolwarm)\n', (6635, 6676), True, 'import matplotlib.pyplot as plt\n'), ((7579, 7588), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7586, 7588), True, 'import matplotlib.pyplot as plt\n'), ((7628, 7637), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7635, 7637), True, 'import matplotlib.pyplot as plt\n'), ((6516, 6541), 'numpy.arange', 'np.arange', (['(v.shape[1] - 1)'], {}), '(v.shape[1] - 1)\n', (6525, 6541), True, 'import numpy as np\n'), ((4670, 4690), 'matplotlib.pyplot.fignum_exists', 'plt.fignum_exists', (['(0)'], {}), '(0)\n', (4687, 4690), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 11 13:46:58 2019
@author: bdgecyt
"""
import cv2
import math
from time import time
import numpy as np
import wrapper
from operator import itemgetter
boxes = []
xCount = 0
yCount = 0
iter = 0
img = 0
def on_mouse(event, x, y, flags, params):
global iter
t = time()
global img
if event == cv2.EVENT_LBUTTONDOWN:
print('Start Mouse Position: '+str(x)+', '+str(y))
sbox = [x, y]
boxes.append(sbox)
# cv2.line(img,pt1=(0,0),pt2=(x,y),color=(255,255,0),thickness=2)
elif event == cv2.EVENT_LBUTTONUP:
print('End Mouse Position: '+str(x)+', '+str(y))
ebox = [x, y]
boxes.append(ebox)
# print boxes
iter += 1
# print iter
def split(start, end, segments):
x_delta = (end[0] - start[0]) / float(segments)
y_delta = (end[1] - start[1]) / float(segments)
points = []
for i in range(1, segments):
points.append([start[0] + i * x_delta, start[1] + i * y_delta])
return [start] + points + [end]
def line_intersection(line1, line2):
xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
div = det(xdiff, ydiff)
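    # A zero determinant means the lines are parallel and have no intersection.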
if div == 0:
raise Exception('lines do not intersect')
d = (det(*line1), det(*line2))
x = det(d, xdiff) / div
y = det(d, ydiff) / div
return x, y
def norm(point1, point2):
xdiff = point1[0] - point2[0]
ydiff = point1[1] - point2[1]
norm = math.sqrt(xdiff*xdiff + ydiff*ydiff)
# print norm
return norm
def orderptinline(pts, vp):
# print("ordering points")
# print(pts)
lengths = [norm(pt, vp) for pt in pts]
lengths= np.argsort(lengths)[::-1]
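    # Indices of the points ordered from farthest to nearest with respect to the vanishing point.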
strlength = ''.join(str(e) for e in lengths)
# print(strlength)
return strlength
def getborderpt(line1, line2):
return line_intersection(line1, line2)
def findAnglebetVP(line, vp):
a = np.array(line[0])
b = np.array(line[1])
c = np.array(vp)
ba = a - b
bc = c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
return np.degrees(angle)
def estimatelength(order,a1,a2,r1,r2, Vanish, response):
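    # The unknown length is recovered from ratios of image distances towards the vanishing point,
    # scaled by the known reference length (response); the formula chosen depends on the point ordering.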
if order == "0123":
# print("order is:" + order)
reflength = (norm(a1, r2)/norm(a1, Vanish))/(norm(r1,r2)/norm(r1,Vanish))*response
# print(reflength)
ref2length = (norm(a2, r2)/norm(a2, Vanish))/(norm(r1,r2)/norm(r1,Vanish))*response
# print(ref2length)
finallength = reflength-ref2length
elif order == "0213":
# print("order is:" + order)
reflength = (norm(a1, r2)/norm(a1, Vanish))/(norm(r1,r2)/norm(r1,Vanish))*response
ref2length = response/(norm(r1, r2)/norm(r1, Vanish))/(norm(a2,r2)/norm(a2,Vanish))
finallength = reflength - ref2length
elif order == "0213":
reflength = (norm(a1, r2)/norm(a1, Vanish))/(norm(r1,r2)/norm(r1,Vanish))*response
ref2length = response/((norm(r1, a2)/norm(r1, Vanish))/(norm(r2,a2)/norm(r2,Vanish))-1)
finallength = reflength + ref2length
elif order == "2031":
reflength = response/(norm(r1, r2)/norm(r1, Vanish))/(norm(a1,r2)/norm(a1,Vanish))
ref2length = reflength/((norm(a1, a2)/norm(a1, Vanish))/(norm(r2,a2)/norm(r2,Vanish))-1)
finallength = reflength + ref2length
elif order == "2301":
reflength = response/((norm(r1, a1)/norm(r1, Vanish))/(norm(r2,a1)/norm(r2,Vanish))-1)
ref2length = (reflength +response)/((norm(r1, a2)/norm(r1, Vanish))/(norm(a1,a2)/norm(a2,Vanish))-1)
finallength = ref2length
else:
finallength = 99999
return finallength
def calibrateframe(img, findref = False):
vps = wrapper.dealAImage(img,"data/result/",True,True,True)
vps = [[i[0], i[1]] for i in vps]
print(vps)
count = 0
# while(True):
#
# # print count
# if iter == 2:
# cv2.destroyAllWindows()
# break
#
# count += 1
# cv2.namedWindow('real image')
# cv2.setMouseCallback('real image', on_mouse, 0)
#
# if len(boxes) != 0:
# for i in range(0,len(boxes), 2):
# # print(i)
# try:
# cv2.line(img,pt1=tuple(boxes[i]),pt2=tuple(boxes[i+1]),color=(0,255,255),thickness=2)
#
# except:
# continue
# cv2.imshow('real image', img)
# if cv2.waitKey(1) == 27:
# cv2.destroyAllWindows()
# break
print(vps)
vps = sorted(vps, key=itemgetter(1))
print(vps)
print(boxes)
xVanish = vps[0]
print ("x vanishing pt:" + str(xVanish))
yVanish = vps[1]
print ("y vanishing pt:" + str(yVanish))
zVanish = vps[2]
print ("z vanishing pt:" + str(zVanish))
if findref == True:
referenceline = [boxes[0], boxes[1]]
referenceline.sort(key = lambda x: norm(x, xVanish), reverse = False)
ang1 = findAnglebetVP(referenceline, xVanish)
print("angles between reference line and xVanish:" + str(ang1))
referenceline.sort(key = lambda x: norm(x, yVanish), reverse = False)
ang2 = findAnglebetVP(referenceline, yVanish)
print("angles between reference line and yVanish:" + str(ang2))
if ang1> ang2:
print("ref vp is Y vanishing point" )
refV= yVanish
ortV= xVanish
if ang2> ang1:
print("ref vp is X vanishing point" )
refV= xVanish
ortV= yVanish
referenceline.sort(key = lambda x: norm(x, refV), reverse = True)
estimateline = [boxes[2], boxes[3]]
estimateline.sort(key = lambda x: norm(x, refV), reverse = True)
response = float(input("Please enter length of reference object: "))
response2 = float(input("Please enter length of measured object: "))
return response, response2, estimateline, referenceline, refV, ortV, zVanish, xVanish, yVanish
else:
return zVanish, xVanish, yVanish
def drawfallarea(img, refV, ortV, zVanish, correctpt, correct2pt):
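    # Relies on the module-level img_shape set in the __main__ block at the bottom of the file.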
nextpt= [int(0.78*img_shape[1]),
int(0.615*img_shape[0])]
droptoVP3 = [nextpt, zVanish]
print("vp3")
print(droptoVP3)
bordervp3= line_intersection(droptoVP3, [(0, img_shape[0]),(img_shape[1], img_shape[0])])
dropline3 = [nextpt, bordervp3]
ptB = line_intersection(dropline3, [correctpt, ortV])
cv2.line(img,(int(correctpt[0]), int(correctpt[1])), (int(ptB[0]), int(ptB[1])),color=(0,0,255),thickness=2)
backline1 = [correct2pt, ortV]
backline2 = [ptB, refV]
ptC= line_intersection(backline1, backline2)
cv2.line(img,(int(correct2pt[0]), int(correct2pt[1])), (int(ptC[0]), int(ptC[1])),color=(0,0,255),thickness=2)
cv2.line(img,(int(ptB[0]), int(ptB[1])), (int(ptC[0]), int(ptC[1])),color=(0,0,255),thickness=2)
def processframe(img, response, response2, estimateline, referenceline, refV, ortV, zVanish, xVanish, yVanish, img_shape):
droptoVP1= [estimateline[0], zVanish]
droptoVP2= [estimateline[1], zVanish]
print("vp1")
print(droptoVP1)
# print(droptoVP1)
# cv2.line(img,(0, int(0.9*img_shape[0])), (img_shape[1], int(0.9*img_shape[0])),color=(0,255,255),thickness=10)
#test line
# cv2.line(img,(0, int(0.8*img_shape[0])), (int(0.78*img_shape[1]), int(0.615*img_shape[0])),color=(0,255,255),thickness=10)
bordervp1= line_intersection(droptoVP1, [(0, img_shape[0]),(img_shape[1], img_shape[0])])
bordervp2= line_intersection(droptoVP2, [(0, img_shape[0]),(img_shape[1], img_shape[0])])
# print(bordervp1)
# print(bordervp2)
dropline1 = [estimateline[0], bordervp1]
dropline2 = [estimateline[1], bordervp2]
refline1 = [referenceline[0],ortV]
refline2 = [referenceline[1],ortV]
print("breaking drop line to segments")
dropline1seg = split(dropline1[0], dropline1[1], 50)
# print(dropline1seg)
finallengths = []
dropline2pts = []
for pt in dropline1seg:
# print(pt)
cv2.circle(img,(int(pt[0]), int(pt[1])), 3, (0,255,255), -1)
# cv2.line(img,(int(pt[0]), int(pt[1])), (int(yVanish[0]), int(yVanish[1])),color=(0,255,255),thickness=2)
intersectDropline2= line_intersection([pt, refV], dropline2)
dropline2pts += [intersectDropline2]
intersectRefline1= line_intersection([pt, refV], refline1)
intersectRefline2= line_intersection([pt, refV], refline2)
cv2.circle(img,(int(intersectDropline2[0]), int(intersectDropline2[1])), 3, (255,0,0), -1)
cv2.circle(img,(int(intersectRefline1[0]), int(intersectRefline1[1])), 3, (0,255,0), -1)
cv2.circle(img,(int(intersectRefline2[0]), int(intersectRefline2[1])), 3, (0,0,255), -1)
ordered = orderptinline([pt, intersectDropline2,intersectRefline1, intersectRefline2] , refV)
finallength = estimatelength(ordered, pt, intersectDropline2,intersectRefline1, intersectRefline2, refV, response)
# reflength = (norm(pt, intersectRefline2)/norm(pt, yVanish))/(norm(intersectRefline1,intersectRefline2)/norm(intersectRefline1,yVanish))*response
# print(reflength)
# ref2length = (norm(intersectDropline2, intersectRefline2)/norm(intersectDropline2, yVanish))/(norm(intersectRefline1,intersectRefline2)/norm(intersectRefline1,yVanish))*response
# print(ref2length)
# finallength = reflength-ref2length
# print("finallength:" +str(finallength))
finallengths += [finallength]
    measurements = [abs(response2 - i) for i in finallengths]
correctpt = dropline1seg[np.argmin(measurements)]
correct2pt = dropline2pts[np.argmin(measurements)]
#if finallength >16 and finallength <18:
cv2.line(img,(int(estimateline[0][0]), int(estimateline[0][1])), (int(estimateline[1][0]), int(estimateline[1][1])),color=(0,255,255),thickness=2)
cv2.line(img,(int(correctpt[0]), int(correctpt[1])), (int(correct2pt[0]), int(correct2pt[1])),color=(0,0,255),thickness=2)
drawfallarea(img, refV, ortV, zVanish, correctpt, correct2pt)
print("nearest measurement:" +str( finallengths[np.argmin(measurements)] ) )
if zVanish:
cv2.line(img,(int(0.5*img.shape[1]), int(0.5*img.shape[0])), (int(zVanish[0]), int(zVanish[1])),color=(0,255,255),thickness=2)
if xVanish:
cv2.line(img,(int(0.5*img.shape[1]), int(0.5*img.shape[0])), (int(xVanish[0]), int(xVanish[1])),color=(0,255,255),thickness=2)
if yVanish:
cv2.line(img,(int(0.5*img.shape[1]), int(0.5*img.shape[0])), (int(yVanish[0]), int(yVanish[1])),color=(0,255,255),thickness=2)
# return img
if __name__ == "__main__":
img = cv2.imread('data\\18.jpg')
# img = cv2.resize(img, None, fx = 0.3,fy = 0.3)
img_shape = img.shape
# cv2.circle(img, (100,900), 5, (0,0,255), 5)
# while(True):
# cv2.imshow('points image', img)
# if cv2.waitKey(1) == 27:
# cv2.destroyAllWindows()
# break
# print(img.shape)
response, response2, estimateline, referenceline, refV, ortV, zVanish, xVanish, yVanish = calibrateframe(img, findref = True)
#
while(True):
print(img.shape)
img = cv2.imread('data\\18.jpg')
# img = cv2.resize(img, None, fx = 0.3,fy = 0.3)
processframe(img, response, response2, estimateline, referenceline, refV, ortV, zVanish, xVanish, yVanish, img_shape)
cv2.imshow('points image', img)
# estimateline[0][0] -= 1
# estimateline[1][0] -= 1
# print("estimate line is:" + str(estimateline))
if cv2.waitKey(1) == 27:
cv2.destroyAllWindows()
break
# img = cv2.blur(img, (3,3))
# img = cv2.resize(img, None, fx = 0.2,fy = 0.2)
# print(img.shape)
|
[
"numpy.arccos",
"math.sqrt",
"wrapper.dealAImage",
"operator.itemgetter",
"cv2.imshow",
"numpy.argsort",
"numpy.array",
"numpy.dot",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.linalg.norm",
"numpy.argmin",
"numpy.degrees",
"time.time",
"cv2.imread"
] |
[((323, 329), 'time.time', 'time', ([], {}), '()\n', (327, 329), False, 'from time import time\n'), ((1635, 1675), 'math.sqrt', 'math.sqrt', (['(xdiff * xdiff + ydiff * ydiff)'], {}), '(xdiff * xdiff + ydiff * ydiff)\n', (1644, 1675), False, 'import math\n'), ((2078, 2095), 'numpy.array', 'np.array', (['line[0]'], {}), '(line[0])\n', (2086, 2095), True, 'import numpy as np\n'), ((2104, 2121), 'numpy.array', 'np.array', (['line[1]'], {}), '(line[1])\n', (2112, 2121), True, 'import numpy as np\n'), ((2130, 2142), 'numpy.array', 'np.array', (['vp'], {}), '(vp)\n', (2138, 2142), True, 'import numpy as np\n'), ((2268, 2291), 'numpy.arccos', 'np.arccos', (['cosine_angle'], {}), '(cosine_angle)\n', (2277, 2291), True, 'import numpy as np\n'), ((2308, 2325), 'numpy.degrees', 'np.degrees', (['angle'], {}), '(angle)\n', (2318, 2325), True, 'import numpy as np\n'), ((3910, 3967), 'wrapper.dealAImage', 'wrapper.dealAImage', (['img', '"""data/result/"""', '(True)', '(True)', '(True)'], {}), "(img, 'data/result/', True, True, True)\n", (3928, 3967), False, 'import wrapper\n'), ((11040, 11066), 'cv2.imread', 'cv2.imread', (['"""data\\\\18.jpg"""'], {}), "('data\\\\18.jpg')\n", (11050, 11066), False, 'import cv2\n'), ((1837, 1856), 'numpy.argsort', 'np.argsort', (['lengths'], {}), '(lengths)\n', (1847, 1856), True, 'import numpy as np\n'), ((2197, 2211), 'numpy.dot', 'np.dot', (['ba', 'bc'], {}), '(ba, bc)\n', (2203, 2211), True, 'import numpy as np\n'), ((9958, 9981), 'numpy.argmin', 'np.argmin', (['measurements'], {}), '(measurements)\n', (9967, 9981), True, 'import numpy as np\n'), ((10013, 10036), 'numpy.argmin', 'np.argmin', (['measurements'], {}), '(measurements)\n', (10022, 10036), True, 'import numpy as np\n'), ((11557, 11583), 'cv2.imread', 'cv2.imread', (['"""data\\\\18.jpg"""'], {}), "('data\\\\18.jpg')\n", (11567, 11583), False, 'import cv2\n'), ((11774, 11805), 'cv2.imshow', 'cv2.imshow', (['"""points image"""', 'img'], {}), "('points image', img)\n", (11784, 11805), False, 'import cv2\n'), ((2215, 2233), 'numpy.linalg.norm', 'np.linalg.norm', (['ba'], {}), '(ba)\n', (2229, 2233), True, 'import numpy as np\n'), ((2236, 2254), 'numpy.linalg.norm', 'np.linalg.norm', (['bc'], {}), '(bc)\n', (2250, 2254), True, 'import numpy as np\n'), ((4779, 4792), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (4789, 4792), False, 'from operator import itemgetter\n'), ((11939, 11953), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (11950, 11953), False, 'import cv2\n'), ((11973, 11996), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (11994, 11996), False, 'import cv2\n'), ((10485, 10508), 'numpy.argmin', 'np.argmin', (['measurements'], {}), '(measurements)\n', (10494, 10508), True, 'import numpy as np\n')]
|
from flask import Flask,request,render_template
import numpy as np
from Reccomending_functions import item_item_cf,user_user_cf,rank_matrix_factorize
from Database_connector import fetch_from_database
import random
#ML Packages
asd = []
app = Flask(__name__)
@app.route('/')
def index():
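    """Pick 12 random movie ids from 1-300, remember them in the
    module-level `asd`, fetch their details and render the selection
    page."""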
global asd
randindex = [x for x in range(1,301)]
random.shuffle(randindex)
movies_list = randindex[0:12]
asd = movies_list
display_list = fetch_from_database(movies_list)
return render_template("Display_movies.html",display_list=display_list)
@app.route('/recommendations',methods=['POST','GET'])
def recommend():
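    """Read the ratings submitted for the 12 displayed movies, build a
    1x301 rating vector, run the collaborative-filtering method selected in
    the form (user-user, item-item or rank matrix factorisation) and render
    the recommended movies."""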
if request.method != 'POST':
return "Bye-Bye"
movies_list = asd
user_ratings = np.zeros((1,301))
for i in range(len(movies_list)):
        user_ratings[0][movies_list[i]] = float(request.form['movie' + str(i + 1)])
if request.form['recco_method']=="uucf":
recommendend_movies_list = user_user_cf(user_ratings,movies_list)
elif request.form['recco_method']=="iicf":
recommendend_movies_list = item_item_cf(user_ratings, movies_list)
elif request.form['recco_method']=="rf":
recommendend_movies_list = rank_matrix_factorize(user_ratings,movies_list)
print(user_ratings)
recommendend_movies_list = list(recommendend_movies_list)
sasa =[]
for i in recommendend_movies_list:
sasa.append(int(i))
movie_details = fetch_from_database(sasa)
return render_template("Display Recommendations.html",movie_details=movie_details)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
|
[
"flask.render_template",
"Reccomending_functions.item_item_cf",
"random.shuffle",
"flask.Flask",
"Database_connector.fetch_from_database",
"Reccomending_functions.user_user_cf",
"numpy.zeros",
"Reccomending_functions.rank_matrix_factorize"
] |
[((243, 258), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (248, 258), False, 'from flask import Flask, request, render_template\n'), ((349, 374), 'random.shuffle', 'random.shuffle', (['randindex'], {}), '(randindex)\n', (363, 374), False, 'import random\n'), ((450, 482), 'Database_connector.fetch_from_database', 'fetch_from_database', (['movies_list'], {}), '(movies_list)\n', (469, 482), False, 'from Database_connector import fetch_from_database\n'), ((494, 559), 'flask.render_template', 'render_template', (['"""Display_movies.html"""'], {'display_list': 'display_list'}), "('Display_movies.html', display_list=display_list)\n", (509, 559), False, 'from flask import Flask, request, render_template\n'), ((735, 753), 'numpy.zeros', 'np.zeros', (['(1, 301)'], {}), '((1, 301))\n', (743, 753), True, 'import numpy as np\n'), ((1417, 1442), 'Database_connector.fetch_from_database', 'fetch_from_database', (['sasa'], {}), '(sasa)\n', (1436, 1442), False, 'from Database_connector import fetch_from_database\n'), ((1454, 1530), 'flask.render_template', 'render_template', (['"""Display Recommendations.html"""'], {'movie_details': 'movie_details'}), "('Display Recommendations.html', movie_details=movie_details)\n", (1469, 1530), False, 'from flask import Flask, request, render_template\n'), ((942, 981), 'Reccomending_functions.user_user_cf', 'user_user_cf', (['user_ratings', 'movies_list'], {}), '(user_ratings, movies_list)\n', (954, 981), False, 'from Reccomending_functions import item_item_cf, user_user_cf, rank_matrix_factorize\n'), ((1063, 1102), 'Reccomending_functions.item_item_cf', 'item_item_cf', (['user_ratings', 'movies_list'], {}), '(user_ratings, movies_list)\n', (1075, 1102), False, 'from Reccomending_functions import item_item_cf, user_user_cf, rank_matrix_factorize\n'), ((1183, 1231), 'Reccomending_functions.rank_matrix_factorize', 'rank_matrix_factorize', (['user_ratings', 'movies_list'], {}), '(user_ratings, movies_list)\n', (1204, 1231), False, 'from Reccomending_functions import item_item_cf, user_user_cf, rank_matrix_factorize\n')]
|
import sqlalchemy as sa
import numpy as np
import datetime as dt
from faker import Faker
from jinja2 import Environment, PackageLoader
from database.models.core import (
Base,
Products,
Customers,
TransactionDetails,
Transactions,
)
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
PRODUCT_LIST = [
{"name": "hat", "price": 10.99},
{"name": "cap", "price": 6.99},
{"name": "shirt", "price": 50.99},
{"name": "sweater", "price": 69.99},
{"name": "shorts", "price": 49.99},
{"name": "jeans", "price": 39.99},
{"name": "neakers", "price": 32.99},
{"name": "boots", "price": 199.99},
{"name": "coats", "price": 249.99},
{"name": "accessories", "price": 149.99},
]
class DBConn:
def __init__(self, **kwargs):
"""
initialize the attributes of a class.
"""
self.host = kwargs.get("host", "host.docker.internal")
self.username = kwargs.get("username", "henry")
self.password = kwargs.get("password", "<PASSWORD>")
self.database = kwargs.get("database", "henry")
self.schema = kwargs.get("schema", "henry")
self.log = logger
def _get_conn_str(self, database_type):
"""
return the connection string based on database types
"""
if database_type == "postgres":
dbapi = "postgresql"
port = 5438
elif database_type == "mysql":
dbapi = "mysql+pymysql"
port = 3307
return f"{dbapi}://{self.username}:{self.password}@{self.host}:{port}" # noqa: E501
def get_conn(self, database_type):
"""
setup the connection to database
"""
conn_str = self._get_conn_str(database_type)
connection = sa.create_engine(conn_str, echo=True)
return connection
@property
def _database_types(self):
return ["mysql", "postgres"]
def get_session(self, database_type):
conn = self.get_conn(database_type)
Session = sa.orm.sessionmaker(bind=conn)
return Session()
class DataGenerator:
def __init__(self):
self.fake = Faker()
def _get_dates(self):
start_date = dt.date(2021, 1, 1) # set the start date
end_date = dt.datetime.now().date() # set the end date
diff = (end_date - start_date).days # calculate the delta
for i in range(0, diff):
date = start_date + dt.timedelta(days=i) # get each of the data
date = date.strftime("%Y-%m-%d") # convert it into datetime string
yield date
@property
def _name(self):
return self.fake.name()
@property
def _address(self):
return self.fake.address()
@property
def _phone(self):
return self.fake.phone_number()
def _get_email(self, name):
first_name = name.split()[0]
last_name = name.split()[-1]
index = np.random.randint(0, 3)
domains = ["gmail", "yahoo", "outlook"]
email = f"{first_name}.{last_name}@{domains[index]}.com"
return email.lower()
@property
def _product_id(self):
product_ids = list(
range(1, len(PRODUCT_LIST) + 1)
) # a list of [0, ... len(Product_list)+1]
index = np.random.randint(0, len(product_ids))
return product_ids[
index
] # return a random number from 0 to length of string
@property
def _quantity(self):
return np.random.randint(1, 10)
def get_data(self):
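        """
        Yield fake seed records: for every day from 2021-01-01 until today,
        generate between 1 and 14 transactions, each with a fake customer,
        the transaction date and a random product id and quantity.
        """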
for date in self._get_dates():
for _ in range(np.random.randint(1, 15)):
name = self._name
data = {
"customers": {
"name": name,
"address": self._address,
"phone": self._phone,
"email": self._get_email(name),
},
"transactions": {
"transaction_date": date,
},
"transaction_details": {
"product_id": self._product_id,
"quantity": np.random.randint(1, 10),
},
}
yield data
class DBSetup(DBConn):
def _create_tables(self):
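        """
        Create the target schema/database on each backend if it does not
        exist yet, then create every table defined on the ORM Base.
        """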
for database_type in self._database_types:
conn = self.get_conn(database_type)
if database_type == "postgres":
if not conn.dialect.has_schema(conn, self.schema):
conn.execute(sa.schema.CreateSchema(self.schema))
if database_type == "mysql":
conn.execute(f"CREATE DATABASE IF NOT EXISTS {self.schema}")
Base.metadata.create_all(conn)
def reset(self):
for database_type in self._database_types:
conn = self.get_conn(database_type)
Base.metadata.drop_all(conn)
sql = f"DROP SCHEMA IF EXISTS {self.schema}"
if database_type == "postgres":
conn.execute(f"{sql} CASCADE")
else:
conn.execute(sql)
def load_transaction(self, data, session):
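        """
        Insert a single transaction: build a Customers row with its nested
        Transactions and TransactionDetails through the ORM relationships,
        then add and commit it on the given session.
        """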
customers = data.get("customers")
transactions = data.get("transactions")
transaction_details = data.get("transaction_details")
row = Customers( # maintain the relationship between each tables
**customers,
transactions=[
Transactions(
**transactions,
transaction_details=[
TransactionDetails(**transaction_details)
],
)
],
)
session.add(row)
session.commit()
def _seed_transactions(self):
my_fake_data = DataGenerator()
session = self.get_session("mysql")
for line in my_fake_data.get_data():
self.load_transaction(line, session)
@property
def _product_list(self):
return PRODUCT_LIST
def _seed_products(self):
for database_type in self._database_types: #
session = self.get_session(database_type)
for row in self._product_list:
product = Products(**row) # pass in as a kwargs
session.add(product) # insert data into both databases
session.commit()
def run(self):
self.reset()
self._create_tables()
self._seed_products()
self._seed_transactions()
class ApplicationDataBase(DBConn):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.jinja_env = Environment(
loader=PackageLoader("database", "templates")
)
db_type = "mysql"
def _get_template(self, filename, **kwargs):
temp = self.jinja_env.get_template(filename)
return temp.render(**kwargs)
def get_data(self, date, table_name):
kwargs = {"date": date}
sql = self._get_template(f"{table_name}.sql", **kwargs)
return self.run_query(sql)
def run_query(self, sql):
conn = self.get_conn("mysql")
result = conn.execute(sql)
return [dict(row) for row in result.fetchall()]
if __name__ == "__main__":
kwargs = {"host": "localhost"}
app = ApplicationDataBase(**kwargs)
data1 = app.get_data("2021-08-03", "customers")
data2 = app.get_data("2021-05-01", "transactions")
data3 = app.get_data("2021-07-21", "transaction_details")
print(data1)
print("******")
print(data2)
print("******")
print(data3)
|
[
"logging.basicConfig",
"logging.getLogger",
"sqlalchemy.orm.sessionmaker",
"database.models.core.Base.metadata.drop_all",
"database.models.core.Base.metadata.create_all",
"database.models.core.TransactionDetails",
"sqlalchemy.schema.CreateSchema",
"sqlalchemy.create_engine",
"database.models.core.Products",
"faker.Faker",
"datetime.datetime.now",
"numpy.random.randint",
"datetime.date",
"datetime.timedelta",
"jinja2.PackageLoader"
] |
[((271, 292), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (290, 292), False, 'import logging\n'), ((302, 329), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (319, 329), False, 'import logging\n'), ((1815, 1852), 'sqlalchemy.create_engine', 'sa.create_engine', (['conn_str'], {'echo': '(True)'}), '(conn_str, echo=True)\n', (1831, 1852), True, 'import sqlalchemy as sa\n'), ((2067, 2097), 'sqlalchemy.orm.sessionmaker', 'sa.orm.sessionmaker', ([], {'bind': 'conn'}), '(bind=conn)\n', (2086, 2097), True, 'import sqlalchemy as sa\n'), ((2190, 2197), 'faker.Faker', 'Faker', ([], {}), '()\n', (2195, 2197), False, 'from faker import Faker\n'), ((2246, 2265), 'datetime.date', 'dt.date', (['(2021)', '(1)', '(1)'], {}), '(2021, 1, 1)\n', (2253, 2265), True, 'import datetime as dt\n'), ((2976, 2999), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (2993, 2999), True, 'import numpy as np\n'), ((3527, 3551), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (3544, 3551), True, 'import numpy as np\n'), ((4790, 4820), 'database.models.core.Base.metadata.create_all', 'Base.metadata.create_all', (['conn'], {}), '(conn)\n', (4814, 4820), False, 'from database.models.core import Base, Products, Customers, TransactionDetails, Transactions\n'), ((4954, 4982), 'database.models.core.Base.metadata.drop_all', 'Base.metadata.drop_all', (['conn'], {}), '(conn)\n', (4976, 4982), False, 'from database.models.core import Base, Products, Customers, TransactionDetails, Transactions\n'), ((2307, 2324), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2322, 2324), True, 'import datetime as dt\n'), ((2485, 2505), 'datetime.timedelta', 'dt.timedelta', ([], {'days': 'i'}), '(days=i)\n', (2497, 2505), True, 'import datetime as dt\n'), ((3645, 3669), 'numpy.random.randint', 'np.random.randint', (['(1)', '(15)'], {}), '(1, 15)\n', (3662, 3669), True, 'import numpy as np\n'), ((6295, 6310), 'database.models.core.Products', 'Products', ([], {}), '(**row)\n', (6303, 6310), False, 'from database.models.core import Base, Products, Customers, TransactionDetails, Transactions\n'), ((6751, 6789), 'jinja2.PackageLoader', 'PackageLoader', (['"""database"""', '"""templates"""'], {}), "('database', 'templates')\n", (6764, 6789), False, 'from jinja2 import Environment, PackageLoader\n'), ((4229, 4253), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (4246, 4253), True, 'import numpy as np\n'), ((4622, 4657), 'sqlalchemy.schema.CreateSchema', 'sa.schema.CreateSchema', (['self.schema'], {}), '(self.schema)\n', (4644, 4657), True, 'import sqlalchemy as sa\n'), ((5644, 5685), 'database.models.core.TransactionDetails', 'TransactionDetails', ([], {}), '(**transaction_details)\n', (5662, 5685), False, 'from database.models.core import Base, Products, Customers, TransactionDetails, Transactions\n')]
|
import numpy as np
import torch
import argparse
from pina.pinn import PINN
from pina.ppinn import ParametricPINN as pPINN
from pina.label_tensor import LabelTensor
from torch.nn import ReLU, Tanh, Softplus
from pina.adaptive_functions.adaptive_softplus import AdaptiveSoftplus
from problems.parametric_elliptic_optimal_control_alpha_variable import ParametricEllipticOptimalControl
from pina.multi_deep_feed_forward import MultiDeepFeedForward
from pina.deep_feed_forward import DeepFeedForward
alpha = 1
class myFeature(torch.nn.Module):
"""
    Feature: (1 - x1^2) * (1 - x2^2)
"""
def __init__(self):
super(myFeature, self).__init__()
def forward(self, x):
return (-x[:, 0]**2+1) * (-x[:, 1]**2+1)
class CustomMultiDFF(MultiDeepFeedForward):
def __init__(self, dff_dict):
super().__init__(dff_dict)
def forward(self, x):
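        # Evaluate the inner network to obtain 'u_param' and 'y', then append
        # p = u_param * alpha (x[:, 3] is the alpha input) as an extra
        # labelled output column.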
out = self.uu(x)
p = LabelTensor((out['u_param'] * x[:, 3]).reshape(-1, 1), ['p'])
a = LabelTensor.hstack([out, p])
return a
model = CustomMultiDFF(
{
'uu': {
'input_variables': ['x1', 'x2', 'mu', 'alpha'],
'output_variables': ['u_param', 'y'],
'layers': [40, 40, 20],
'func': Softplus,
'extra_features': [myFeature()],
},
# 'u_param': {
# 'input_variables': ['u', 'mu'],
# 'output_variables': ['u_param'],
# 'layers': [],
# 'func': None
# },
# 'p': {
# 'input_variables': ['u'],
# 'output_variables': ['p'],
# 'layers': [10],
# 'func': None
# },
}
)
opc = ParametricEllipticOptimalControl(alpha)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run PINA")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-s", "-save", action="store_true")
group.add_argument("-l", "-load", action="store_true")
args = parser.parse_args()
# model = DeepFeedForward(
# layers=[40, 40, 20],
# output_variables=['u_param', 'y', 'p'],
# input_variables=opc.input_variables+['mu', 'alpha'],
# func=Softplus,
# extra_features=[myFeature()]
# )
pinn = pPINN(
opc,
model,
lr=0.002,
error_norm='mse',
regularizer=1e-8,
lr_accelerate=None)
if args.s:
pinn.span_pts(30, 'grid', ['D1'])
pinn.span_pts(50, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4'])
pinn.train(10000, 20)
# with open('ocp_wrong_history.txt', 'w') as file_:
# for i, losses in enumerate(pinn.history):
# file_.write('{} {}\n'.format(i, sum(losses).item()))
pinn.save_state('pina.ocp')
else:
pinn.load_state('working.pina.ocp')
pinn.load_state('pina.ocp')
import matplotlib
matplotlib.use('GTK3Agg')
import matplotlib.pyplot as plt
# res = 64
# param = torch.tensor([[3., 1]])
# pts_container = []
# for mn, mx in [[-1, 1], [-1, 1]]:
# pts_container.append(np.linspace(mn, mx, res))
# grids_container = np.meshgrid(*pts_container)
# unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
# unrolled_pts = torch.cat([unrolled_pts, param.double().repeat(unrolled_pts.shape[0], 1).reshape(-1, 2)], axis=1)
# unrolled_pts = LabelTensor(unrolled_pts, ['x1', 'x2', 'mu', 'alpha'])
# Z_pred = pinn.model(unrolled_pts.tensor)
# print(Z_pred.tensor.shape)
# plt.subplot(2, 3, 1)
# plt.pcolor(Z_pred['y'].reshape(res, res).detach())
# plt.colorbar()
# plt.subplot(2, 3, 2)
# plt.pcolor(Z_pred['u_param'].reshape(res, res).detach())
# plt.colorbar()
# plt.subplot(2, 3, 3)
# plt.pcolor(Z_pred['p'].reshape(res, res).detach())
# plt.colorbar()
# with open('ocp_mu3_a1_plot.txt', 'w') as f_:
# f_.write('x y u p ys\n')
# for (x, y), tru, pre, e in zip(unrolled_pts[:, :2],
# Z_pred['u_param'].reshape(-1, 1),
# Z_pred['p'].reshape(-1, 1),
# Z_pred['y'].reshape(-1, 1),
# ):
# f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item()))
# param = torch.tensor([[3.0, 0.01]])
# unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
# unrolled_pts = torch.cat([unrolled_pts, param.double().repeat(unrolled_pts.shape[0], 1).reshape(-1, 2)], axis=1)
# unrolled_pts = LabelTensor(unrolled_pts, ['x1', 'x2', 'mu', 'alpha'])
# Z_pred = pinn.model(unrolled_pts.tensor)
# plt.subplot(2, 3, 4)
# plt.pcolor(Z_pred['y'].reshape(res, res).detach())
# plt.colorbar()
# plt.subplot(2, 3, 5)
# plt.pcolor(Z_pred['u_param'].reshape(res, res).detach())
# plt.colorbar()
# plt.subplot(2, 3, 6)
# plt.pcolor(Z_pred['p'].reshape(res, res).detach())
# plt.colorbar()
# plt.show()
# with open('ocp_mu3_a0.01_plot.txt', 'w') as f_:
# f_.write('x y u p ys\n')
# for (x, y), tru, pre, e in zip(unrolled_pts[:, :2],
# Z_pred['u_param'].reshape(-1, 1),
# Z_pred['p'].reshape(-1, 1),
# Z_pred['y'].reshape(-1, 1),
# ):
# f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item()))
y = {}
u = {}
for alpha in [0.01, 0.1, 1]:
y[alpha] = []
u[alpha] = []
for p in np.linspace(0.5, 3, 32):
a = pinn.model(LabelTensor(torch.tensor([[0, 0, p, alpha]]).double(), ['x1', 'x2', 'mu', 'alpha']).tensor)
y[alpha].append(a['y'].detach().numpy()[0])
u[alpha].append(a['u_param'].detach().numpy()[0])
plt.plot(np.linspace(0.5, 3, 32), u[1], label='u')
plt.plot(np.linspace(0.5, 3, 32), u[0.01], label='u')
plt.plot(np.linspace(0.5, 3, 32), u[0.1], label='u')
plt.plot([1, 2, 3], [0.28, 0.56, 0.85], 'o', label='Truth values')
plt.legend()
plt.show()
print(y[1])
print(y[0.1])
print(y[0.01])
with open('elliptic_param_y.txt', 'w') as f_:
f_.write('mu 1 01 001\n')
for mu, y1, y01, y001 in zip(np.linspace(0.5, 3, 32), y[1], y[0.1], y[0.01]):
f_.write('{} {} {} {}\n'.format(mu, y1, y01, y001))
with open('elliptic_param_u.txt', 'w') as f_:
f_.write('mu 1 01 001\n')
for mu, y1, y01, y001 in zip(np.linspace(0.5, 3, 32), u[1], u[0.1], u[0.01]):
f_.write('{} {} {} {}\n'.format(mu, y1, y01, y001))
    plt.plot(np.linspace(0.5, 3, 32), y[1], label='y')
plt.plot([1, 2, 3], [0.062, 0.12, 0.19], 'o', label='Truth values')
plt.legend()
plt.show()
|
[
"argparse.ArgumentParser",
"matplotlib.use",
"matplotlib.pyplot.plot",
"problems.parametric_elliptic_optimal_control_alpha_variable.ParametricEllipticOptimalControl",
"torch.tensor",
"numpy.linspace",
"pina.label_tensor.LabelTensor.hstack",
"pina.ppinn.ParametricPINN",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((1735, 1774), 'problems.parametric_elliptic_optimal_control_alpha_variable.ParametricEllipticOptimalControl', 'ParametricEllipticOptimalControl', (['alpha'], {}), '(alpha)\n', (1767, 1774), False, 'from problems.parametric_elliptic_optimal_control_alpha_variable import ParametricEllipticOptimalControl\n'), ((1817, 1864), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run PINA"""'}), "(description='Run PINA')\n", (1840, 1864), False, 'import argparse\n'), ((2338, 2426), 'pina.ppinn.ParametricPINN', 'pPINN', (['opc', 'model'], {'lr': '(0.002)', 'error_norm': '"""mse"""', 'regularizer': '(1e-08)', 'lr_accelerate': 'None'}), "(opc, model, lr=0.002, error_norm='mse', regularizer=1e-08,\n lr_accelerate=None)\n", (2343, 2426), True, 'from pina.ppinn import ParametricPINN as pPINN\n'), ((974, 1002), 'pina.label_tensor.LabelTensor.hstack', 'LabelTensor.hstack', (['[out, p]'], {}), '([out, p])\n', (992, 1002), False, 'from pina.label_tensor import LabelTensor\n'), ((2986, 3011), 'matplotlib.use', 'matplotlib.use', (['"""GTK3Agg"""'], {}), "('GTK3Agg')\n", (3000, 3011), False, 'import matplotlib\n'), ((6492, 6558), 'matplotlib.pyplot.plot', 'plt.plot', (['[1, 2, 3]', '[0.28, 0.56, 0.85]', '"""o"""'], {'label': '"""Truth values"""'}), "([1, 2, 3], [0.28, 0.56, 0.85], 'o', label='Truth values')\n", (6500, 6558), True, 'import matplotlib.pyplot as plt\n'), ((6567, 6579), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6577, 6579), True, 'import matplotlib.pyplot as plt\n'), ((6588, 6598), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6596, 6598), True, 'import matplotlib.pyplot as plt\n'), ((7231, 7298), 'matplotlib.pyplot.plot', 'plt.plot', (['[1, 2, 3]', '[0.062, 0.12, 0.19]', '"""o"""'], {'label': '"""Truth values"""'}), "([1, 2, 3], [0.062, 0.12, 0.19], 'o', label='Truth values')\n", (7239, 7298), True, 'import matplotlib.pyplot as plt\n'), ((7307, 7319), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7317, 7319), True, 'import matplotlib.pyplot as plt\n'), ((7328, 7338), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7336, 7338), True, 'import matplotlib.pyplot as plt\n'), ((6025, 6048), 'numpy.linspace', 'np.linspace', (['(0.5)', '(3)', '(32)'], {}), '(0.5, 3, 32)\n', (6036, 6048), True, 'import numpy as np\n'), ((6319, 6342), 'numpy.linspace', 'np.linspace', (['(0.5)', '(3)', '(32)'], {}), '(0.5, 3, 32)\n', (6330, 6342), True, 'import numpy as np\n'), ((6378, 6401), 'numpy.linspace', 'np.linspace', (['(0.5)', '(3)', '(32)'], {}), '(0.5, 3, 32)\n', (6389, 6401), True, 'import numpy as np\n'), ((6440, 6463), 'numpy.linspace', 'np.linspace', (['(0.5)', '(3)', '(32)'], {}), '(0.5, 3, 32)\n', (6451, 6463), True, 'import numpy as np\n'), ((7184, 7207), 'numpy.linspace', 'np.linspace', (['(0.5)', '(3)', '(32)'], {}), '(0.5, 3, 32)\n', (7195, 7207), True, 'import numpy as np\n'), ((6797, 6820), 'numpy.linspace', 'np.linspace', (['(0.5)', '(3)', '(32)'], {}), '(0.5, 3, 32)\n', (6808, 6820), True, 'import numpy as np\n'), ((7048, 7071), 'numpy.linspace', 'np.linspace', (['(0.5)', '(3)', '(32)'], {}), '(0.5, 3, 32)\n', (7059, 7071), True, 'import numpy as np\n'), ((6093, 6125), 'torch.tensor', 'torch.tensor', (['[[0, 0, p, alpha]]'], {}), '([[0, 0, p, alpha]])\n', (6105, 6125), False, 'import torch\n')]
|
import os
import uuid
import numpy as np
from tqdm import tqdm
import pickle
from utils.config import opt
from .voc_eval import voc_eval
devkit_path = opt.voc_data_dir[:-8]
year = opt.year
def do_python_eval(classes, image_set, output_dir='output'):
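    """Evaluate the written VOC result files: run voc_eval for every class
    at an IoU threshold of 0.5 (with the 11-point 2007 metric for years
    before 2010), pickle the per-class precision/recall curves into
    `output_dir` and print the per-class APs and the mean AP."""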
annopath = os.path.join(
devkit_path,
'VOC' + year,
'Annotations',
'{}.xml')
imagesetfile = os.path.join(
devkit_path,
'VOC' + year,
'ImageSets',
'Main',
image_set + '.txt')
cachedir = os.path.join(devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
    use_07_metric = int(year) < 2010
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(classes):
if cls == '__background__':
continue
filename = get_voc_results_file_template(image_set).format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def get_comp_id():
salt = str(uuid.uuid4())
comp_id = 'comp4'
#comp_id = (comp_id + '_' + salt if True else comp_id)
return comp_id
def get_voc_results_file_template(image_set):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = get_comp_id() + '_det_' + image_set + '_{:s}.txt'
filedir = os.path.join(devkit_path, 'results', 'VOC' + year, 'Main')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def write_voc_results_file(image_index, classes, all_boxes, image_set):
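    """Write one detection file per class in the VOCdevkit results layout.
    Each line is '<image_id> <score> <xmin> <ymin> <xmax> <ymax>' with
    1-based pixel coordinates, as the devkit expects."""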
for cls_ind, cls in enumerate(classes):
if cls == '__background__':
continue
filename = get_voc_results_file_template(image_set).format(cls)
print('Writing {} VOC results file: {}'.format(cls, filename))
with open(filename, 'wt') as f:
for im_ind, index in enumerate(image_index):
dets = all_boxes[cls_ind][im_ind]
                if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def evaluate_detections(image_index, classes, all_boxes, output_dir, image_set):
write_voc_results_file(image_index, classes, all_boxes, image_set)
do_python_eval(classes, image_set, output_dir)
|
[
"os.path.exists",
"numpy.mean",
"pickle.dump",
"os.makedirs",
"os.path.join",
"uuid.uuid4",
"os.path.isdir",
"os.mkdir"
] |
[((269, 333), 'os.path.join', 'os.path.join', (['devkit_path', "('VOC' + year)", '"""Annotations"""', '"""{}.xml"""'], {}), "(devkit_path, 'VOC' + year, 'Annotations', '{}.xml')\n", (281, 333), False, 'import os\n'), ((386, 471), 'os.path.join', 'os.path.join', (['devkit_path', "('VOC' + year)", '"""ImageSets"""', '"""Main"""', "(image_set + '.txt')"], {}), "(devkit_path, 'VOC' + year, 'ImageSets', 'Main', image_set + '.txt'\n )\n", (398, 471), False, 'import os\n'), ((523, 569), 'os.path.join', 'os.path.join', (['devkit_path', '"""annotations_cache"""'], {}), "(devkit_path, 'annotations_cache')\n", (535, 569), False, 'import os\n'), ((2330, 2388), 'os.path.join', 'os.path.join', (['devkit_path', '"""results"""', "('VOC' + year)", '"""Main"""'], {}), "(devkit_path, 'results', 'VOC' + year, 'Main')\n", (2342, 2388), False, 'import os\n'), ((2465, 2496), 'os.path.join', 'os.path.join', (['filedir', 'filename'], {}), '(filedir, filename)\n', (2477, 2496), False, 'import os\n'), ((759, 784), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (772, 784), False, 'import os\n'), ((794, 814), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (802, 814), False, 'import os\n'), ((2019, 2031), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2029, 2031), False, 'import uuid\n'), ((2400, 2423), 'os.path.exists', 'os.path.exists', (['filedir'], {}), '(filedir)\n', (2414, 2423), False, 'import os\n'), ((2433, 2453), 'os.makedirs', 'os.makedirs', (['filedir'], {}), '(filedir)\n', (2444, 2453), False, 'import os\n'), ((1289, 1341), 'pickle.dump', 'pickle.dump', (["{'rec': rec, 'prec': prec, 'ap': ap}", 'f'], {}), "({'rec': rec, 'prec': prec, 'ap': ap}, f)\n", (1300, 1341), False, 'import pickle\n'), ((1378, 1390), 'numpy.mean', 'np.mean', (['aps'], {}), '(aps)\n', (1385, 1390), True, 'import numpy as np\n'), ((1517, 1529), 'numpy.mean', 'np.mean', (['aps'], {}), '(aps)\n', (1524, 1529), True, 'import numpy as np\n'), ((1222, 1263), 'os.path.join', 'os.path.join', (['output_dir', "(cls + '_pr.pkl')"], {}), "(output_dir, cls + '_pr.pkl')\n", (1234, 1263), False, 'import os\n')]
|
#!/usr/bin/env python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import torch as th
import torchvision
from tqdm import tqdm
def main(args):
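    """Train a 784-500-10 network on (Fashion-)MNIST with Equilibrium
    Propagation: relax the hidden/output states to a free-phase fixed
    point, nudge the outputs towards the one-hot targets in a weakly
    clamped phase, and update the weights and biases with the contrastive
    rule scaled by 1/beta.  Energy, accuracy and loss are reported once
    per epoch."""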
trainloader, testloader = get_loaders(args.batch_size, args.fashion)
epsilon = 0.5
beta = 1.0
alpha1 = 0.1
alpha2 = 0.05
a = np.sqrt(2.0 / (784 + 500))
W1 = np.random.uniform(-a, a, (784, 500))
b1 = np.random.uniform(-a, a, 500)
a = np.sqrt(2.0 / (500 + 10))
W2 = np.random.uniform(-a, a, (500, 10))
b2 = np.random.uniform(-a, a, 10)
states = [(np.random.uniform(0, 1., (args.batch_size, 500)), \
np.random.uniform(0, 1., (args.batch_size, 10))) for _ in range(len(trainloader))]
for epoch in range(args.epochs):
running_loss = running_energy = running_true_positive = 0.
for i, (x, labels) in enumerate(tqdm(trainloader, desc=f"Epoch {epoch}")):
x, labels = x.view(-1, 784).numpy(), labels.numpy()
h, y = states[i]
# Free phase
for j in range(20):
dh = d_rho(h) * (x @ W1 + y @ W2.T + b1) - h
dy = d_rho(y) * (h @ W2 + b2) - y
h = rho(h + epsilon * dh)
y = rho(y + epsilon * dy)
'''
energy = (np.square(h).sum() + np.square(y).sum() \
- (W1 * (x.T @ h)).sum() - (W2 * (h.T @ y)).sum()) / 2 \
- (h @ b1).sum() - (y @ b2).sum())
print(np.round(energy, 4), np.round(np.linalg.norm(dh), 4))
'''
h_free, y_free = np.copy(h), np.copy(y)
states[i] = h_free, y_free
t = np.zeros((x.shape[0], 10))
t[np.arange(t.shape[0]), labels] = 1
# Weakly clamped phase
for j in range(4):
dh = d_rho(h) * (x @ W1 + y @ W2.T + b1) - h
dy = d_rho(y) * (h @ W2 + b2) - y + beta * (t - y)
h = rho(h + epsilon * dh)
y = rho(y + epsilon * dy)
'''
energy = (np.square(h).sum() + np.square(y).sum() \
- (W1 * (x.T @ h)).sum() - (W2 * (h.T @ y)).sum()) / 2 \
- (h @ b1).sum() - (y @ b2).sum()
print(np.round(energy, 4), np.round(np.linalg.norm(dh), 4))
'''
h_clamped = np.copy(h)
y_clamped = np.copy(y)
W1 += alpha1 / beta * (rho(x.T) @ rho(h_clamped) - rho(x.T) @ rho(h_free)) / args.batch_size
W2 += alpha2 / beta * (rho(h_clamped.T) @ rho(y_clamped) - rho(h_free.T) @ rho(y_free)) / args.batch_size
b1 += alpha1 / beta * (rho(h_clamped) - rho(h_free)).mean(0)
b2 += alpha2 / beta * (rho(y_clamped) - rho(y_free)).mean(0)
running_energy += (np.square(h_free).sum() + np.square(y_free).sum() \
- (W1 * (x.T @ h_free)).sum() - (W2 * (h_free.T @ y_free)).sum()) / 2 \
- (h_free @ b1).sum() - (y_free @ b2).sum()
running_loss += np.square(t - y_free).sum()
running_true_positive += np.count_nonzero(np.argmax(y_free, 1) == labels)
energy_avg = running_energy / (len(trainloader) * args.batch_size)
accuracy_avg = running_true_positive / (len(trainloader) * args.batch_size)
loss_avg = running_loss / (len(trainloader) * args.batch_size)
print(f"Energy: {energy_avg}, Accuracy: {accuracy_avg}, Loss: {loss_avg}")
def rho(x):
return np.copy(np.clip(x, 0., 1.))
def d_rho(x):
return (x >= 0.) * (x <= 1.)
def get_loaders(batch_size, fashion=False):
mnist = torchvision.datasets.MNIST
if fashion:
mnist = torchvision.datasets.FashionMNIST
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),])
trainloader = th.utils.data.DataLoader(
mnist(root="./data", train=True, download=True, transform=transform),
batch_size=batch_size,
shuffle=True,
num_workers=2)
testloader = th.utils.data.DataLoader(
mnist(root="./data", train=False, download=True, transform=transform),
batch_size=batch_size,
shuffle=False,
num_workers=2)
return trainloader, testloader
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--epochs", type=int, default=1000)
parser.add_argument("--batch-size", type=int, default=20)
parser.add_argument("--fashion", action="store_true", default=False)
args = parser.parse_args()
main(args)
|
[
"numpy.clip",
"numpy.copy",
"numpy.sqrt",
"argparse.ArgumentParser",
"tqdm.tqdm",
"numpy.argmax",
"numpy.square",
"numpy.zeros",
"numpy.random.uniform",
"torchvision.transforms.ToTensor",
"numpy.arange"
] |
[((398, 424), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (784 + 500))'], {}), '(2.0 / (784 + 500))\n', (405, 424), True, 'import numpy as np\n'), ((434, 470), 'numpy.random.uniform', 'np.random.uniform', (['(-a)', 'a', '(784, 500)'], {}), '(-a, a, (784, 500))\n', (451, 470), True, 'import numpy as np\n'), ((480, 509), 'numpy.random.uniform', 'np.random.uniform', (['(-a)', 'a', '(500)'], {}), '(-a, a, 500)\n', (497, 509), True, 'import numpy as np\n'), ((519, 544), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (500 + 10))'], {}), '(2.0 / (500 + 10))\n', (526, 544), True, 'import numpy as np\n'), ((554, 589), 'numpy.random.uniform', 'np.random.uniform', (['(-a)', 'a', '(500, 10)'], {}), '(-a, a, (500, 10))\n', (571, 589), True, 'import numpy as np\n'), ((599, 627), 'numpy.random.uniform', 'np.random.uniform', (['(-a)', 'a', '(10)'], {}), '(-a, a, 10)\n', (616, 627), True, 'import numpy as np\n'), ((4371, 4396), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4394, 4396), False, 'import argparse\n'), ((3583, 3603), 'numpy.clip', 'np.clip', (['x', '(0.0)', '(1.0)'], {}), '(x, 0.0, 1.0)\n', (3590, 3603), True, 'import numpy as np\n'), ((644, 693), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1.0)', '(args.batch_size, 500)'], {}), '(0, 1.0, (args.batch_size, 500))\n', (661, 693), True, 'import numpy as np\n'), ((708, 756), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1.0)', '(args.batch_size, 10)'], {}), '(0, 1.0, (args.batch_size, 10))\n', (725, 756), True, 'import numpy as np\n'), ((936, 976), 'tqdm.tqdm', 'tqdm', (['trainloader'], {'desc': 'f"""Epoch {epoch}"""'}), "(trainloader, desc=f'Epoch {epoch}')\n", (940, 976), False, 'from tqdm import tqdm\n'), ((1751, 1777), 'numpy.zeros', 'np.zeros', (['(x.shape[0], 10)'], {}), '((x.shape[0], 10))\n', (1759, 1777), True, 'import numpy as np\n'), ((2447, 2457), 'numpy.copy', 'np.copy', (['h'], {}), '(h)\n', (2454, 2457), True, 'import numpy as np\n'), ((2482, 2492), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (2489, 2492), True, 'import numpy as np\n'), ((3859, 3892), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (3890, 3892), False, 'import torchvision\n'), ((1672, 1682), 'numpy.copy', 'np.copy', (['h'], {}), '(h)\n', (1679, 1682), True, 'import numpy as np\n'), ((1684, 1694), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (1691, 1694), True, 'import numpy as np\n'), ((1792, 1813), 'numpy.arange', 'np.arange', (['t.shape[0]'], {}), '(t.shape[0])\n', (1801, 1813), True, 'import numpy as np\n'), ((3123, 3144), 'numpy.square', 'np.square', (['(t - y_free)'], {}), '(t - y_free)\n', (3132, 3144), True, 'import numpy as np\n'), ((3205, 3225), 'numpy.argmax', 'np.argmax', (['y_free', '(1)'], {}), '(y_free, 1)\n', (3214, 3225), True, 'import numpy as np\n'), ((2895, 2912), 'numpy.square', 'np.square', (['h_free'], {}), '(h_free)\n', (2904, 2912), True, 'import numpy as np\n'), ((2921, 2938), 'numpy.square', 'np.square', (['y_free'], {}), '(y_free)\n', (2930, 2938), True, 'import numpy as np\n')]
|
"""
File: examples/util/rectangular_binner.py
Author: <NAME>
Date: 22 Sep 2018
Description: Example script showing the use of the RectangularBinner class.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as pl
from pylinex import RectangularBinner
fontsize = 24
num_old_x_values = 1000
num_new_x_values = 20
wavelength = 0.4
old_x_values = np.linspace(-1, 1, num_old_x_values)[1:-1]
old_error = np.ones_like(old_x_values)
old_y_values =\
np.sin(2 * np.pi * old_x_values / wavelength) * np.sinh(old_x_values)
new_x_bin_edges = np.linspace(-1, 1, num_new_x_values + 1)
weights = np.ones_like(old_y_values)
binner = RectangularBinner(old_x_values, new_x_bin_edges)
new_x_values = binner.binned_x_values
(new_y_values, new_weights) = binner.bin(old_y_values, weights=weights,\
return_weights=True)
new_error = binner.bin_error(old_error, weights=weights, return_weights=False)
fig = pl.figure(figsize=(12,9))
ax = fig.add_subplot(111)
ax.plot(old_x_values, old_y_values, label='unbinned')
ax.plot(new_x_values, new_y_values, label='binned')
ax.legend(fontsize=fontsize)
ax.tick_params(labelsize=fontsize, width=2.5, length=7.5, which='major')
ax.tick_params(labelsize=fontsize, width=1.5, length=4.5, which='minor')
pl.show()
|
[
"numpy.ones_like",
"numpy.sinh",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.sin",
"pylinex.RectangularBinner",
"matplotlib.pyplot.show"
] |
[((431, 457), 'numpy.ones_like', 'np.ones_like', (['old_x_values'], {}), '(old_x_values)\n', (443, 457), True, 'import numpy as np\n'), ((566, 606), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(num_new_x_values + 1)'], {}), '(-1, 1, num_new_x_values + 1)\n', (577, 606), True, 'import numpy as np\n'), ((617, 643), 'numpy.ones_like', 'np.ones_like', (['old_y_values'], {}), '(old_y_values)\n', (629, 643), True, 'import numpy as np\n'), ((654, 702), 'pylinex.RectangularBinner', 'RectangularBinner', (['old_x_values', 'new_x_bin_edges'], {}), '(old_x_values, new_x_bin_edges)\n', (671, 702), False, 'from pylinex import RectangularBinner\n'), ((925, 951), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (934, 951), True, 'import matplotlib.pyplot as pl\n'), ((1259, 1268), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (1266, 1268), True, 'import matplotlib.pyplot as pl\n'), ((376, 412), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'num_old_x_values'], {}), '(-1, 1, num_old_x_values)\n', (387, 412), True, 'import numpy as np\n'), ((478, 523), 'numpy.sin', 'np.sin', (['(2 * np.pi * old_x_values / wavelength)'], {}), '(2 * np.pi * old_x_values / wavelength)\n', (484, 523), True, 'import numpy as np\n'), ((526, 547), 'numpy.sinh', 'np.sinh', (['old_x_values'], {}), '(old_x_values)\n', (533, 547), True, 'import numpy as np\n')]
|
import math
import numpy as np
from scipy.special import expit, logit
import matplotlib.pyplot as plt
from mmur.viz import _set_plot_style
COLORS = _set_plot_style()
def plot_logstic_dgp(N=500, figsize=None):
"""Plot example of DGP as used in mmur.generators.LogisticGenerator.
Parameters
----------
N : int
number of points to generate in plot
figsize : tuple, default=None
figure passed to plt.subplots, default size is (12, 7)
Returns
-------
fig : matplotlib.figure.Figure
ax : matplotlib.axes._subplots.AxesSubplot
"""
betas = np.array((0.5, 1.2))
X = np.ones((N, 2))
X[:, 1] = np.random.uniform(-10., 10.1, size=N)
L = X.dot(betas)
gt_proba = expit(L)
proba_noisy = expit(L + np.random.normal(0, 0.5, size=N))
y = np.random.binomial(1, proba_noisy)
figsize = figsize or (12, 7)
fig, ax = plt.subplots(figsize=figsize)
sidx = np.argsort(X[:, 1])
x = X[sidx, 1]
ax.plot(x, gt_proba[sidx], label='true P', lw=2)
ax.scatter(x, proba_noisy[sidx], c='grey', marker='x', label='noisy P')
ax.scatter(x, y[sidx], c=COLORS[2], marker='x', s=50, label='y')
ax.legend(fontsize=14)
ax.set_ylabel('probability', fontsize=14)
ax.set_xlabel('X', fontsize=14)
ax.set_title('Logistic data generating process', fontsize=16)
return fig, ax
def plot_probas(
probas, ground_truth, n_sets=None, alt_label=None, axs=None
):
"""Plot sorted probabilities compared to ground truth probability.
Parameters
---------
probas : np.ndarray[float]
the classifier probabilities of shape (holdout_samples, n_sets)
ground_truth : np.ndarray[float]
ground truth probabilities, 1d array
n_sets : int, float, default=None
number of columns in proba to plot. If int it is interpreted as the
number of columns. If a float as a fraction of the columns. Default
is max(0.1 * probas.shape[1], 30)
alt_label : str, default=None
label for the source of probabilities, default is 'holdout'
axs : np.ndarray[matplotlib.axes._subplots.AxesSubplot], default=None
an array containing the axes to plot on, must be 1d and of length >= 2
Returns
-------
fig : matplotlib.figure.Figure, optional
the figure is returned when ``axs`` is None
axs : matplotlib.axes._subplots.AxesSubplot
the created or passed axes object
"""
if probas.ndim == 1:
probas = probas[:, None]
alt_label = alt_label or 'holdout'
if axs is None:
fig, axs = plt.subplots(figsize=(14, 7), nrows=1, ncols=2)
else:
fig = None
n_cols = probas.shape[1]
if isinstance(n_sets, int):
        n_sets = min(n_cols, n_sets)
elif isinstance(n_sets, float):
        n_sets = min(math.floor(n_sets * n_cols), n_cols)
else:
n_sets = max(math.floor(0.1 * probas.shape[1]), min(30, n_cols))
sorted_gt = np.sort(ground_truth)
xvals = logit(sorted_gt)
for i in range(n_sets - 1):
sarr = np.sort(probas[:, i])
axs[0].plot(xvals, sarr, c='grey', alpha=0.5)
axs[1].plot(sorted_gt, sarr, c='grey', alpha=0.5)
# plot outside loop for easier labelling
sarr = np.sort(probas[:, -1])
axs[0].plot(xvals, sarr, c='grey', alpha=0.5, label=alt_label)
axs[1].plot(sorted_gt, sarr, c='grey', alpha=0.5, label=alt_label)
# plot DGP
axs[0].plot(
xvals,
sorted_gt,
c='red',
ls='--',
lw=2,
zorder=10,
label='DGP',
)
axs[0].set_title('Probabilities', fontsize=18)
axs[0].set_ylabel('proba', fontsize=18)
axs[0].set_xlabel('DGP linear estimate', fontsize=18)
axs[0].tick_params(labelsize=16)
axs[0].legend(fontsize=18)
# plot DGP
axs[1].plot(
ground_truth,
ground_truth,
c='red',
ls='--',
lw=2,
zorder=10,
label='DGP'
)
axs[1].set_title('Q-Q ', fontsize=18)
axs[1].set_ylabel('proba -- ground truth', fontsize=18)
axs[1].set_xlabel('proba -- draws', fontsize=18)
axs[1].tick_params(labelsize=16)
axs[1].legend(fontsize=18)
if fig is not None:
fig.tight_layout()
return fig, axs
return axs
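

# Hypothetical usage sketch (the names, shapes and noise level below are
# made up for illustration): compare a set of noisy probability vectors
# against the data-generating probabilities.
#
#   gt = np.sort(np.random.uniform(0.05, 0.95, 200))
#   draws = np.clip(gt[:, None] + np.random.normal(0, 0.05, (200, 50)), 0, 1)
#   fig, axs = plot_probas(draws, gt, n_sets=0.5)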
|
[
"numpy.random.normal",
"numpy.ones",
"math.floor",
"numpy.sort",
"scipy.special.expit",
"numpy.array",
"numpy.argsort",
"scipy.special.logit",
"mmur.viz._set_plot_style",
"numpy.random.uniform",
"matplotlib.pyplot.subplots",
"numpy.random.binomial"
] |
[((150, 167), 'mmur.viz._set_plot_style', '_set_plot_style', ([], {}), '()\n', (165, 167), False, 'from mmur.viz import _set_plot_style\n'), ((599, 619), 'numpy.array', 'np.array', (['(0.5, 1.2)'], {}), '((0.5, 1.2))\n', (607, 619), True, 'import numpy as np\n'), ((628, 643), 'numpy.ones', 'np.ones', (['(N, 2)'], {}), '((N, 2))\n', (635, 643), True, 'import numpy as np\n'), ((658, 696), 'numpy.random.uniform', 'np.random.uniform', (['(-10.0)', '(10.1)'], {'size': 'N'}), '(-10.0, 10.1, size=N)\n', (675, 696), True, 'import numpy as np\n'), ((732, 740), 'scipy.special.expit', 'expit', (['L'], {}), '(L)\n', (737, 740), False, 'from scipy.special import expit, logit\n'), ((811, 845), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'proba_noisy'], {}), '(1, proba_noisy)\n', (829, 845), True, 'import numpy as np\n'), ((894, 923), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (906, 923), True, 'import matplotlib.pyplot as plt\n'), ((935, 954), 'numpy.argsort', 'np.argsort', (['X[:, 1]'], {}), '(X[:, 1])\n', (945, 954), True, 'import numpy as np\n'), ((2953, 2974), 'numpy.sort', 'np.sort', (['ground_truth'], {}), '(ground_truth)\n', (2960, 2974), True, 'import numpy as np\n'), ((2987, 3003), 'scipy.special.logit', 'logit', (['sorted_gt'], {}), '(sorted_gt)\n', (2992, 3003), False, 'from scipy.special import expit, logit\n'), ((3243, 3265), 'numpy.sort', 'np.sort', (['probas[:, -1]'], {}), '(probas[:, -1])\n', (3250, 3265), True, 'import numpy as np\n'), ((2583, 2630), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(14, 7)', 'nrows': '(1)', 'ncols': '(2)'}), '(figsize=(14, 7), nrows=1, ncols=2)\n', (2595, 2630), True, 'import matplotlib.pyplot as plt\n'), ((3052, 3073), 'numpy.sort', 'np.sort', (['probas[:, i]'], {}), '(probas[:, i])\n', (3059, 3073), True, 'import numpy as np\n'), ((769, 801), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.5)'], {'size': 'N'}), '(0, 0.5, size=N)\n', (785, 801), True, 'import numpy as np\n'), ((2816, 2843), 'math.floor', 'math.floor', (['(n_sets * n_cols)'], {}), '(n_sets * n_cols)\n', (2826, 2843), False, 'import math\n'), ((2884, 2917), 'math.floor', 'math.floor', (['(0.1 * probas.shape[1])'], {}), '(0.1 * probas.shape[1])\n', (2894, 2917), False, 'import math\n')]
|
from __future__ import print_function
import sys
import os
import re
import numpy as np
import subprocess
from matplotlib import pyplot as plt
inputpath = os.path.join(os.path.realpath('..'),'INPUT/')
print("Initialising")
fig, ax = plt.subplots()
n=0
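# For each equilibrium input file: temporarily rename it to the name the
# solver expects, run the external `csphoenix` continuum solver, then read
# OUTPUT/omega_csp (one r, q, omega, gamma row per mode and radial point)
# and overlay the continuum frequency against radius on the same axes.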
for filenum in ['INPUT/0.txt','INPUT/1.txt','INPUT/2.txt']:
os.rename(filenum, 'INPUT/equilibrium.map')
subprocess.call(["csphoenix"])
os.rename('INPUT/equilibrium.map', filenum)
n_variable = 8
n_multiplier = n_variable * 8
omegafile = 'OUTPUT/omega_csp'
omega_min = -2.0
omega_max = 2.0
gamma_min = -0.1
gamma_max = 0.1
with open(omegafile, 'r') as f:
line = f.readline()
[m, nr] = map(int, line.split())
print('M = ', m)
print('NR = ', nr)
n_output = m * n_multiplier * nr
r = np.zeros(n_output)
q = np.zeros(n_output)
gamma = np.zeros(n_output)
omega = np.zeros(n_output)
i = 0
for line in f:
[rf, qf, omegaf, gammaf] = map(float, line.split())
#print(rf, qf, gammaf, omegaf)
r[i] = rf
q[i] = qf
gamma[i] = gammaf
omega[i] = omegaf
i = i + 1
f.close()
plt.scatter(r, omega, s=0.5, marker='x', label='flow='+str(n))
n=n+1
inner = 0.0
outer = 1.0
## NAME THE OUTPUT FILES
plt.xlim([np.min(r),np.max(r)])
plt.xlabel('s')
plt.ylim([omega_min,omega_max])
plt.ylabel(r'$\omega / \omega_{A0}$')
ax.legend()
plt.title('Continuous Spectrum Frequency')
plt.figure()
plt.show()
#inner = 0.0
#outer = 1.0
## NAME THE OUTPUT FILES
#plt.xlim([np.min(r),np.max(r)])
#plt.xlabel('s')
#plt.ylim([omega_min,omega_max])
#plt.ylabel('$\omega / \omega_{A0}$')
#ax.legend()
#plt.title('Continuous Spectrum Frequency')
#plt.savefig("/SecondDisk/PHOENIX_RUNS/NSTX/OVERPLOTnumeric012.png")
#print("Frequency continuum plot done")
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"os.rename",
"matplotlib.pyplot.xlabel",
"numpy.max",
"os.path.realpath",
"matplotlib.pyplot.figure",
"numpy.zeros",
"subprocess.call",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((234, 248), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (246, 248), True, 'from matplotlib import pyplot as plt\n'), ((1454, 1464), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1462, 1464), True, 'from matplotlib import pyplot as plt\n'), ((169, 191), 'os.path.realpath', 'os.path.realpath', (['""".."""'], {}), "('..')\n", (185, 191), False, 'import os\n'), ((314, 357), 'os.rename', 'os.rename', (['filenum', '"""INPUT/equilibrium.map"""'], {}), "(filenum, 'INPUT/equilibrium.map')\n", (323, 357), False, 'import os\n'), ((359, 389), 'subprocess.call', 'subprocess.call', (["['csphoenix']"], {}), "(['csphoenix'])\n", (374, 389), False, 'import subprocess\n'), ((391, 434), 'os.rename', 'os.rename', (['"""INPUT/equilibrium.map"""', 'filenum'], {}), "('INPUT/equilibrium.map', filenum)\n", (400, 434), False, 'import os\n'), ((1296, 1311), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""s"""'], {}), "('s')\n", (1306, 1311), True, 'from matplotlib import pyplot as plt\n'), ((1313, 1345), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[omega_min, omega_max]'], {}), '([omega_min, omega_max])\n', (1321, 1345), True, 'from matplotlib import pyplot as plt\n'), ((1346, 1384), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\omega / \\\\omega_{A0}$"""'], {}), "('$\\\\omega / \\\\omega_{A0}$')\n", (1356, 1384), True, 'from matplotlib import pyplot as plt\n'), ((1397, 1439), 'matplotlib.pyplot.title', 'plt.title', (['"""Continuous Spectrum Frequency"""'], {}), "('Continuous Spectrum Frequency')\n", (1406, 1439), True, 'from matplotlib import pyplot as plt\n'), ((1441, 1453), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1451, 1453), True, 'from matplotlib import pyplot as plt\n'), ((775, 793), 'numpy.zeros', 'np.zeros', (['n_output'], {}), '(n_output)\n', (783, 793), True, 'import numpy as np\n'), ((803, 821), 'numpy.zeros', 'np.zeros', (['n_output'], {}), '(n_output)\n', (811, 821), True, 'import numpy as np\n'), ((835, 853), 'numpy.zeros', 'np.zeros', (['n_output'], {}), '(n_output)\n', (843, 853), True, 'import numpy as np\n'), ((867, 885), 'numpy.zeros', 'np.zeros', (['n_output'], {}), '(n_output)\n', (875, 885), True, 'import numpy as np\n'), ((1273, 1282), 'numpy.min', 'np.min', (['r'], {}), '(r)\n', (1279, 1282), True, 'import numpy as np\n'), ((1283, 1292), 'numpy.max', 'np.max', (['r'], {}), '(r)\n', (1289, 1292), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 21 20:00:50 2020
@author: takada
"""
import logging
import numpy as np
import functools
import operator
from typing import List, Dict, Callable
import time
import nidaqmx
from nidaqmx.stream_writers import (
DigitalSingleChannelWriter, AnalogMultiChannelWriter)
from qcodes import Instrument, VisaInstrument, validators as vals
from qcodes.instrument.channel import InstrumentChannel
from qcodes.instrument.parameter import ArrayParameter, Parameter
from qcodes.dataset.sqlite.database import connect
from qcodes.dataset.sqlite.queries import get_last_run
from qcodes.dataset.data_set import load_by_id
log = logging.getLogger(__name__)
class NI6733_ao_voltage_trace(ArrayParameter):
def __init__(self, name:str, instrument: InstrumentChannel,
channum: int) -> None:
"""
This voltage trace parameter is attached to a channel of the analog output.
Parameters
----------
name : str
Name of the trace.
instrument : InstrumentChannel
Instrument channel, where the trace is attached.
channum : int
Integer number of the channel, where the trace is attached.
Returns
-------
None
DESCRIPTION.
"""
super().__init__(name=name,
shape=(1,),
label = 'voltage',
unit='V',
setpoint_names=('Count',),
setpoint_labels=('Count',),
setpoint_units=('pts',),
setpoints = None,
docstring='Holds analog output trace')
self.channum = channum
self._instrument = instrument
def get_raw(self):
pass
class NI6733_ao_voltage_channel(InstrumentChannel):
def __init__(self, parent: Instrument, name:str,
slot_num:int, channum: int, min_val:float=-10.0,
fast_sequence:bool=False, fast_sequence_delta:float = -0.1,
max_val:float= 10.0) -> None:
"""
Parameters
----------
parent : Instrument
Host instrument handler
name : str
Given name of the channel
slot_num : int
Slot number of the channel
channum : int
Channel number
min_val : float, optional
Minimum value of the channel voltage value. The default is -10.0.
max_val : float, optional
Maximum value of the channel voltage value. The default is 10.0.
fast_sequence : bool, optional
Whether this dac is used for fast sequence or not.
fast_sequence_delta: float
How far the voltage is moved by the fast sequence from its original position.
Returns
-------
None
DESCRIPTION.
"""
super().__init__(parent, name)
self.instrument = parent
self.slot_num = slot_num
self.channum = channum
self._min_val = min_val
self._max_val = max_val
self._current_val = 0.0
self._target_val = None
self._fast_sequence = fast_sequence
self._fast_sequence_delta = fast_sequence_delta
self.add_parameter('min_val',
label = 'Minimum value',
unit = 'V',
get_cmd=self.get_min_val,
set_cmd=self.set_min_val,
vals = vals.Numbers(-10.0, 10.0)
)
self.add_parameter('max_val',
label = 'Maximum value',
unit = 'V',
get_cmd=self.get_max_val,
set_cmd=self.set_max_val,
vals = vals.Numbers(-10.0, 10.0)
)
self.add_parameter('cv',
label = 'Current value',
unit = 'V',
get_cmd=self.get_current_val,
set_cmd=self.set_current_val,
vals = vals.Numbers(-5.0, 5.0)
)
self.add_parameter('fs',
label='fast sequence',
get_cmd = self.get_fast_sequence,
set_cmd = self.set_fast_sequence,
)
self.add_parameter('fs_delta',
label = 'fast sequence delta',
unit = 'V',
get_cmd = self.get_fast_sequence_delta,
set_cmd = self.set_fast_sequence_delta,
vals = vals.Numbers(-1.0, 1.0)
)
def get_min_val(self):
return self._min_val
def set_min_val(self, val:float):
self._min_val = val
def get_max_val(self):
return self._max_val
def set_max_val(self, val:float):
self._max_val = val
def get_current_val(self):
return self._current_val
def set_current_val(self, val:float):
self._target_val = val
def get_fast_sequence(self):
return self._fast_sequence
def set_fast_sequence(self, val:bool):
self._fast_sequence = val
self.instrument._fs_ready = False
def get_fast_sequence_delta(self):
return self._fast_sequence_delta
def set_fast_sequence_delta(self, val:float):
self._fast_sequence_delta = val
self.instrument._fs_ready = False
class NI6733(Instrument):
def __init__(self, name:str, device_name:str = 'PXI2',
slots:List[int]=[3,4,], ms2wait:float = 2.0,
fast_sequence_divider:float = 2.0, fs_pts:int = 101,
**kwargs):
"""
This is the qcodes driver for NI6733 16 bit Analog Output.
Args:
name (str): Given name of the DAC
device_name (str): Name of the PXI device. Default value is 'PXI2'.
slots(List[int]): List of DAC slots. Each slot has 8 DAC channels.
ms2wait (float): Wait time between minimum resolution DAC movement in [ms].
fast_sequence_divider (float): Time between fast sequence movement in [ms].
fs_pts (int): Length of the fast sequence.
"""
super().__init__(name, **kwargs)
self.device_name = device_name
self.slots = slots
self._ms2wait = ms2wait
self._fast_sequence_divider = fast_sequence_divider
self._fs_pts = fs_pts
self._fs_ready = False
self._fast_move_slot_list = list()
self._fast_move_channel_list = dict()
self._fast_move_list = dict()
self._move_points = None
self.write_task = dict()
self.fast_seq_task = dict()
for slot in self.slots:
self.write_task[slot] = nidaqmx.Task()
self.write_task['{:d}'.format(slot)] = False
self.fast_seq_task[slot] = nidaqmx.Task()
self.fast_seq_task['{:d}'.format(slot)] = False
self.ctr_task = nidaqmx.Task()
self.ctr_task_isClosed = False
self.do_task = nidaqmx.Task()
self.do_task_isClosed = False
self.add_parameter('ms2wait',
label = 'ms to wait',
unit = 'ms',
get_cmd = self.get_ms2wait,
set_cmd = self.set_ms2wait,
vals = vals.Numbers(0.0, 100.0))
self.add_parameter('fs_div',
label = 'fast sequence divider',
unit = 'ms',
get_cmd = self.get_fast_sequence_divider,
set_cmd = self.set_fast_sequence_divider,
vals = vals.Numbers(0.0, 100.0))
self.add_parameter('fs_pts',
label = 'fast sequence size',
unit = 'pts',
get_cmd = self.get_fs_pts,
set_cmd = self.set_fs_pts,
vals = vals.Ints(2, 100000)
)
######################
# Add channels to the instrument
for slot in self.slots:
for i in range(8):
chan = NI6733_ao_voltage_channel(self,
'analog_output_s{:d}c{:d}'.format(slot, i),
slot_num = slot,
channum = i)
self.add_submodule('s{:d}c{:d}'.format(slot, i), chan)
###########################
# Function for parameters
###########################
def get_ms2wait(self):
return self._ms2wait
def set_ms2wait(self, val:float):
self._ms2wait = val
def get_fast_sequence_divider(self):
return self._fast_sequence_divider
def set_fast_sequence_divider(self, val:float):
self._fast_sequence_divider = val
self._fs_ready = False
def get_fs_pts(self):
return self._fs_pts
def set_fs_pts(self, val:int):
self._fs_pts = val
self._fs_ready = False
###########################
# Utility functions
###########################
def move_all_dac(self, v:float = 0.0):
"""
Move all the dac to the given value.
Scaling factor for each dac is not applied in this operation.
Parameters
----------
v : float, optional
Target voltage in volt. The default is 0.0.
Returns
-------
None.
"""
for s in self.slots:
for i in range(8):
chan = getattr(self, 's{:d}c{:d}'.format(s, i))
chan._target_val = v
self.DAC_move()
def init2zero(self):
"""
Initialise all the DAC values to be 0.0 V after moving once to -10 mV.
"""
        self.move_all_dac(-0.01)
        self.move_all_dac(0.0)
def load_current_values_from_database(self,
db_path:str = './experiments.db',
run_id:int = None,
):
"""
Load current DAC values from the specified database and run_id.
If run_id is not given, we load from the latest run_id.
Args:
db_path (str): Path to the database.
run_id (int): run_id of the recovered run.
"""
# Connect to the database
conn = connect(db_path)
if run_id == None:
# Get last run id
run_id = get_last_run(conn)
# Load dataset
dataset = load_by_id(run_id)
        # Whether to return to the initial sweep position after the measurement or not
return2initial = dataset.snapshot['station']['instruments']['measurement_information']['parameters']['return2initial']['value']
# Collect information from sweeping parameters
data = dataset.get_parameter_data()
data_dict = dict()
for key in data.keys():
d = data[key]
for k in d.keys():
if not k in data_dict.keys():
data_dict[k] = d[k]
                # Check whether the measurement was completed or not from the data size
ar_size = d[k].size
fast_sweep = dataset.snapshot['station']['instruments']['measurement_information']['parameters']['fast_sweep']['value']
sweep_dims = dataset.snapshot['station']['instruments']['measurement_information']['parameters']['sweep_dims']['value']
if fast_sweep:
first_dim_size = dataset.snapshot['station']['instruments'][self.name]['parameters']['fs_pts']['value']
else:
first_dim_size = 1
total_pts = int(functools.reduce(operator.mul, sweep_dims, 1) * first_dim_size)
if not ar_size == total_pts:
completed = False
else:
completed = True
# Set current value of each dac from static values
for sm in dataset.snapshot['station']['instruments'][self.name]['submodules'].keys():
# Get raw value of each dac
cv = dataset.snapshot['station']['instruments'][self.name]['submodules'][sm]['parameters']['cv']['raw_value']
chan = getattr(self, sm)
sm_fullname = dataset.snapshot['station']['instruments'][self.name]['submodules'][sm]['parameters']['cv']['full_name']
if sm_fullname in data_dict.keys():
if return2initial and completed:
cv = data_dict[sm_fullname][0]
else:
cv = data_dict[sm_fullname][-1]
chan._current_val = cv
conn.close()
def init_tasks(self):
"""
        Close all the tasks which are open, then open them again.
"""
if not self.do_task_isClosed:
self.do_task.close()
self.do_task = nidaqmx.Task()
if not self.ctr_task_isClosed:
self.ctr_task.close()
self.ctr_task = nidaqmx.Task()
for slot in self.slots:
if not self.write_task['{:d}'.format(slot)]:
self.write_task[slot].close()
self.write_task[slot] = nidaqmx.Task()
if not self.fast_seq_task['{:d}'.format(slot)]:
self.fast_seq_task[slot].close()
self.fast_seq_task[slot] = nidaqmx.Task()
###################################
# Base functions for voltage output
###################################
def ctr_setup(self,
task:nidaqmx.Task = None,
slot_num:int = 3,
no_of_samples:int = None,
                  trigger_delay:float = 0.0,
):
"""
        This function sets up a counter output on counter 0 of the given slot.
Args:
task(nidaqmx.Task): Task counter is set.
slot_num(int): Slot number of the trigger out
            no_of_samples (int): Number of triggers generated. If it is None, triggers are generated continuously.
            trigger_delay (float): Delay of the counter in seconds.
"""
# Create counter output channel
task.co_channels.add_co_pulse_chan_freq('{}Slot{:d}/ctr0'.format(self.device_name, slot_num),
units = nidaqmx.constants.FrequencyUnits.HZ,
idle_state = nidaqmx.constants.Level.LOW,
initial_delay = trigger_delay,
freq = 1000.0/self._fast_sequence_divider,
duty_cycle = 0.5,
)
# Set sample generation mode and number of samples to be generated.
        # Comment: Increase 'samps_per_chan' by 3 since some triggers are missed by the analog output.
task.timing.cfg_implicit_timing(samps_per_chan = no_of_samples+3,
sample_mode = nidaqmx.constants.AcquisitionType.FINITE)
def do_setup(self,
task:nidaqmx.Task = None,
slot_num:int = 3,
port_num:int = 0,
line_num:int = 0,
initial_delay:int = 1,
trigger_length:int = 2,
sample_clk_src:str = '/PXI2Slot3/Ctr0InternalOutput',
):
"""
This function setup digital output task used to trigger ADC.
Parameters
----------
task : nidaqmx.Task, optional
task, where the digital output channel is set.
slot_num : int, optional
Slot number. The default is 3.
port_num : int, optional
Port number of digital output. The default is 0.
line_num : int, optional
Line number of digital output. The default is 0.
initial_delay : int, optional
Initial delay of the generated start trigger in a unit of a clock. The default is 1.
trigger_length : int, optional
Length of the trigger in a unit of a clock sample. The default is 2.
sample_clk_src : str, optional
Sample clock source. The default is '/PXI2Slot3/Ctr0InternalOutput'.
Returns
-------
None.
"""
# Calculate number of points for the trigger
points = initial_delay + trigger_length + 10
# Create digital output channel
task.do_channels.add_do_chan(lines = '{}Slot{:d}/port{:d}/line{:d}'.format(self.device_name, slot_num, port_num, line_num))
# Setup timing
task.timing.cfg_samp_clk_timing(rate = 100000,
source = sample_clk_src,
active_edge=nidaqmx.constants.Edge.RISING,
sample_mode=nidaqmx.constants.AcquisitionType.FINITE,
samps_per_chan = points
)
# Write array information of the pulse
writer = DigitalSingleChannelWriter(task.out_stream)
ar = np.zeros((points,), dtype=np.uint8)
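        # Build the raw byte pattern for the digital port: only the requested line
        # (bit value 2**line_num) is driven high for 'trigger_length' samples after
        # 'initial_delay', producing a single start-trigger pulse.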
ar[initial_delay:initial_delay+trigger_length] = 2 ** line_num
writer.write_many_sample_port_byte(ar)
def set_sample_clock(self,
task:nidaqmx.Task = None,
no_of_samples:int=None,
sample_rate:float=500.0,
sample_clk_src:str=None,
):
"""
        This function sets up the sample clock timing.
Parameters
----------
task : nidaqmx.Task, optional
task, where the sample clock to be set.
no_of_samples : int, optional
Number of samples (data points) to be generated. If it is None, clock mode becomes
continuous.
sample_rate : float, optional
Sampling rate in Hz. The default is 500.0 Hz.
        sample_clk_src : str, optional
Sample clock source. We can set extra source. If it is None,
we use a default onboard clock.
Returns
-------
None.
"""
if sample_clk_src == None:
sample_clk_src = 'OnboardClock'
task.timing.cfg_samp_clk_timing(sample_rate,
source = sample_clk_src,
active_edge=nidaqmx.constants.Edge.RISING,
sample_mode=nidaqmx.constants.AcquisitionType.FINITE,
samps_per_chan = no_of_samples)
def DAC_move(self,
task_preparation:bool=True,
clear_task:bool=True):
"""
        This function moves the DACs whose target value has been changed.
Args:
            task_preparation (bool): Whether to prepare the analog output channels and sample clock for the task.
clear_task (bool): Whether we clear the task after the movement or not.
"""
move_slot_list = list()
move_channel_list = dict()
move_list = dict()
largest_move = 0.0
for slot in self.slots:
move_channel_list[slot] = list()
move_list[slot] = list()
for i in range(8):
chan = getattr(self, 's{:d}c{:d}'.format(slot, i))
if not chan._target_val == None:
move_channel_list[slot].append(chan)
move_slot_list.append(slot)
cv = chan._current_val # Current DAC value
tv = chan._target_val # Target DAC value
move_list[slot].append((cv, tv)) # Keep the value
delta = abs(tv - cv) # Size of the movement
if delta > largest_move:
# Check largest movement to determine number of points.
largest_move = delta
# Convert move_slot_list to set
move_slot_list = set(move_slot_list)
# Calculate points
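        # (One DAC LSB on the +/-10 V range is 20 V / 2**16 ~ 0.3 mV; the number of
        #  interpolation points is roughly the largest movement expressed in LSBs,
        #  rounded down to an even number and never fewer than 2.)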
points = max(2, int((largest_move/(20/2.0**16)//2.0)*2.0))
# Keep points and re-define task when it changes
if not self._move_points == points:
self._move_points = points
task_preparation = True
# Create array for movement
ar = dict()
for slot in move_slot_list:
ar_list = list()
for v in move_list[slot]:
ar_list.append(np.linspace(v[0],v[1], self._move_points,dtype=float))
ar[slot] = np.vstack(tuple(ar_list))
if task_preparation:
# Clear task (It takes a few ms.)
for slot in move_slot_list:
if not self.write_task['{:d}'.format(slot)]:
self.write_task[slot].close()
self.write_task[slot] = nidaqmx.Task()
self.write_task['{:d}'.format(slot)] = False
# Create analog output channel in the task
for chan in move_channel_list[slot]:
self.write_task[slot].ao_channels.add_ao_voltage_chan(physical_channel = '{}Slot{:d}/ao{:d}'.format(self.device_name, chan.slot_num, chan.channum),
min_val = chan.min_val(),
max_val = chan.max_val(),
units = nidaqmx.constants.VoltageUnits.VOLTS)
# Setup sample clock
self.set_sample_clock(task = self.write_task[slot],
no_of_samples = self._move_points,
sample_rate = 1000.0/self.ms2wait(),
sample_clk_src = None,)
writer = dict()
for slot in move_slot_list:
# Output voltage
writer[slot] = AnalogMultiChannelWriter(self.write_task[slot].out_stream)
writer[slot].write_many_sample(ar[slot])
for slot in move_slot_list:
self.write_task[slot].start()
for slot in move_slot_list:
self.write_task[slot].wait_until_done(timeout=nidaqmx.constants.WAIT_INFINITELY)
self.write_task[slot].stop()
if clear_task:
# Clear task (It takes a few ms.)
for slot in move_slot_list:
self.write_task[slot].close()
self.write_task['{:d}'.format(slot)] = True
# Update information for the moved channels
for slot in move_slot_list:
for chan in move_channel_list[slot]:
chan._current_val = chan._target_val
chan._target_val = None
def prepare_fast_move(self):
"""
        This function prepares the tasks for fast movement: it collects the channels with fast
        sequencing enabled and (re)creates the counter and digital-output trigger tasks.
"""
self._fast_move_slot_list = list()
self._fast_move_channel_list = dict()
self._fast_move_list = dict()
for slot in self.slots:
self._fast_move_channel_list[slot] = list()
self._fast_move_list[slot] = list()
for i in range(8):
chan = getattr(self, 's{:d}c{:d}'.format(slot, i))
if chan.fs():
self._fast_move_slot_list.append(slot)
self._fast_move_channel_list[slot].append(chan)
v0 = chan._current_val
v1 = chan._current_val + chan._fast_sequence_delta
self._fast_move_list[slot].append((v0, v1))
# Convert fast_move_slot_list to set.
self._fast_move_slot_list = set(self._fast_move_slot_list)
# Clear the counter task
if not self.ctr_task_isClosed:
self.ctr_task.close()
self.ctr_task = nidaqmx.Task()
self.ctr_task_isClosed = False
# Setup counter
self.ctr_setup(task = self.ctr_task,
slot_num = self.slots[0],
no_of_samples = self.fs_pts(),
trigger_delay = 0.0,
)
# Clear the digital out task
if not self.do_task_isClosed:
self.do_task.close()
self.do_task = nidaqmx.Task()
self.do_task_isClosed = False
# Setup digital output
self.do_setup(task = self.do_task,
slot_num = self.slots[0],
port_num = 0,
line_num = 0,
initial_delay = 0,
trigger_length = 1,
sample_clk_src = '/{}Slot{:d}/Ctr0InternalOutput'.format(self.device_name, self.slots[0]),
)
self._fs_ready = True
def DAC_fast_move(self):
"""
        This function executes the fast sequence of the DAC.
        --> Note: this function has a problem when used within QCoDeS. It is not possible
        to use the DAC_move task and the DAC_fast_move task at the same time.
"""
if not self._fs_ready:
            raise ValueError('Fast sequence is not ready. Please perform "prepare_fast_move".')
        # The number of samples in the output array (points+1) has to be even, so make 'points' odd.
if int(self.fs_pts()%2) == 0:
points = self.fs_pts()+1
else:
points = self.fs_pts()
# Set up channels
for slot in self._fast_move_slot_list:
# Define fast sequence task
if not self.fast_seq_task['{:d}'.format(slot)]:
self.fast_seq_task[slot].close()
self.fast_seq_task[slot] = nidaqmx.Task()
self.fast_seq_task['{:d}'.format(slot)] = False
# Create analog output channel in the task
for chan in self._fast_move_channel_list[slot]:
self.fast_seq_task[slot].ao_channels.add_ao_voltage_chan(physical_channel = '{}Slot{:d}/ao{:d}'.format(self.device_name, chan.slot_num, chan.channum),
min_val = chan.min_val(),
max_val = chan.max_val(),
units = nidaqmx.constants.VoltageUnits.VOLTS)
# Setup sample clock
self.set_sample_clock(task = self.fast_seq_task[slot],
no_of_samples=points+1,
sample_rate=1000.0/self._fast_sequence_divider,
sample_clk_src='/{}Slot{:d}/Ctr0InternalOutput'.format(self.device_name, self.slots[0]),)
ar_dict = dict()
writer = dict()
for slot in self._fast_move_slot_list:
# Create array for fast movement
ar_list = list()
for chan in self._fast_move_channel_list[slot]:
v0 = chan._current_val
v1 = chan._current_val + chan._fast_sequence_delta
ar = np.empty((points+1,), dtype=float)
ar[0:self.fs_pts()] = np.linspace(v0, v1, self.fs_pts(), dtype=float)
ar[self.fs_pts()] = v0
if int(self.fs_pts()%2) == 0:
ar[self.fs_pts()+1] = v0
ar_list.append(ar)
ar_dict[slot] = np.vstack(tuple(ar_list))
# Output voltage
writer[slot] = AnalogMultiChannelWriter(self.fast_seq_task[slot].out_stream)
writer[slot].write_many_sample(ar_dict[slot])
for slot in self._fast_move_slot_list:
self.fast_seq_task[slot].start()
self.do_task.start()
self.ctr_task.start()
for slot in self._fast_move_slot_list:
self.fast_seq_task[slot].wait_until_done(timeout=nidaqmx.constants.WAIT_INFINITELY)
self.fast_seq_task[slot].stop()
self.fast_seq_task[slot].close()
self.fast_seq_task['{:d}'.format(slot)] = True
self.do_task.wait_until_done(timeout=nidaqmx.constants.WAIT_INFINITELY)
self.do_task.stop()
self.ctr_task.wait_until_done(timeout=nidaqmx.constants.WAIT_INFINITELY)
self.ctr_task.stop()
if __name__ == '__main__':
t = time.time()
dac = NI6733(name = 'dac',
device_name = 'PXI2',
slots=[3,4,],
ms2wait = 2.0,
fast_sequence_divider = 2.0,
fs_pts = 201,
)
# # DAC movement test
# dac.s3c0.cv(-0.1)
# dac.s4c0.cv(-0.3)
# dac.DAC_move(task_preparation = True,
# clear_task = False)
# dac.s3c0.cv(-0.3)
# dac.s4c0.cv(-0.5)
# dac.DAC_move(task_preparation = False,
# clear_task = False)
# dac.s3c0.cv(-0.5)
# dac.s4c0.cv(-0.7)
# dac.DAC_move(task_preparation = False,
# clear_task = False)
# dac.s3c0.cv(0.0)
# dac.s4c0.cv(0.0)
# dac.DAC_move(task_preparation = False,
# clear_task = True)
# # Trigger test
# dac.ctr_setup(slot_num = 3,
# no_of_samples = 20,
# trigger_delay = 0.1)
# dac.ctr_task.start()
# dac.ctr_task.wait_until_done()
# # time.sleep(5)
# dac.ctr_task.stop()
# Fast sequence test
dac.fs_pts(201)
dac.fs_div(2.0)
dac.s3c0.fs(True)
dac.s3c0.fs_delta(-1.0)
dac.prepare_fast_move()
dac.DAC_fast_move()
print('Execution time {:f}'.format(time.time() - t))
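    # # Minimal single-channel usage sketch (kept commented out like the tests above,
    # # since it would drive real hardware; the channel and voltage are only illustrative)
    # dac.s3c0.cv(0.1)
    # dac.DAC_move(task_preparation = True,
    #              clear_task = True)
    # print('s3c0 value: {:f} V'.format(dac.s3c0.cv()))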
|
[
"logging.getLogger",
"qcodes.dataset.sqlite.queries.get_last_run",
"functools.reduce",
"nidaqmx.Task",
"qcodes.dataset.sqlite.database.connect",
"qcodes.validators.Numbers",
"numpy.zeros",
"numpy.linspace",
"qcodes.validators.Ints",
"numpy.empty",
"qcodes.dataset.data_set.load_by_id",
"nidaqmx.stream_writers.DigitalSingleChannelWriter",
"nidaqmx.stream_writers.AnalogMultiChannelWriter",
"time.time"
] |
[((664, 691), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (681, 691), False, 'import logging\n'), ((29407, 29418), 'time.time', 'time.time', ([], {}), '()\n', (29416, 29418), False, 'import time\n'), ((7386, 7400), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (7398, 7400), False, 'import nidaqmx\n'), ((7463, 7477), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (7475, 7477), False, 'import nidaqmx\n'), ((11022, 11038), 'qcodes.dataset.sqlite.database.connect', 'connect', (['db_path'], {}), '(db_path)\n', (11029, 11038), False, 'from qcodes.dataset.sqlite.database import connect\n'), ((11190, 11208), 'qcodes.dataset.data_set.load_by_id', 'load_by_id', (['run_id'], {}), '(run_id)\n', (11200, 11208), False, 'from qcodes.dataset.data_set import load_by_id\n'), ((17911, 17954), 'nidaqmx.stream_writers.DigitalSingleChannelWriter', 'DigitalSingleChannelWriter', (['task.out_stream'], {}), '(task.out_stream)\n', (17937, 17954), False, 'from nidaqmx.stream_writers import DigitalSingleChannelWriter, AnalogMultiChannelWriter\n'), ((17968, 18003), 'numpy.zeros', 'np.zeros', (['(points,)'], {'dtype': 'np.uint8'}), '((points,), dtype=np.uint8)\n', (17976, 18003), True, 'import numpy as np\n'), ((24912, 24926), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (24924, 24926), False, 'import nidaqmx\n'), ((25347, 25361), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (25359, 25361), False, 'import nidaqmx\n'), ((7176, 7190), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (7188, 7190), False, 'import nidaqmx\n'), ((7287, 7301), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (7299, 7301), False, 'import nidaqmx\n'), ((11117, 11135), 'qcodes.dataset.sqlite.queries.get_last_run', 'get_last_run', (['conn'], {}), '(conn)\n', (11129, 11135), False, 'from qcodes.dataset.sqlite.queries import get_last_run\n'), ((13515, 13529), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (13527, 13529), False, 'import nidaqmx\n'), ((13644, 13658), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (13656, 13658), False, 'import nidaqmx\n'), ((22939, 22997), 'nidaqmx.stream_writers.AnalogMultiChannelWriter', 'AnalogMultiChannelWriter', (['self.write_task[slot].out_stream'], {}), '(self.write_task[slot].out_stream)\n', (22963, 22997), False, 'from nidaqmx.stream_writers import DigitalSingleChannelWriter, AnalogMultiChannelWriter\n'), ((26728, 26742), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (26740, 26742), False, 'import nidaqmx\n'), ((28549, 28610), 'nidaqmx.stream_writers.AnalogMultiChannelWriter', 'AnalogMultiChannelWriter', (['self.fast_seq_task[slot].out_stream'], {}), '(self.fast_seq_task[slot].out_stream)\n', (28573, 28610), False, 'from nidaqmx.stream_writers import DigitalSingleChannelWriter, AnalogMultiChannelWriter\n'), ((3624, 3649), 'qcodes.validators.Numbers', 'vals.Numbers', (['(-10.0)', '(10.0)'], {}), '(-10.0, 10.0)\n', (3636, 3649), True, 'from qcodes import Instrument, VisaInstrument, validators as vals\n'), ((3957, 3982), 'qcodes.validators.Numbers', 'vals.Numbers', (['(-10.0)', '(10.0)'], {}), '(-10.0, 10.0)\n', (3969, 3982), True, 'from qcodes import Instrument, VisaInstrument, validators as vals\n'), ((4293, 4316), 'qcodes.validators.Numbers', 'vals.Numbers', (['(-5.0)', '(5.0)'], {}), '(-5.0, 5.0)\n', (4305, 4316), True, 'from qcodes import Instrument, VisaInstrument, validators as vals\n'), ((4902, 4925), 'qcodes.validators.Numbers', 'vals.Numbers', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (4914, 4925), True, 'from qcodes import Instrument, 
VisaInstrument, validators as vals\n'), ((7796, 7820), 'qcodes.validators.Numbers', 'vals.Numbers', (['(0.0)', '(100.0)'], {}), '(0.0, 100.0)\n', (7808, 7820), True, 'from qcodes import Instrument, VisaInstrument, validators as vals\n'), ((8140, 8164), 'qcodes.validators.Numbers', 'vals.Numbers', (['(0.0)', '(100.0)'], {}), '(0.0, 100.0)\n', (8152, 8164), True, 'from qcodes import Instrument, VisaInstrument, validators as vals\n'), ((8452, 8472), 'qcodes.validators.Ints', 'vals.Ints', (['(2)', '(100000)'], {}), '(2, 100000)\n', (8461, 8472), True, 'from qcodes import Instrument, VisaInstrument, validators as vals\n'), ((12299, 12344), 'functools.reduce', 'functools.reduce', (['operator.mul', 'sweep_dims', '(1)'], {}), '(operator.mul, sweep_dims, 1)\n', (12315, 12344), False, 'import functools\n'), ((13847, 13861), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (13859, 13861), False, 'import nidaqmx\n'), ((14031, 14045), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (14043, 14045), False, 'import nidaqmx\n'), ((21828, 21842), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (21840, 21842), False, 'import nidaqmx\n'), ((28140, 28176), 'numpy.empty', 'np.empty', (['(points + 1,)'], {'dtype': 'float'}), '((points + 1,), dtype=float)\n', (28148, 28176), True, 'import numpy as np\n'), ((30680, 30691), 'time.time', 'time.time', ([], {}), '()\n', (30689, 30691), False, 'import time\n'), ((21449, 21504), 'numpy.linspace', 'np.linspace', (['v[0]', 'v[1]', 'self._move_points'], {'dtype': 'float'}), '(v[0], v[1], self._move_points, dtype=float)\n', (21460, 21504), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
###########################################################################
# Active Inference algorithm
#
# Execute the AI algorithm using the data from the
# /filter/y_coloured_noise topic and publish the results to the
# /filter/ai/output topic.
# Note that only the filtering part of the AI algorithm has been implemented so far.
#
# Author: <NAME>, TU Delft
# Last modified: 17.11.2019
#
###########################################################################
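# In the code below, AI.compute_mu() implements the (filtering) belief update as a
# gradient-descent-style step in generalised coordinates,
#   mu_dot = Der*mu - alpha_mu*( (Der - A_tilde)'*PI_w*(Der*mu - A_tilde*mu - xi)
#                                - C_tilde'*PI_z*(z_gen - C_tilde*mu) ),
# followed by an Euler integration step mu <- mu + mu_dot*delta_t.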
#Import all necessary packages
import rospy #needed to be able to program in Python
import numpy as np #needed to be able to work with numpy
import time #needed to be able to get the execution time of code parts
from scipy.linalg import toeplitz #needed to create derivative matrix in general way
from scipy.linalg import block_diag #needed to create the block-diagonal PI matrix
from jackal_active_inference_versus_kalman_filter.msg import gazebo_model_states_noise #needed to read the custom output messages gazebo_model_states_noise
from jackal_active_inference_versus_kalman_filter.msg import filt_output #needed to publish the custom output messages filt_output resulting from the filtering methods
#TODO:
#-finish the implementation with a correct usage of the learning rate, precision matrices and prior
#-implement the update rule for the next control input
#-extend the algorithm to work on all system model states
#-use IMU data in case of experiment with Jackal robot
#Active Inference class
#-------------------------------------------------------------------
class AI(object):
"""Class providing all AI functionality:
- initialization of all necessary matrices
- compute belief mu
- compute control action u"""
def __init__(self, n_states, n_inputs, n_outputs, p, x_ref):
super(AI, self).__init__()
#Input processing
self.p = p
#Indicating the first time AI function is called
self.first_time = True
#System dimensions
self.n_states = n_states
self.n_inputs = n_inputs
self.n_outputs = n_outputs
#Initial states
self.x_0 = np.matrix(np.zeros(shape = (self.n_states, 1)))
self.mu_0 = np.matrix(np.zeros(shape = ((1 + self.p) * self.n_states, 1)))
self.mu = self.mu_0
self.mu_dot = np.matrix(np.zeros(shape = ((1 + self.p) * self.n_states, 1)))
#Initial system input (u) and output (z)
self.u = np.matrix(np.zeros(shape = (self.n_inputs, 1)))
self.z = np.matrix(np.zeros(shape = (self.n_outputs, 1)))
#Derivative matrix
self.Der = np.kron(np.eye((1 + self.p), k = 1), np.matrix(np.eye(self.n_states)))
#Learning rates #TODO: tune these values when correct usage of precision matrices is known
self.alpha_mu = 3.408*10**(-6)
# self.alpha_u = 0.01
#System matrices
self.A = -209.6785884514270
self.A_tilde = np.kron(np.eye(1 + self.p), self.A)
self.B = np.matrix('16.921645797507500 -16.921645797507500')
self.C = 1
self.C_tilde = np.kron(np.matrix(np.eye(1 + self.p)), self.C)
#Initial reference path (needed for prior belief): assuming no prior belief should be given
self.x_ref = x_ref
temp = np.matrix(np.zeros(shape = ((1 + self.p), 1)))
temp[0] = 1
self.mu_ref = np.kron(temp, self.x_ref) #this assumes that reference acceleration of the robot will always be zero (the reference velocity constant)!
self.xi = self.Der * self.mu_ref - self.A_tilde * self.mu_ref
#Forward model #TODO: is this one always correct to use or should it actually be combined with alpha_u for update rule of u?
# self.G = -1 * self.C * (1 / self.A) * self.B
def construct_precision_matrices(self, sigma_w, s_w, sigma_z, s_z):
        '''Using the standard deviation information of the process and output noise signals, construct the precision matrices'''
#Process noise precision matrix
self.sigma_w = sigma_w
self.s_w = s_w
self.SIGMA_w = np.matrix(np.eye(self.n_states)) * self.sigma_w**2
self.PI_w = self.generate_PI(1 + self.p, self.SIGMA_w, self.s_w)
#Output noise precision matrix
self.sigma_z = sigma_z
self.s_z = s_z
self.SIGMA_z = np.matrix(np.eye(self.n_states)) * self.sigma_z**2
self.PI_z = self.generate_PI(1 + self.p, self.SIGMA_z, self.s_z)
#Total precision matrix
self.PI = block_diag(self.PI_w, self.PI_z)
def generate_PI(self, k, SIGMA, s):
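        '''Build the precision matrix over k orders of generalised motion from the
        single-order covariance SIGMA and the noise smoothness s (the width of an
        assumed Gaussian autocorrelation). For s = 0 only the zeroth-order block
        is filled with inv(SIGMA).'''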
if np.amax(SIGMA) == 0:
print("PI cannot be generated if sigma is 0 or negative")
n = SIGMA.shape[0]
if s != 0:
l = np.array(range(0, 2*k-1, 2))
rho = np.matrix(np.zeros(shape = (1, 2*k-1)))
rho[0,l] = np.cumprod(1-l)/(np.sqrt(2)*s)**l
V = np.matrix(np.zeros(shape = (k, k)))
for r in range(k):
V[r,:] = rho[0,r:r+k]
rho = -rho
SIGMA_tilde = np.kron(V, SIGMA)
PI = np.linalg.inv(SIGMA_tilde)
else:
PI = np.matrix(np.zeros(shape = (k*n, k*n)))
PI[0:n, 0:n] = np.linalg.inv(SIGMA)
return PI
def compute_mu(self):
'''Update belief mu'''
self.mu_dot = self.Der * self.mu - self.alpha_mu * ((self.Der - self.A_tilde).getT() * self.PI_w * (self.Der * self.mu - self.A_tilde * self.mu - self.xi) - self.C_tilde.getT() * self.PI_z * (self.z_gen - self.C_tilde * self.mu))
# self.mu_dot = self.Der * self.mu - self.alpha_mu * ((self.Der - self.A_tilde).getT() * self.PI_w * (self.Der * self.mu - self.A_tilde * self.mu - self.xi) - self.C_tilde.getT() * self.PI_z * (self.z - self.C_tilde * self.mu))
self.mu = self.mu + self.mu_dot * self.delta_t
def compute_u(self):
'''Update control action u'''
# self.u_dot = -1 * self.alpha_u * self.G.getT() * self.PI_z * (self.z - self.C_tilde * self.mu)
# self.u = self.u + self.u_dot * self.delta_t
def debug(self):
'''Debug function for AI functionality: print all kinds of desirable variables'''
print("Der:\n{}\n\nmu:\n{}\n\nmu_dot:\n{}\n\nA_tilde:\n{}\n\nPI_w:\n{}\n\nxi:\n{}\n\nC_tilde:\n{}\n\nPI_z:\n{}\n\n-------------------------------------------------------------------------------------------\n".format(self.Der, self.mu, self.mu_dot, self.A_tilde, self.PI_w, self.xi, self.C_tilde, self.PI_z))
print("Der*mu:\n{}\n\n2nd term:\n{}\n\n3rd term:\n{}\n\nmu_dot:\n{}\n\nmu:\n{}\n\n-------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------\n".format(self.Der*self.mu, self.alpha_mu * ((self.Der - self.A_tilde).getT() * self.PI_w * (self.Der * self.mu - self.A_tilde * self.mu - self.xi)), self.alpha_mu * (self.C_tilde.getT() * self.PI_z * (self.z - self.C_tilde * self.mu)), self.mu_dot, self.mu))
print("C_tildeT:\n{}\n\nPI_z:\n{}\n\nC_tildeT*PI_z:\n{}\n\nz:\n{}\n\nC_tilde:\n{}\n\nC_tilde*mu:\n{}\n\nz-C_tilde*mu:\n{}\n\n-------------------------------------------------------------------------------------------\n".format(self.C_tilde.getT(), self.PI_z, self.C_tilde.getT()*self.PI_z, self.z, self.C_tilde, self.C_tilde*self.mu, self.z-self.C_tilde*self.mu))
print("C_tildeT*PI_z:\n{}\n\nz:\n{}\n\nC_tilde*mu:\n{}\n\nz-C_tilde*mu:\n{}\n\n3rd term:\n{}\n\n-------------------------------------------------------------------------------------------\n-------------------------------------------------------------------------------------------\n".format(self.C_tilde.getT()*self.PI_z, z, self.C_tilde*self.mu, z-self.C_tilde*self.mu, self.C_tilde.getT() * self.PI_z * (z - self.C_tilde * self.mu)))
#-------------------------------------------------------------------
#Subscriber class
#-------------------------------------------------------------------
class Subscriber(object):
"""Class providing all functionality needed to:
- subscribe to the measurement data
- run the AI equations
- publish the result"""
def __init__(self):
super(Subscriber, self).__init__()
#Create AI object
self.mean_u = np.matrix([[4.183917321479406], [1.942289357961973]])
self.mean_y = 0.401988453296692
self.debug = False
self.n_states = 1
self.p = 6
self.x_ref = np.matrix(np.zeros(shape = (self.n_states, 1)))
#---------------------------------------------
self.ai = AI(n_states = self.n_states, n_inputs = 1, n_outputs = 1, p = self.p, x_ref = self.x_ref)
#Initialize node, publisher and subscriber
self.msg = filt_output() #construct the custom message filt_output
rospy.init_node('ai', anonymous=True)
self.publisher = rospy.Publisher('filter/ai/output', filt_output, queue_size=1)
rospy.Subscriber('filter/y_coloured_noise', gazebo_model_states_noise, self.callback)
rospy.spin()
def callback(self, data):
'''Get system output z and call AI functionality'''
#The first time data comes in, the Gazebo model states update time is known and the precision matrices can be constructed
if self.ai.first_time:
self.ai.delta_t = data.delta_t #get time difference between two subsequent Gazebo model states data updates
self.ai.construct_precision_matrices(data.sigma_w, data.s_w, data.sigma_z, data.s_z)
self.ai.first_time = False
#Transform system output from operating point to origin and provide to AI algorithm
self.z = data.y_model_noise[2]
self.ai.z = self.z - self.mean_y
temp = np.matrix(np.zeros(shape = (1 + self.p, 1)))
temp[0,0] = 1
self.ai.z_gen = np.kron(temp, self.ai.z)
#Call AI functionality
if self.debug:
self.ai.debug()
self.ai.compute_mu()
self.ai.compute_u()
self.x_filt = self.ai.mu[:self.n_states, 0] + 1/self.ai.C*self.mean_y
#Publish result AI algorithm
self.msg.x_filt = [float(self.x_filt)]
# self.msg.u = [float(i) for i in self.ai.u]
# self.msg.u_lin = []
# for i,x in enumerate(self.msg.u):
# self.msg.u_lin.append(x - self.mean_u[i])
self.msg.y = [float(self.z)]
self.msg.y_lin = [float(self.ai.z)]
self.publisher.publish(self.msg)
#-------------------------------------------------------------------
#Main function
if __name__ == '__main__':
subscriber = Subscriber()
|
[
"numpy.eye",
"numpy.sqrt",
"rospy.Subscriber",
"rospy.init_node",
"numpy.kron",
"numpy.zeros",
"numpy.linalg.inv",
"rospy.spin",
"scipy.linalg.block_diag",
"jackal_active_inference_versus_kalman_filter.msg.filt_output",
"numpy.matrix",
"rospy.Publisher",
"numpy.amax",
"numpy.cumprod"
] |
[((3139, 3190), 'numpy.matrix', 'np.matrix', (['"""16.921645797507500 -16.921645797507500"""'], {}), "('16.921645797507500 -16.921645797507500')\n", (3148, 3190), True, 'import numpy as np\n'), ((3520, 3545), 'numpy.kron', 'np.kron', (['temp', 'self.x_ref'], {}), '(temp, self.x_ref)\n', (3527, 3545), True, 'import numpy as np\n'), ((4689, 4721), 'scipy.linalg.block_diag', 'block_diag', (['self.PI_w', 'self.PI_z'], {}), '(self.PI_w, self.PI_z)\n', (4699, 4721), False, 'from scipy.linalg import block_diag\n'), ((8595, 8648), 'numpy.matrix', 'np.matrix', (['[[4.183917321479406], [1.942289357961973]]'], {}), '([[4.183917321479406], [1.942289357961973]])\n', (8604, 8648), True, 'import numpy as np\n'), ((9064, 9077), 'jackal_active_inference_versus_kalman_filter.msg.filt_output', 'filt_output', ([], {}), '()\n', (9075, 9077), False, 'from jackal_active_inference_versus_kalman_filter.msg import filt_output\n'), ((9129, 9166), 'rospy.init_node', 'rospy.init_node', (['"""ai"""'], {'anonymous': '(True)'}), "('ai', anonymous=True)\n", (9144, 9166), False, 'import rospy\n'), ((9192, 9254), 'rospy.Publisher', 'rospy.Publisher', (['"""filter/ai/output"""', 'filt_output'], {'queue_size': '(1)'}), "('filter/ai/output', filt_output, queue_size=1)\n", (9207, 9254), False, 'import rospy\n'), ((9263, 9353), 'rospy.Subscriber', 'rospy.Subscriber', (['"""filter/y_coloured_noise"""', 'gazebo_model_states_noise', 'self.callback'], {}), "('filter/y_coloured_noise', gazebo_model_states_noise, self\n .callback)\n", (9279, 9353), False, 'import rospy\n'), ((9357, 9369), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (9367, 9369), False, 'import rospy\n'), ((10203, 10227), 'numpy.kron', 'np.kron', (['temp', 'self.ai.z'], {}), '(temp, self.ai.z)\n', (10210, 10227), True, 'import numpy as np\n'), ((2268, 2302), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.n_states, 1)'}), '(shape=(self.n_states, 1))\n', (2276, 2302), True, 'import numpy as np\n'), ((2336, 2385), 'numpy.zeros', 'np.zeros', ([], {'shape': '((1 + self.p) * self.n_states, 1)'}), '(shape=((1 + self.p) * self.n_states, 1))\n', (2344, 2385), True, 'import numpy as np\n'), ((2449, 2498), 'numpy.zeros', 'np.zeros', ([], {'shape': '((1 + self.p) * self.n_states, 1)'}), '(shape=((1 + self.p) * self.n_states, 1))\n', (2457, 2498), True, 'import numpy as np\n'), ((2587, 2621), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.n_inputs, 1)'}), '(shape=(self.n_inputs, 1))\n', (2595, 2621), True, 'import numpy as np\n'), ((2652, 2687), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.n_outputs, 1)'}), '(shape=(self.n_outputs, 1))\n', (2660, 2687), True, 'import numpy as np\n'), ((2754, 2777), 'numpy.eye', 'np.eye', (['(1 + self.p)'], {'k': '(1)'}), '(1 + self.p, k=1)\n', (2760, 2777), True, 'import numpy as np\n'), ((3094, 3112), 'numpy.eye', 'np.eye', (['(1 + self.p)'], {}), '(1 + self.p)\n', (3100, 3112), True, 'import numpy as np\n'), ((3441, 3472), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1 + self.p, 1)'}), '(shape=(1 + self.p, 1))\n', (3449, 3472), True, 'import numpy as np\n'), ((4775, 4789), 'numpy.amax', 'np.amax', (['SIGMA'], {}), '(SIGMA)\n', (4782, 4789), True, 'import numpy as np\n'), ((5294, 5311), 'numpy.kron', 'np.kron', (['V', 'SIGMA'], {}), '(V, SIGMA)\n', (5301, 5311), True, 'import numpy as np\n'), ((5329, 5355), 'numpy.linalg.inv', 'np.linalg.inv', (['SIGMA_tilde'], {}), '(SIGMA_tilde)\n', (5342, 5355), True, 'import numpy as np\n'), ((5467, 5487), 'numpy.linalg.inv', 'np.linalg.inv', (['SIGMA'], {}), '(SIGMA)\n', (5480, 5487), True, 'import 
numpy as np\n'), ((8792, 8826), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.n_states, 1)'}), '(shape=(self.n_states, 1))\n', (8800, 8826), True, 'import numpy as np\n'), ((10122, 10153), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1 + self.p, 1)'}), '(shape=(1 + self.p, 1))\n', (10130, 10153), True, 'import numpy as np\n'), ((2793, 2814), 'numpy.eye', 'np.eye', (['self.n_states'], {}), '(self.n_states)\n', (2799, 2814), True, 'import numpy as np\n'), ((3251, 3269), 'numpy.eye', 'np.eye', (['(1 + self.p)'], {}), '(1 + self.p)\n', (3257, 3269), True, 'import numpy as np\n'), ((4267, 4288), 'numpy.eye', 'np.eye', (['self.n_states'], {}), '(self.n_states)\n', (4273, 4288), True, 'import numpy as np\n'), ((4516, 4537), 'numpy.eye', 'np.eye', (['self.n_states'], {}), '(self.n_states)\n', (4522, 4537), True, 'import numpy as np\n'), ((5007, 5037), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 2 * k - 1)'}), '(shape=(1, 2 * k - 1))\n', (5015, 5037), True, 'import numpy as np\n'), ((5060, 5077), 'numpy.cumprod', 'np.cumprod', (['(1 - l)'], {}), '(1 - l)\n', (5070, 5077), True, 'import numpy as np\n'), ((5133, 5155), 'numpy.zeros', 'np.zeros', ([], {'shape': '(k, k)'}), '(shape=(k, k))\n', (5141, 5155), True, 'import numpy as np\n'), ((5410, 5440), 'numpy.zeros', 'np.zeros', ([], {'shape': '(k * n, k * n)'}), '(shape=(k * n, k * n))\n', (5418, 5440), True, 'import numpy as np\n'), ((5077, 5087), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5084, 5087), True, 'import numpy as np\n')]
|
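# Compare vertex reconstruction residuals (in mm) from a trained neural network with
# those from a charge-weighted-mean (CWM) reconstruction, and draw per-axis residual
# histograms for a user-selected control group and experimental groups.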
from utils import load, save, path_list, DEAD_PMTS
import nets
import torch
import numpy as np
import pandas as pd
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
from itertools import repeat
from multiprocessing import Pool
def neural_residual(root_dir):
# model selection
net_type = load(root_dir + '/configuration.json')['net_type']
if net_type == 'Net':
net = nets.Net()
elif net_type == 'Net2c':
net = nets.Net2c()
elif net_type == 'CNN1c':
net = nets.CNN1c()
elif net_type == 'CNN2c':
net = nets.CNN2c()
else:
        print('invalid net type')
raise ValueError
# get the latest model for neural network
epoch_path = path_list(root_dir + '/models/')[-1]
model_path = path_list(epoch_path, filter='pt')[-1]
net.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
# get inputs, labels, outputs and residuals
inputs = load(root_dir + '/test_inputs.tensor').float()
labels = load(root_dir + '/test_labels.tensor').float().numpy()
outputs = net(inputs).detach().cpu().clone().numpy()
residuals = (outputs - labels) * 1000
return residuals.T
def cwm_residual(root_dir):
try:
interp_r = load('src/interp_r')
interp_z = load('src/interp_z')
except FileNotFoundError:
with open('src/WeightingCorrection_att.dat', 'r') as f:
df = []
while True:
line = f.readline().split(' ')
line = list(filter(lambda a: a != '', line))
try:
line[3] = line[3][:-1]
except IndexError:
break
df.append(line)
df = pd.DataFrame(df, dtype=float)
# calculate interpolation
R = df[0]
Z = df[1]
weight_R = df[2]
weight_Z = df[3]
interp_r = interpolate.interp2d(R, Z, weight_R, kind='linear')
interp_z = interpolate.interp2d(R, Z, weight_Z, kind='linear')
save(interp_r, 'src/interp_r')
save(interp_z, 'src/interp_z')
pmt_positions = load('src/pmtcoordinates_ID.json')
testpaths = load(root_dir + '/testpaths.list')
# multiprocessing
p = Pool(processes=40)
residuals = []
total = len(testpaths)
for i in range(5):
print('getting cwm residuals... %i' % i)
paths_batch = testpaths[int(0.2 * i * total):int(0.2 * (i + 1) * total)]
residuals += p.starmap(__job, zip(paths_batch,
repeat(interp_r),
repeat(interp_z),
repeat(pmt_positions)
)
)
residuals = [r for r in residuals if r]
return np.array(residuals).T
def __job(path, interp_r, interp_z, pmt_positions):
f = load(path)
capture_time = f['capture_time'] # scalar value
hits = int(f['photon_hits']) # scalar value
hit_counts = f['hit_count'] # vector value
hit_pmts = f['hit_pmt'] # vector value
hit_time = f['hit_time'] # vector value
true_vertex = [f['positron_x'], f['positron_y'], f['positron_z']]
x = np.zeros(354)
for i in range(hits):
pmt = hit_pmts[i]
count = hit_counts[i]
t = hit_time[i]
if pmt in DEAD_PMTS:
continue
if t < capture_time:
x[pmt] += count
# if the entry is valid, reconstruct the vertex
if sum(x) > 0:
# calculate cwm vertex
reco_vertex = np.array([.0, .0, .0])
for pmt_id, hits in enumerate(x):
pmt_pos = pmt_positions[str(pmt_id)]
reco_vertex += hits * np.array([pmt_pos['x'], pmt_pos['y'], pmt_pos['z']], dtype=float)
# normalize
reco_vertex = reco_vertex / sum(x)
# correction 1
weight1r = interp_r(np.linalg.norm(reco_vertex[:2]), abs(reco_vertex[2]))
weight1z = interp_z(np.linalg.norm(reco_vertex[:2]), abs(reco_vertex[2]))
reco_vertex[:2] *= weight1r
reco_vertex[2] *= weight1z
# correction 2
weight2 = 0.8784552 - 0.0000242758 * np.linalg.norm(reco_vertex[:2])
reco_vertex *= weight2
return (reco_vertex - true_vertex).tolist()
else:
return False
def filter_nsigma(outputs, n):
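    '''Keep only the values lying within n standard deviations of the histogram peak.'''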
ns, bins = np.histogram(outputs, bins=200)
peak = bins[np.argmax(ns)]
std = np.std(outputs)
return [output for output in outputs if (peak - n * std) < output < (peak + n * std)]
def main():
# control group
print('control group root: ')
control_root = str(input())
print('control group name:')
control_name = str(input())
# experimental group
print('# of experimental groups:')
ex_number = int(input())
print('experimental group roots (%i):' % ex_number)
ex_root = [str(input()) for _ in range(ex_number)]
print('experimental group names')
ex_names = []
for i in range(ex_number):
print('name for ' + ex_root[i])
ex_names.append(str(input()))
# get residuals
print('calculating residuals')
control_residual = cwm_residual(root_dir=control_root)
ex_residuals = [neural_residual(root_dir=ex_root[i]) for i in range(ex_number)]
# draw histograms
print('drawing histograms')
fig, axes = plt.subplots(1, 3, figsize=(14, 4))
for axis in range(3):
axes[axis].hist(control_residual[axis],
bins=200,
density=True,
histtype='step',
linestyle=':',
color='black',
label=control_name)
for i in range(ex_number):
axes[axis].hist(ex_residuals[i][axis],
bins=200,
density=True,
histtype='step',
label=ex_names[i])
# Text on filtered sigma
control_filtered_std = np.std(filter_nsigma(control_residual[axis], n=2))
ex_filtered_std = [np.std(filter_nsigma(ex_residuals[i][axis], n=2)) for i in range(ex_number)]
text_std = '$\\sigma_{%s}=%.1fmm$' % (control_name, control_filtered_std)
for i in range(ex_number):
text_std += '\n$\\sigma_{%s}=%.1fmm$' % (ex_names[i], ex_filtered_std[i])
axes[axis].text(200, 0.78/100,
text_std,
ha='left', va='top',
fontsize=8,
bbox=dict(boxstyle='square', fc='w'))
# axes properties
axis_name = ['x', 'y', 'z'][axis]
axes[axis].set_xlabel(r'$%s_{rec} - %s_{real} $ (mm)' % (axis_name, axis_name))
axes[axis].set_ylabel('portion')
axes[axis].yaxis.set_major_formatter(PercentFormatter(1))
axes[axis].set_xlim([-1000, 1000])
axes[axis].set_ylim([0, 0.8/100])
axes[axis].grid()
axes[axis].legend(fontsize=8, loc='upper left')
plt.tight_layout()
plt.savefig('MC_vis_histogram.png')
plt.close()
if __name__ == '__main__':
main()
|
[
"matplotlib.ticker.PercentFormatter",
"utils.load",
"numpy.array",
"numpy.linalg.norm",
"scipy.interpolate.interp2d",
"nets.Net",
"itertools.repeat",
"numpy.histogram",
"matplotlib.pyplot.close",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"nets.CNN1c",
"nets.Net2c",
"numpy.argmax",
"utils.path_list",
"numpy.std",
"torch.device",
"utils.save",
"nets.CNN2c",
"numpy.zeros",
"multiprocessing.Pool",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
] |
[((2208, 2242), 'utils.load', 'load', (['"""src/pmtcoordinates_ID.json"""'], {}), "('src/pmtcoordinates_ID.json')\n", (2212, 2242), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((2259, 2293), 'utils.load', 'load', (["(root_dir + '/testpaths.list')"], {}), "(root_dir + '/testpaths.list')\n", (2263, 2293), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((2325, 2343), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(40)'}), '(processes=40)\n', (2329, 2343), False, 'from multiprocessing import Pool\n'), ((3001, 3011), 'utils.load', 'load', (['path'], {}), '(path)\n', (3005, 3011), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((3372, 3385), 'numpy.zeros', 'np.zeros', (['(354)'], {}), '(354)\n', (3380, 3385), True, 'import numpy as np\n'), ((4527, 4558), 'numpy.histogram', 'np.histogram', (['outputs'], {'bins': '(200)'}), '(outputs, bins=200)\n', (4539, 4558), True, 'import numpy as np\n'), ((4600, 4615), 'numpy.std', 'np.std', (['outputs'], {}), '(outputs)\n', (4606, 4615), True, 'import numpy as np\n'), ((5514, 5549), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(14, 4)'}), '(1, 3, figsize=(14, 4))\n', (5526, 5549), True, 'import matplotlib.pyplot as plt\n'), ((7194, 7212), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7210, 7212), True, 'import matplotlib.pyplot as plt\n'), ((7217, 7252), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""MC_vis_histogram.png"""'], {}), "('MC_vis_histogram.png')\n", (7228, 7252), True, 'import matplotlib.pyplot as plt\n'), ((7257, 7268), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7266, 7268), True, 'import matplotlib.pyplot as plt\n'), ((358, 396), 'utils.load', 'load', (["(root_dir + '/configuration.json')"], {}), "(root_dir + '/configuration.json')\n", (362, 396), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((449, 459), 'nets.Net', 'nets.Net', ([], {}), '()\n', (457, 459), False, 'import nets\n'), ((765, 797), 'utils.path_list', 'path_list', (["(root_dir + '/models/')"], {}), "(root_dir + '/models/')\n", (774, 797), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((819, 853), 'utils.path_list', 'path_list', (['epoch_path'], {'filter': '"""pt"""'}), "(epoch_path, filter='pt')\n", (828, 853), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((1298, 1318), 'utils.load', 'load', (['"""src/interp_r"""'], {}), "('src/interp_r')\n", (1302, 1318), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((1338, 1358), 'utils.load', 'load', (['"""src/interp_z"""'], {}), "('src/interp_z')\n", (1342, 1358), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((2917, 2936), 'numpy.array', 'np.array', (['residuals'], {}), '(residuals)\n', (2925, 2936), True, 'import numpy as np\n'), ((3726, 3751), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (3734, 3751), True, 'import numpy as np\n'), ((4575, 4588), 'numpy.argmax', 'np.argmax', (['ns'], {}), '(ns)\n', (4584, 4588), True, 'import numpy as np\n'), ((504, 516), 'nets.Net2c', 'nets.Net2c', ([], {}), '()\n', (514, 516), False, 'import nets\n'), ((1002, 1040), 'utils.load', 'load', (["(root_dir + '/test_inputs.tensor')"], {}), "(root_dir + '/test_inputs.tensor')\n", (1006, 1040), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((4056, 4087), 'numpy.linalg.norm', 'np.linalg.norm', (['reco_vertex[:2]'], {}), '(reco_vertex[:2])\n', (4070, 4087), True, 'import numpy as np\n'), 
((4138, 4169), 'numpy.linalg.norm', 'np.linalg.norm', (['reco_vertex[:2]'], {}), '(reco_vertex[:2])\n', (4152, 4169), True, 'import numpy as np\n'), ((7001, 7020), 'matplotlib.ticker.PercentFormatter', 'PercentFormatter', (['(1)'], {}), '(1)\n', (7017, 7020), False, 'from matplotlib.ticker import PercentFormatter\n'), ((561, 573), 'nets.CNN1c', 'nets.CNN1c', ([], {}), '()\n', (571, 573), False, 'import nets\n'), ((918, 937), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (930, 937), False, 'import torch\n'), ((1779, 1808), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {'dtype': 'float'}), '(df, dtype=float)\n', (1791, 1808), True, 'import pandas as pd\n'), ((1974, 2025), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['R', 'Z', 'weight_R'], {'kind': '"""linear"""'}), "(R, Z, weight_R, kind='linear')\n", (1994, 2025), False, 'from scipy import interpolate\n'), ((2049, 2100), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['R', 'Z', 'weight_Z'], {'kind': '"""linear"""'}), "(R, Z, weight_Z, kind='linear')\n", (2069, 2100), False, 'from scipy import interpolate\n'), ((2113, 2143), 'utils.save', 'save', (['interp_r', '"""src/interp_r"""'], {}), "(interp_r, 'src/interp_r')\n", (2117, 2143), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((2156, 2186), 'utils.save', 'save', (['interp_z', '"""src/interp_z"""'], {}), "(interp_z, 'src/interp_z')\n", (2160, 2186), False, 'from utils import load, save, path_list, DEAD_PMTS\n'), ((2641, 2657), 'itertools.repeat', 'repeat', (['interp_r'], {}), '(interp_r)\n', (2647, 2657), False, 'from itertools import repeat\n'), ((2701, 2717), 'itertools.repeat', 'repeat', (['interp_z'], {}), '(interp_z)\n', (2707, 2717), False, 'from itertools import repeat\n'), ((2761, 2782), 'itertools.repeat', 'repeat', (['pmt_positions'], {}), '(pmt_positions)\n', (2767, 2782), False, 'from itertools import repeat\n'), ((3874, 3939), 'numpy.array', 'np.array', (["[pmt_pos['x'], pmt_pos['y'], pmt_pos['z']]"], {'dtype': 'float'}), "([pmt_pos['x'], pmt_pos['y'], pmt_pos['z']], dtype=float)\n", (3882, 3939), True, 'import numpy as np\n'), ((4332, 4363), 'numpy.linalg.norm', 'np.linalg.norm', (['reco_vertex[:2]'], {}), '(reco_vertex[:2])\n', (4346, 4363), True, 'import numpy as np\n'), ((618, 630), 'nets.CNN2c', 'nets.CNN2c', ([], {}), '()\n', (628, 630), False, 'import nets\n'), ((1062, 1100), 'utils.load', 'load', (["(root_dir + '/test_labels.tensor')"], {}), "(root_dir + '/test_labels.tensor')\n", (1066, 1100), False, 'from utils import load, save, path_list, DEAD_PMTS\n')]
|
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Runs a power flow.
"""
from sys import stdout, stderr
from os.path import dirname, join
from time import time
from numpy import r_, c_, ix_, zeros, pi, ones, exp, argmax,angle
from numpy import flatnonzero as find
#from pypower.bustypes import bustypes
#from pypower.ext2int import ext2int
#from pypower.loadcase import loadcase
#from pypower.ppoption import ppoption
#from pypower.ppver import ppver
#from pypower.makeBdc import makeBdc
from pypower.makeSbus import makeSbus
#from pypower.dcpf import dcpf
#from pypower.makeYbus import makeYbus
from pypower.newtonpf_fast import newtonpf_fast
#from pypower.fdpf import fdpf
#from pypower.gausspf import gausspf
#from pypower.makeB import makeB
#from pypower.pfsoln import pfsoln
#from pypower.printpf import printpf
#from pypower.savecase import savecase
#from pypower.int2ext import int2ext
from pypower.idx_bus import PD, QD, VM, VA, GS, BUS_TYPE, PQ, REF
from pypower.idx_brch import PF, PT, QF, QT
from pypower.idx_gen import PG, QG, VG, QMAX, QMIN, GEN_BUS, GEN_STATUS
def runpf_fast(Ybus, Yf,Yt,ref, pv, pq,on,ppc, ppopt=None, fname='', solvedcase=''):
"""Runs a power flow.
Runs a power flow [full AC Newton's method by default] and optionally
returns the solved values in the data matrices, a flag which is C{True} if
the algorithm was successful in finding a solution, and the elapsed
time in seconds. All input arguments are optional. If C{casename} is
provided it specifies the name of the input data file or dict
containing the power flow data. The default value is 'case9'.
If the ppopt is provided it overrides the default PYPOWER options
vector and can be used to specify the solution algorithm and output
options among other things. If the 3rd argument is given the pretty
printed output will be appended to the file whose name is given in
C{fname}. If C{solvedcase} is specified the solved case will be written
to a case file in PYPOWER format with the specified name. If C{solvedcase}
ends with '.mat' it saves the case as a MAT-file otherwise it saves it
as a Python-file.
If the C{ENFORCE_Q_LIMS} options is set to C{True} [default is false] then
if any generator reactive power limit is violated after running the AC
power flow, the corresponding bus is converted to a PQ bus, with Qg at
the limit, and the case is re-run. The voltage magnitude at the bus
will deviate from the specified value in order to satisfy the reactive
power limit. If the reference bus is converted to PQ, the first
remaining PV bus will be used as the slack bus for the next iteration.
This may result in the real power output at this generator being
slightly off from the specified values.
Enforcing of generator Q limits inspired by contributions from Mu Lin,
Lincoln University, New Zealand (1/14/05).
@author: <NAME> (PSERC Cornell)
"""
## default arguments
## options
## read data
#ppc = loadcase(casedata)
## convert to internal indexing
ppc["branch"][:,[0,1]]-=1
ppc["bus"][:,0]-=1
ppc["gen"][:,0]-=1
baseMVA, bus, gen, branch = \
ppc["baseMVA"], ppc["bus"], ppc["gen"], ppc["branch"]
## get bus index lists of each type of bus
#ref, pv, pq = bustypes(bus, gen)
#
# generator info
#print(gen[:, GEN_STATUS])
#on = find(gen[:, GEN_STATUS] > 0) ## which generators are on?
gbus = gen[on, GEN_BUS].astype(int) ## what buses are they at?
##----- run the power flow -----
t0 = time()
V0 = bus[:, VM] * exp(1j * 0.017453292519943295 * bus[:, VA])
V0[gbus] = gen[on, VG] / abs(V0[gbus]) * V0[gbus]
## build admittance matrices
#Ybus, Yf, Yt = makeYbus(baseMVA, bus, branch)
## compute complex bus power injections [generation - load]
Sbus = makeSbus(baseMVA, bus, gen)
## run the power flow
V, success, i = newtonpf_fast(Ybus, Sbus, V0, ref, pv, pq, ppopt)
## update data matrices with solution
#bus, gen, branch = pfsoln(baseMVA, bus, gen, branch, Ybus, Yf, Yt, V, ref, pv, pq)
bus[:, VM] = abs(V)
bus[:, VA] = angle(V) * 180 / pi
#UNTIL HERE
ppc["et"] = time() - t0
ppc["success"] = success
##----- output results -----
## convert back to original bus numbering & print results
ppc["bus"], ppc["gen"], ppc["branch"] = bus, gen, branch
ppc["branch"][:,[0,1]]+=1
ppc["bus"][:,0]+=1
ppc["gen"][:,0]+=1
return ppc, success,i
if __name__ == '__main__':
    # runpf_fast() needs pre-built admittance matrices, bus type indices and a case
    # dict, so there is no stand-alone example run here.
    pass
|
[
"numpy.angle",
"numpy.exp",
"pypower.newtonpf_fast.newtonpf_fast",
"time.time",
"pypower.makeSbus.makeSbus"
] |
[((3722, 3728), 'time.time', 'time', ([], {}), '()\n', (3726, 3728), False, 'from time import time\n'), ((4014, 4041), 'pypower.makeSbus.makeSbus', 'makeSbus', (['baseMVA', 'bus', 'gen'], {}), '(baseMVA, bus, gen)\n', (4022, 4041), False, 'from pypower.makeSbus import makeSbus\n'), ((4090, 4139), 'pypower.newtonpf_fast.newtonpf_fast', 'newtonpf_fast', (['Ybus', 'Sbus', 'V0', 'ref', 'pv', 'pq', 'ppopt'], {}), '(Ybus, Sbus, V0, ref, pv, pq, ppopt)\n', (4103, 4139), False, 'from pypower.newtonpf_fast import newtonpf_fast\n'), ((3754, 3799), 'numpy.exp', 'exp', (['(1.0j * 0.017453292519943295 * bus[:, VA])'], {}), '(1.0j * 0.017453292519943295 * bus[:, VA])\n', (3757, 3799), False, 'from numpy import r_, c_, ix_, zeros, pi, ones, exp, argmax, angle\n'), ((4365, 4371), 'time.time', 'time', ([], {}), '()\n', (4369, 4371), False, 'from time import time\n'), ((4312, 4320), 'numpy.angle', 'angle', (['V'], {}), '(V)\n', (4317, 4320), False, 'from numpy import r_, c_, ix_, zeros, pi, ones, exp, argmax, angle\n')]
|
# This program imports the Federal Reserve economic data consumer price index
# values from 1990 and uses those values to get the real, inflation-adjusted
# values of the specific commodities/markets.
# Then, when a commodity hits a specific low inflation-based price, the algo
# enters into a long position and exits when the commodity/market hits a relatively
# high price.
import numpy
import csv
# element zero is the oldest element, in this case, inflation from 2/1/1990
def cpi_array():
cpi_array = numpy.zeros((328))
count = 0
with open("CPI_Spyder.csv", 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
cpi = float(row[1])
cpi_array[count] = cpi
count += 1
csvfile.close()
return cpi_array
# market dictionary [buy price, sell price, current pos, initial entry pos, fall by price, add to pos price
# if it falls by 'fall by price', # of times added to the pos]
def market_dictionary():
market_dictionary = {}
market_dictionary[0] = [10000.0,12500.0,0,.5,.08,.1, 0]
market_dictionary[1] = [8000.0,12000.0,0,.5,.12,.1, 0]
market_dictionary[2] = [20000.0,25000.0,0,.5,.1,.1, 0]
market_dictionary[3] = [15000.0,20000.0,0,.5,.06,.1, 0]
market_dictionary[4] = [26000.0,36000.0,0,.5,.07,.1, 0]
market_dictionary[5] = [25000.0,30000.0,0,.5,.08,.1, 0]
market_dictionary[6] = [20000.0,21000.0,0,.5,.05,.1, 0]
market_dictionary[7] = [14000.0,17000.0,0,.5,.07,.1, 0]
market_dictionary[8] = [15000.0,20000.0,0,.5,.07,.1, 0]
market_dictionary[9] = [5000.0,6000.0,0,.5,.1,.1, 0]
market_dictionary[10] = [13000.0,19500.0,0,.5,.075,.1, 0]
return market_dictionary
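# Illustrative helper (not used by the system below; the CPI numbers in the example
# are made up): the strategy compares *real* prices, i.e. nominal closes scaled by
# BASE_CPI / current CPI, against the fixed thresholds in market_dictionary.
def real_price_sketch(nominal_close, base_cpi, current_cpi):
    # e.g. real_price_sketch(20000.0, 127.5, 255.0) == 10000.0, which would
    # trigger the entry threshold of market 0 above (buy price 10000.0).
    return nominal_close * (base_cpi / current_cpi)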
def myTradingSystem(DATE, OPEN, HIGH, LOW, CLOSE, VOL, exposure, equity, settings):
    # initialize the basics
nMarkets = CLOSE.shape[1]
pos = numpy.zeros(nMarkets)
i = 0
settings['countDays'] += 1
    # set the CPI multiplier so prices can be compared in real (inflation-adjusted) terms
settings['CPI_muliplyer'] = (settings['BASE_CPI'] / settings['cpi_array'][ settings['count']])
    # advance to a new CPI value every month (21 trading days) by incrementing count
if settings['countDays'] % 21 == 0:
settings['count'] += 1
    # entering the position
for i in range(nMarkets - 1):
if (CLOSE[-1, i] * settings['CPI_muliplyer']) <= settings['market_dictionary'][i][0]:
settings['market_dictionary'][i][2] = settings['market_dictionary'][i][3]
            # pyramiding into a falling position - element 6 counts how many times it was added to
            if (CLOSE[-1,i] * settings['CPI_muliplyer']) <= (settings['market_dictionary'][i][0] /
                    (1+(settings['market_dictionary'][i][4] * 5))) and settings['market_dictionary'][i][6] == 4:
                settings['market_dictionary'][i][6] += 1
                settings['market_dictionary'][i][3] += settings['market_dictionary'][i][5]
            elif (CLOSE[-1,i] * settings['CPI_muliplyer']) <= (settings['market_dictionary'][i][0] /
                    (1+(settings['market_dictionary'][i][4] * 4))) and settings['market_dictionary'][i][6] == 3:
                settings['market_dictionary'][i][6] += 1
                settings['market_dictionary'][i][3] += settings['market_dictionary'][i][5]
            elif (CLOSE[-1,i] * settings['CPI_muliplyer']) <= (settings['market_dictionary'][i][0] /
                    (1+(settings['market_dictionary'][i][4] * 3))) and settings['market_dictionary'][i][6] == 2:
                settings['market_dictionary'][i][6] += 1
                settings['market_dictionary'][i][3] += settings['market_dictionary'][i][5]
            elif (CLOSE[-1,i] * settings['CPI_muliplyer']) <= (settings['market_dictionary'][i][0] /
                    (1+(settings['market_dictionary'][i][4] * 2))) and settings['market_dictionary'][i][6] == 1:
                settings['market_dictionary'][i][6] += 1
                settings['market_dictionary'][i][3] += settings['market_dictionary'][i][5]
            elif (CLOSE[-1,i] * settings['CPI_muliplyer']) <= (settings['market_dictionary'][i][0] /
                    (1+settings['market_dictionary'][i][4])) and settings['market_dictionary'][i][6] == 0:
                settings['market_dictionary'][i][6] += 1
                settings['market_dictionary'][i][3] += settings['market_dictionary'][i][5]
#closing the position
if (CLOSE[-1, i] * settings['CPI_muliplyer']) >= settings['market_dictionary'][i][1]:
settings['market_dictionary'][i][2] = 0
settings['market_dictionary'][i][6] = 0
    # set position to be returned equal to market dictionary element 2
for i in range(nMarkets - 1):
pos[i] = settings['market_dictionary'][i][2]
pos[11] = 11
return pos, settings
def mySettings():
''' Define your trading system settings here '''
settings = {}
# Futures Contracts
settings['markets'] = ['F_C', 'F_CC', 'F_CL', 'F_CT', 'F_FC','F_KC',
'F_LC', 'F_LN', 'F_NG', 'F_O', 'F_PA', 'CASH']
    # 19900104 - 20170710
settings['beginInSample'] = '19900104'
#settings['endInSample'] = '20170710'
settings['lookback'] = 21
settings['budget'] = 10**6
settings['slippage'] = 0.05
settings['countDays'] = 0
settings['count'] = 0
settings['cpi_array'] = cpi_array()
settings['market_dictionary'] = market_dictionary()
settings['BASE_CPI'] = settings['cpi_array'][0]
settings['CPI_muliplyer'] = 0
return settings
# Evaluate trading system defined in current file.
if __name__ == '__main__':
import quantiacsToolbox
results = quantiacsToolbox.runts(__file__)
print(results['stats'])
|
[
"numpy.zeros",
"csv.reader",
"quantiacsToolbox.runts"
] |
[((516, 532), 'numpy.zeros', 'numpy.zeros', (['(328)'], {}), '(328)\n', (527, 532), False, 'import numpy\n'), ((1860, 1881), 'numpy.zeros', 'numpy.zeros', (['nMarkets'], {}), '(nMarkets)\n', (1871, 1881), False, 'import numpy\n'), ((5505, 5537), 'quantiacsToolbox.runts', 'quantiacsToolbox.runts', (['__file__'], {}), '(__file__)\n', (5527, 5537), False, 'import quantiacsToolbox\n'), ((615, 634), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (625, 634), False, 'import csv\n')]
|
import numpy as np
gama = 0.5
alfa = 0.75
data = np.array([[1, 1, 1], [1, 2, -1], [2, 1, 1]]) #(s, s', R)
Q = np.zeros((data.shape[0]+1, 2)) #(iterations, |S|)
k = 1
for d in range(data.shape[0]):
    R = data[d, 2] # immediate reward
idx_s = data[d, 0] - 1 # index of state s in Q
idx_sp = data[d, 1] - 1 #index of state s' in Q
# Q[k, idx_s] = (1 - alfa) * Q[k - 1, idx_s] + alfa * (R + gama * np.max(Q[0:k, idx_sp]))
Q[k, idx_s] = (1 - alfa) * Q[k - 1, idx_s] + alfa * (R + gama * Q[k-1, idx_sp])
k += 1
print(Q)
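# Note on the update rule: the commented-out line above takes the maximum of
# Q(s') over all previous iterations (np.max(Q[0:k, idx_sp])), while the line
# actually used bootstraps only from the previous iteration, Q[k-1, idx_sp].
# Both are value-iteration style updates of the form
#   Q(s) <- (1 - alfa) * Q(s) + alfa * (R + gama * V(s'))
# and differ only in how the estimate V(s') is chosen.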
|
[
"numpy.array",
"numpy.zeros"
] |
[((50, 94), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 2, -1], [2, 1, 1]]'], {}), '([[1, 1, 1], [1, 2, -1], [2, 1, 1]])\n', (58, 94), True, 'import numpy as np\n'), ((111, 143), 'numpy.zeros', 'np.zeros', (['(data.shape[0] + 1, 2)'], {}), '((data.shape[0] + 1, 2))\n', (119, 143), True, 'import numpy as np\n')]
|
import os
import numpy as np
import warnings
warnings.filterwarnings('ignore')
import torch
import torch.nn as nn
from torchvision.utils import save_image
from utils import get_lr_scheduler, sample_images, inference
# Reproducibility #
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Device Configuration #
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def train_srcnns(train_loader, val_loader, model, device, args):
# Loss Function #
criterion = nn.L1Loss()
# Optimizers #
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.5, 0.999))
optimizer_scheduler = get_lr_scheduler(optimizer=optimizer, args=args)
# Lists #
losses = list()
# Train #
print("Training {} started with total epoch of {}.".format(str(args.model).upper(), args.num_epochs))
for epoch in range(args.num_epochs):
for i, (high, low) in enumerate(train_loader):
# Data Preparation #
high = high.to(device)
low = low.to(device)
# Forward Data #
generated = model(low)
# Calculate Loss #
loss = criterion(generated, high)
# Initialize Optimizer #
optimizer.zero_grad()
# Back Propagation and Update #
loss.backward()
optimizer.step()
# Add items to Lists #
losses.append(loss.item())
# Print Statistics #
if (i+1) % args.print_every == 0:
print("{} | Epoch [{}/{}] | Iterations [{}/{}] | Loss {:.4f}"
.format(str(args.model).upper(), epoch+1, args.num_epochs, i+1, len(train_loader), np.average(losses)))
# Save Sample Images #
sample_images(val_loader, args.batch_size, args.upscale_factor, model, epoch, args.samples_path, device)
# Adjust Learning Rate #
optimizer_scheduler.step()
# Save Model Weights and Inference #
if (epoch+1) % args.save_every == 0:
torch.save(model.state_dict(), os.path.join(args.weights_path, '{}_Epoch_{}.pkl'.format(model.__class__.__name__, epoch+1)))
inference(val_loader, model, args.upscale_factor, epoch, args.inference_path, device)
|
[
"numpy.average",
"utils.inference",
"torch.nn.L1Loss",
"utils.get_lr_scheduler",
"torch.cuda.is_available",
"utils.sample_images",
"warnings.filterwarnings"
] |
[((45, 78), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (68, 78), False, 'import warnings\n'), ((365, 390), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (388, 390), False, 'import torch\n'), ((508, 519), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (517, 519), True, 'import torch.nn as nn\n'), ((651, 699), 'utils.get_lr_scheduler', 'get_lr_scheduler', ([], {'optimizer': 'optimizer', 'args': 'args'}), '(optimizer=optimizer, args=args)\n', (667, 699), False, 'from utils import get_lr_scheduler, sample_images, inference\n'), ((2201, 2291), 'utils.inference', 'inference', (['val_loader', 'model', 'args.upscale_factor', 'epoch', 'args.inference_path', 'device'], {}), '(val_loader, model, args.upscale_factor, epoch, args.\n inference_path, device)\n', (2210, 2291), False, 'from utils import get_lr_scheduler, sample_images, inference\n'), ((1787, 1895), 'utils.sample_images', 'sample_images', (['val_loader', 'args.batch_size', 'args.upscale_factor', 'model', 'epoch', 'args.samples_path', 'device'], {}), '(val_loader, args.batch_size, args.upscale_factor, model,\n epoch, args.samples_path, device)\n', (1800, 1895), False, 'from utils import get_lr_scheduler, sample_images, inference\n'), ((1710, 1728), 'numpy.average', 'np.average', (['losses'], {}), '(losses)\n', (1720, 1728), True, 'import numpy as np\n')]
|
##############################
## MFP_K1000.py ##
## <NAME> ##
## Version 2020.03.25 ##
##############################
import os
import os.path as osp
import time
import subprocess as spc
import numpy as np
import scipy as sp
import astropy.io.fits as fits
import healpy as hp
import treecorr as tree
import commonFunctions as cf
import HEALPixFunctions as hpf
################################################################################
## Parameters
class Parameters:
KiDSPath = 'data/KiDS/'
dataPath = 'data/mockFootprint/'
absDataPath = '/disk05/calin/91_Data/mockFootprint/'
## Mask parameters
area_BOSS = 9329 ## [deg^2]
area_BOSS_reduced = 1274.319868 ## From my own calculations
area_BOSS_wcs = 408.321
area_BOSS_4Band = 339.298
area_BOSS_9Band = 319.506
area_2dFLenS_SGP = 510.803964 ## [deg^2]
area_2dFLenS_wcs = 424.508017
area_2dFLenS_gri = 355.283139
area_2dFLenS_9Band = 341.888289
area_KiDS = 773.286 ## [deg^2]
area_KiDS_North = 334.138
area_KiDS_South = 439.148
area_KiDS_North_new = 371.801
area_KiDS_South_new = 401.485
## Galaxy number density
n_gal_BOSS_reduced_z0 = 0.014496
n_gal_BOSS_reduced_z1 = 0.016595
n_gal_BOSS_wcs_z0 = 0.014437
n_gal_BOSS_wcs_z1 = 0.016265
n_gal_2dFLenS_SGP_z0 = 0.005813
n_gal_2dFLenS_SGP_z1 = 0.006067
n_gal_2dFLenS_wcs_z0 = 0.005857
n_gal_2dFLenS_wcs_z1 = 0.006031
n_gal_2dFLenS_gri_z0 = 0.002891
n_gal_2dFLenS_gri_z1 = 0.003677
################################################################################
## Functions related to masks - I
## This function loads BOSS random catalogues
def loadFitsLenCat(surveyTag, zInd, bitMaskTag='reduced'):
P = Parameters()
if bitMaskTag in ['all', 'reduced', 'SGP']: ## No selection
bitMask = 000000
elif bitMaskTag == 'wcs': ## KiDS wcs
bitMask = 0x4000
elif bitMaskTag == 'gri':
bitMask = 0x6FFC ## KiDS gri overlap
elif bitMaskTag == '9Band':
bitMask = 0x681C ## KiDS 9-band overlap
else:
raise ValueError('Bad bit mask option: \"%s\"' % bitMaskTag)
name = '%sKiDS-1000_GGLCATS/%s_z%d.fits' % (P.KiDSPath, surveyTag, zInd+1)
data = fits.getdata(name, 1)
print('Loaded \"%s\"' % name)
flag = data.field('KIDSMASK')
ind = np.logical_not(np.array(flag.astype(int) & bitMask, dtype=bool))
return data[ind]
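## Illustration of the KIDSMASK selection above (comment only, not code the
## pipeline needs): an object is kept when none of the bits in `bitMask` are set
## in its flag. For bitMaskTag='wcs' (bitMask = 0x4000), a flag of 0x4000 is
## rejected, while a flag of 0x0008 is kept, since 0x0008 & 0x4000 == 0.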
## This function loads BOSS random catalogues & pours them onto a HEALPix map.
def saveFitsCountMap_BOSS(nside, bitMaskTag='wcs'):
P = Parameters()
nbPix = 12 * nside * nside
full = np.zeros(nbPix, dtype=int)
## Fill catalogues
for zInd in range(2):
data = loadFitsLenCat('BOSS_random', zInd, bitMaskTag=bitMaskTag)
RA = data.field('ALPHA_J2000')
DEC = data.field('DELTA_J2000')
pix = hpf.RADECToPatch(nside, RA, DEC)
for i in pix:
full[i] += 1
## Save
name = '%sKiDS-1000_for_mocks/countMap_BOSS_%s_nside%d.fits' % (P.KiDSPath, bitMaskTag, nside)
hpf.saveFitsFullMap(name, full, verbose=True)
return
def saveFitsCountMap_overlap(surveyTag_K, surveyTag_L, nside_L):
P = Parameters()
nside_K = 4096
name = '%sKiDS-1000_for_mocks/countMap_%s_nside%d.fits' % (P.KiDSPath, surveyTag_L, nside_L)
count_L = hpf.loadFitsFullMap(name)
count_L = hpf.increaseResolution(count_L, nside_K)
name = '%sKiDS-1000_for_mocks/mask_%s_fromArea_nside%d.fits' % (P.KiDSPath, surveyTag_K, nside_K)
mask_K = hpf.loadFitsFullMap(name)
ind = mask_K.astype(bool)
del mask_K
count_L[~ind] = 0
del ind
## Save
surveyTag_o = 'BOSS_KiDS_overlap' if 'BOSS' in surveyTag_L else '2dFLenS_KiDS_overlap'
name = '%sKiDS-1000_for_mocks/countMap_%s_nside%d.fits' % (P.KiDSPath, surveyTag_o, nside_K)
hpf.saveFitsFullMap(name, count_L)
del count_L
return
## 'BOSS_wcs' is called
def saveFitsMask_fromCountMap(surveyTag):
P = Parameters()
if surveyTag == 'BOSS_reduced':
nside = 2048
elif surveyTag == 'BOSS_wcs':
nside = 2048
elif surveyTag == '2dFLenS_SGP':
nside = 4096
elif surveyTag == '2dFLenS_wcs':
nside = 4096
else:
raise NotImplementedError('surveyTag = \"%s\" not implemented' % surveyTag)
name = '%sKiDS-1000_for_mocks/countMap_%s_nside%d.fits' % (P.KiDSPath, surveyTag, nside)
mask = hpf.loadFitsFullMap(name)
mask = np.fmin(mask, 1)
if nside == 2048:
nside2 = 4096
mask = hpf.increaseResolution(mask, nside2)
name = '%sKiDS-1000_for_mocks/mask_%s_fromCountMap2048_nside%d.fits' % (P.KiDSPath, surveyTag, nside2)
hpf.saveFitsFullMap(name, mask)
return
## Save
name = '%sKiDS-1000_for_mocks/mask_%s_fromCountMap_nside%d.fits' % (P.KiDSPath, surveyTag, nside)
hpf.saveFitsFullMap(name, mask)
return
# This function combines the 2dFLenS mask and BOSS mask into one
def saveFitsLensMask():
P = Parameters()
name = '%sKiDS-1000_for_mocks/mask_BOSS_wcs_fromCountMap2048_nside4096.fits' % P.KiDSPath
mask_B = hpf.loadFitsFullMap(name)
name = '%sKiDS-1000_for_mocks/mask_2dFLenS_wcs_fromCountMap_nside4096.fits' % P.KiDSPath
mask_2 = hpf.loadFitsFullMap(name)
mask_L = mask_B + mask_2
mask_L = np.fmin(mask_L, 1)
name = '%sKiDS-1000_for_mocks/mask_BOSS_2dFLenS_wcs_nside4096.fits' % P.KiDSPath
hpf.saveFitsFullMap(name, mask_L)
return
## Then I called the following & used the output of the 2nd line
## saveFitsCountMap_BOSS(2048, 'wcs') ## Need external
## saveFitsMask_fromCountMap('BOSS_wcs')
###############################################################################
|
[
"HEALPixFunctions.increaseResolution",
"HEALPixFunctions.loadFitsFullMap",
"HEALPixFunctions.saveFitsFullMap",
"astropy.io.fits.getdata",
"numpy.zeros",
"HEALPixFunctions.RADECToPatch",
"numpy.fmin"
] |
[((2281, 2302), 'astropy.io.fits.getdata', 'fits.getdata', (['name', '(1)'], {}), '(name, 1)\n', (2293, 2302), True, 'import astropy.io.fits as fits\n'), ((2653, 2679), 'numpy.zeros', 'np.zeros', (['nbPix'], {'dtype': 'int'}), '(nbPix, dtype=int)\n', (2661, 2679), True, 'import numpy as np\n'), ((3065, 3110), 'HEALPixFunctions.saveFitsFullMap', 'hpf.saveFitsFullMap', (['name', 'full'], {'verbose': '(True)'}), '(name, full, verbose=True)\n', (3084, 3110), True, 'import HEALPixFunctions as hpf\n'), ((3332, 3357), 'HEALPixFunctions.loadFitsFullMap', 'hpf.loadFitsFullMap', (['name'], {}), '(name)\n', (3351, 3357), True, 'import HEALPixFunctions as hpf\n'), ((3370, 3410), 'HEALPixFunctions.increaseResolution', 'hpf.increaseResolution', (['count_L', 'nside_K'], {}), '(count_L, nside_K)\n', (3392, 3410), True, 'import HEALPixFunctions as hpf\n'), ((3525, 3550), 'HEALPixFunctions.loadFitsFullMap', 'hpf.loadFitsFullMap', (['name'], {}), '(name)\n', (3544, 3550), True, 'import HEALPixFunctions as hpf\n'), ((3827, 3861), 'HEALPixFunctions.saveFitsFullMap', 'hpf.saveFitsFullMap', (['name', 'count_L'], {}), '(name, count_L)\n', (3846, 3861), True, 'import HEALPixFunctions as hpf\n'), ((4366, 4391), 'HEALPixFunctions.loadFitsFullMap', 'hpf.loadFitsFullMap', (['name'], {}), '(name)\n', (4385, 4391), True, 'import HEALPixFunctions as hpf\n'), ((4401, 4417), 'numpy.fmin', 'np.fmin', (['mask', '(1)'], {}), '(mask, 1)\n', (4408, 4417), True, 'import numpy as np\n'), ((4776, 4807), 'HEALPixFunctions.saveFitsFullMap', 'hpf.saveFitsFullMap', (['name', 'mask'], {}), '(name, mask)\n', (4795, 4807), True, 'import HEALPixFunctions as hpf\n'), ((5037, 5062), 'HEALPixFunctions.loadFitsFullMap', 'hpf.loadFitsFullMap', (['name'], {}), '(name)\n', (5056, 5062), True, 'import HEALPixFunctions as hpf\n'), ((5171, 5196), 'HEALPixFunctions.loadFitsFullMap', 'hpf.loadFitsFullMap', (['name'], {}), '(name)\n', (5190, 5196), True, 'import HEALPixFunctions as hpf\n'), ((5264, 5282), 'numpy.fmin', 'np.fmin', (['mask_L', '(1)'], {}), '(mask_L, 1)\n', (5271, 5282), True, 'import numpy as np\n'), ((5407, 5440), 'HEALPixFunctions.saveFitsFullMap', 'hpf.saveFitsFullMap', (['name', 'mask_L'], {}), '(name, mask_L)\n', (5426, 5440), True, 'import HEALPixFunctions as hpf\n'), ((2883, 2915), 'HEALPixFunctions.RADECToPatch', 'hpf.RADECToPatch', (['nside', 'RA', 'DEC'], {}), '(nside, RA, DEC)\n', (2899, 2915), True, 'import HEALPixFunctions as hpf\n'), ((4470, 4506), 'HEALPixFunctions.increaseResolution', 'hpf.increaseResolution', (['mask', 'nside2'], {}), '(mask, nside2)\n', (4492, 4506), True, 'import HEALPixFunctions as hpf\n'), ((4618, 4649), 'HEALPixFunctions.saveFitsFullMap', 'hpf.saveFitsFullMap', (['name', 'mask'], {}), '(name, mask)\n', (4637, 4649), True, 'import HEALPixFunctions as hpf\n')]
|
"""
Abinit workflows
"""
from __future__ import division, print_function
import sys
import os
import os.path
import shutil
import abc
import collections
import functools
import numpy as np
from pprint import pprint
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.design_patterns import Enum, AttrDict
from pymatgen.core.physical_constants import Bohr2Ang, Ang2Bohr, Ha2eV, Ha_eV, Ha2meV
from pymatgen.serializers.json_coders import MSONable, json_pretty_dump
from pymatgen.io.smartio import read_structure
from pymatgen.util.num_utils import iterator_from_slice, chunks
from pymatgen.io.abinitio.task import task_factory, Task
from .utils import abinit_output_iscomplete, File
from .netcdf import GSR_Reader
from .abiobjects import Smearing, AbiStructure, KSampling, Electrons
from .pseudos import Pseudo, PseudoDatabase, PseudoTable, get_abinit_psp_dir
from .strategies import ScfStrategy
from .task import RunMode
#import logging
#logger = logging.getLogger(__name__)
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "gmatteo at gmail.com"
__status__ = "Development"
__date__ = "$Feb 21, 2013M$"
#__all__ = [
#]
################################################################################
def map_method(method):
    "Decorator that calls item.method for all items in an iterable object."
@functools.wraps(method)
def wrapped(iter_obj, *args, **kwargs):
return [getattr(item, method.__name__)(*args, **kwargs)
for item in iter_obj]
return wrapped
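# Intended usage of map_method (hypothetical example, kept as a comment):
#
#   class TaskList(list):
#       @map_method
#       def start(self):
#           "Calls item.start() for every task in the list."
#
#   TaskList([t1, t2]).start()   # -> [t1.start(), t2.start()]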
################################################################################
class Product(object):
"""
    A product represents a file produced by an AbinitTask instance, a file
    that is needed by another task in order to start the calculation.
"""
# TODO
# It would be nice to pass absolute paths to abinit with getden_path
# so that I can avoid creating symbolic links before running but
# the presence of the C-bindings complicates the implementation
    # (gfortran SIGFAULTs if I add strings to dataset_type!)
_ext2abivars = {
"_DEN": {"irdden": 1},
"_WFK": {"irdwfk": 1},
"_SCR": {"irdscr": 1},
"_QPS": {"irdqps": 1},
}
def __init__(self, ext, path):
self.ext = ext
self.file = File(path)
def __str__(self):
return "ext = %s, file = %s" % (self.ext, self.file)
def get_filepath(self):
return self.file.path
def get_abivars(self):
return self._ext2abivars[self.ext].copy()
class WorkLink(object):
"""
This object describes the dependencies among the tasks contained in a Work instance.
A WorkLink is a task that produces a list of products (files) that are
reused by the other tasks belonging to a Work instance.
One usually instantiates the object by calling work.register_task and produces_exts.
Example:
# Register the SCF task in work and get the link.
scf_link = work.register_task(scf_strategy)
# Register the NSCF calculation and its dependency on the SCF run.
nscf_link = work.register_task(nscf_strategy, links=scf_link.produces_exts("_DEN"))
"""
def __init__(self, task, exts=None):
"""
Args:
task:
The task associated to the link.
exts:
Extensions of the output files that are needed for running the other tasks.
"""
self._task = task
self._products = []
if exts is not None:
if isinstance(exts, str):
exts = [exts,]
for ext in exts:
prod = Product(ext, task.odata_path_from_ext(ext))
self._products.append(prod)
def __str__(self):
s = "%s: task %s with products\n %s" % (
self.__class__.__name__, repr(self._task), "\n".join(str(p) for p in self.products))
return s
@property
def products(self):
return self._products
def produces_exts(self, exts):
return WorkLink(self._task, exts=exts)
def get_abivars(self):
"""
Returns a dictionary with the abinit variables that must
be added to the input file in order to connect the two tasks.
"""
abivars = {}
for prod in self._products:
abivars.update(prod.get_abivars())
return abivars
def get_filepaths_and_exts(self):
"Returns the paths of the output files produced by self and its extensions"
filepaths = [prod.get_filepath() for prod in self._products]
exts = [prod.ext for prod in self._products]
return filepaths, exts
@property
def status(self):
"The status of the link, equivalent to the task status"
return self._task.status
################################################################################
class WorkflowError(Exception):
"Base class for the exceptions raised by Workflow objects"
class BaseWorkflow(object):
__metaclass__ = abc.ABCMeta
Error = WorkflowError
# interface modeled after subprocess.Popen
@abc.abstractproperty
def processes(self):
"Return a list of objects that support the subprocess.Popen protocol."
def poll(self):
"""
Check if all child processes have terminated. Set and return
returncode attribute.
"""
return [task.poll() for task in self]
def wait(self):
"""
Wait for child processed to terminate. Set and return returncode
attributes.
"""
return [task.wait() for task in self]
def communicate(self, input=None):
"""
Interact with processes: Send data to stdin. Read data from stdout and
stderr, until end-of-file is reached.
Wait for process to terminate. The optional input argument should be a
string to be sent to the child processed, or None, if no data should be
sent to the children.
communicate() returns a list of tuples (stdoutdata, stderrdata).
"""
return [task.communicate(input) for task in self]
@property
def returncodes(self):
"""
The children return codes, set by poll() and wait() (and indirectly by communicate()).
A None value indicates that the process hasn't terminated yet.
A negative value -N indicates that the child was terminated by signal N (Unix only).
"""
return [task.returncode for task in self]
@property
def ncpus_reserved(self):
"Returns the number of CPUs reserved in this moment."
ncpus = 0
for task in self:
if task.status in [task.S_SUB, task.S_RUN]:
ncpus += task.tot_ncpus
return ncpus
def fetch_task_to_run(self):
"""
        Returns the first task that is ready to run, or None if no task can be submitted at present.
Raises StopIteration if all tasks are done.
"""
for task in self:
# The task is ready to run if its status is S_READY and all the other links (if any) are done!
if (task.status == task.S_READY) and all([link_stat==task.S_DONE for link_stat in task.links_status]):
return task
# All the tasks are done so raise an exception that will be handled by the client code.
if all([task.status == task.S_DONE for task in self]):
raise StopIteration
# No task found, this usually happens when we have dependencies. Beware of possible deadlocks here!
return None
@abc.abstractmethod
def setup(self, *args, **kwargs):
"Method called before submitting the calculations."
def _setup(self, *args, **kwargs):
self.setup(*args, **kwargs)
def get_results(self, *args, **kwargs):
"""
        Method called once the calculations complete.
The base version returns a dictionary task_name : TaskResults for each task in self.
"""
return WorkFlowResults(task_results={task.name: task.results for task in self})
##########################################################################################
class WorkFlowResults(dict, MSONable):
"""
    Dictionary used to store some of the results produced by a Task object
"""
_mandatory_keys = [
"task_results",
]
EXC_KEY = "_exceptions"
def __init__(self, *args, **kwargs):
super(WorkFlowResults, self).__init__(*args, **kwargs)
if self.EXC_KEY not in self:
self[self.EXC_KEY] = []
@property
def exceptions(self):
return self[self.EXC_KEY]
def push_exceptions(self, *exceptions):
for exc in exceptions:
newstr = str(exc)
if newstr not in self.exceptions:
self[self.EXC_KEY] += [newstr,]
def assert_valid(self):
"""
Returns empty string if results seem valid.
The try assert except trick allows one to get a string with info on the exception.
We use the += operator so that sub-classes can add their own message.
"""
# Validate tasks.
for tres in self.task_results:
self[self.EXC_KEY] += tres.assert_valid()
return self[self.EXC_KEY]
@property
def to_dict(self):
d = {k: v for k,v in self.items()}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
mydict = {k: v for k,v in d.items() if k not in ["@module", "@class",]}
return cls(mydict)
def json_dump(self, filename):
json_pretty_dump(self.to_dict, filename)
@classmethod
def json_load(cls, filename):
return cls.from_dict(json_load(filename))
##########################################################################################
class Workflow(BaseWorkflow, MSONable):
"""
A Workflow is a list of (possibly connected) tasks.
"""
Error = WorkflowError
#@classmethod
#def from_task(cls, task):
# "Build a Work instance from a task object"
# workdir, tail = os.path.dirname(task.workdir)
# new = cls(workdir, taks.runmode)
# new.register_task(task.input)
# return new
def __init__(self, workdir, runmode, **kwargs):
"""
Args:
workdir:
Path to the working directory.
runmode:
RunMode instance or string "sequential"
"""
self.workdir = os.path.abspath(workdir)
self.runmode = RunMode.asrunmode(runmode)
self._kwargs = kwargs
self._tasks = []
# Dict with the dependencies of each task, indexed by task.id
self._links_dict = collections.defaultdict(list)
def __len__(self):
return len(self._tasks)
def __iter__(self):
return self._tasks.__iter__()
    def chunks(self, chunk_size):
        "Yield successive chunks of tasks of length chunk_size."
for tasks in chunks(self, chunk_size):
yield tasks
def __getitem__(self, slice):
return self._tasks[slice]
def __repr__(self):
return "<%s at %s, workdir = %s>" % (self.__class__.__name__, id(self), str(self.workdir))
@property
def to_dict(self):
d = {"workdir": self.workdir,
"runmode": self.runmode.to_dict,
"kwargs" : self._kwargs,
}
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@staticmethod
def from_dict(d):
return Work(d["workdir"], d["runmode"], **d["kwargs"])
@property
def alldone(self):
return all([task.status == Task.S_DONE for task in self])
@property
def isnc(self):
"True if norm-conserving calculation"
return all(task.isnc for task in self)
@property
def ispaw(self):
"True if PAW calculation"
return all(task.ispaw for task in self)
    def path_in_workdir(self, filename):
        "Create the absolute path of filename in the working directory."
return os.path.join(self.workdir, filename)
def setup(self, *args, **kwargs):
"""
Method called before running the calculations.
The default implementation is empty.
"""
#def show_inputs(self, stream=sys.stdout):
# lines = []
# app = lines.append
# width = 120
# for task in self:
# app("\n")
# app(repr(task))
# app("\ninput: %s" % task.input_file.path)
# app("\n")
# app(str(task.input))
# app(width*"=" + "\n")
# stream.write("\n".join(lines))
def register_task(self, strategy, links=()):
"""
Registers a new task:
- creates a new AbinitTask from the input strategy.
- adds the new task to the internal list, taking into account possible dependencies.
Returns: WorkLink object
"""
task_id = len(self) + 1
task_workdir = os.path.join(self.workdir, "task_" + str(task_id))
# Handle possible dependencies.
if links:
if not isinstance(links, collections.Iterable):
links = [links,]
# Create the new task (note the factory so that we create subclasses easily).
task = task_factory(strategy, task_workdir, self.runmode, task_id=task_id, links=links)
self._tasks.append(task)
if links:
self._links_dict[task_id].extend(links)
            print("task_id %s needs\n %s" % (task_id, [str(l) for l in links]))
return WorkLink(task)
def build(self, *args, **kwargs):
"Creates the top level directory"
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
def get_status(self, only_highest_rank=False):
"Get the status of the tasks in self."
status_list = [task.status for task in self]
if only_highest_rank:
return max(status_list)
else:
return status_list
@property
def processes(self):
return [task.process for task in self]
def rmtree(self, *args, **kwargs):
"""
Remove all calculation files and directories.
Keyword arguments:
force: (False)
Do not ask confirmation.
verbose: (0)
Print message if verbose is not zero.
"""
if kwargs.pop('verbose', 0):
print('Removing directory tree: %s' % self.workdir)
shutil.rmtree(self.workdir)
def move(self, dst, isabspath=False):
"""
Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
The destination path must not already exist. If the destination already exists
but is not a directory, it may be overwritten depending on os.rename() semantics.
        By default, dst is located in the parent directory of self.workdir, use isabspath=True
to specify an absolute path.
"""
if not isabspath:
dst = os.path.join(os.path.dirname(self.workdir), dst)
shutil.move(self.workdir, dst)
def submit_tasks(self, *args, **kwargs):
"""
Submits the task in self.
"""
for task in self:
task.start(*args, **kwargs)
# FIXME
task.wait()
def start(self, *args, **kwargs):
"""
Start the work. Calls build and _setup first, then the tasks are submitted.
Non-blocking call
"""
# Build dirs and files.
self.build(*args, **kwargs)
# Initial setup
self._setup(*args, **kwargs)
# Submit tasks (does not block)
self.submit_tasks(*args, **kwargs)
def read_etotal(self):
"""
Reads the total energy from the GSR file produced by the task.
Return a numpy array with the total energies in Hartree
The array element is set to np.inf if an exception is raised while reading the GSR file.
"""
if not self.alldone:
raise self.Error("Some task is still in running/submitted state")
etotal = []
for task in self:
# Open the GSR file and read etotal (Hartree)
with GSR_Reader(task.odata_path_from_ext("_GSR")) as ncdata:
etotal.append(ncdata.read_value("etotal"))
return etotal
################################################################################
class IterativeWork(Workflow):
"""
TODO
"""
__metaclass__ = abc.ABCMeta
def __init__(self, workdir, runmode, strategy_generator, max_niter=25):
"""
Args:
workdir:
Working directory.
strategy_generator:
Strategy generator.
max_niter:
Maximum number of iterations. A negative value or zero value
is equivalent to having an infinite number of iterations.
"""
super(IterativeWork, self).__init__(workdir, runmode)
self.strategy_generator = strategy_generator
self.max_niter = max_niter
def next_task(self):
"""
Generate and register a new task
Return: task object
"""
try:
next_strategy = next(self.strategy_generator)
except StopIteration:
raise StopIteration
self.register_task(next_strategy)
assert len(self) == self.niter
return self[-1]
def submit_tasks(self, *args, **kwargs):
"""
Run the tasks till self.exit_iteration says to exit or the number of iterations exceeds self.max_niter
Return dictionary with the final results
"""
self.niter = 1
while True:
if self.max_niter > 0 and self.niter > self.max_niter:
print("niter %d > max_niter %d" % (self.niter, self.max_niter))
break
try:
task = self.next_task()
except StopIteration:
break
# Start the task and block till completion.
task.start(*args, **kwargs)
task.wait()
data = self.exit_iteration(*args, **kwargs)
if data["exit"]:
break
self.niter += 1
@abc.abstractmethod
def exit_iteration(self, *args, **kwargs):
"""
Return a dictionary with the results produced at the given iteration.
        The dictionary must contain an entry "exit" that evaluates to
        True if the iteration should be stopped.
"""
##########################################################################################
def strictly_increasing(values):
return all(x<y for x, y in zip(values, values[1:]))
def strictly_decreasing(values):
return all(x>y for x, y in zip(values, values[1:]))
def non_increasing(values):
return all(x>=y for x, y in zip(values, values[1:]))
def non_decreasing(values):
return all(x<=y for x, y in zip(values, values[1:]))
def monotonic(values, mode="<", atol=1.e-8):
"""
Returns False if values are not monotonic (decreasing|increasing).
mode is "<" for a decreasing sequence, ">" for an increasing sequence.
Two numbers are considered equal if they differ less that atol.
.. warning:
Not very efficient for large data sets.
>>> values = [1.2, 1.3, 1.4]
>>> monotonic(values, mode="<")
False
>>> monotonic(values, mode=">")
True
"""
if len(values) == 1:
return True
if mode == ">":
for i in range(len(values)-1):
v, vp = values[i], values[i+1]
if abs(vp - v) > atol and vp <= v:
return False
elif mode == "<":
for i in range(len(values)-1):
v, vp = values[i], values[i+1]
if abs(vp - v) > atol and vp >= v:
return False
else:
raise ValueError("Wrong mode %s" % mode)
return True
def check_conv(values, tol, min_numpts=1, mode="abs", vinf=None):
"""
Given a list of values and a tolerance tol, returns the leftmost index for which
abs(value[i] - vinf) < tol if mode == "abs"
or
abs(value[i] - vinf) / vinf < tol if mode == "rel"
returns -1 if convergence is not achieved. By default, vinf = values[-1]
Args:
tol:
Tolerance
min_numpts:
Minimum number of points that must be converged.
mode:
"abs" for absolute convergence, "rel" for relative convergence.
vinf:
Used to specify an alternative value instead of values[-1].
"""
vinf = values[-1] if vinf is None else vinf
if mode == "abs":
vdiff = [abs(v - vinf) for v in values]
elif mode == "rel":
vdiff = [abs(v - vinf) / vinf for v in values]
else:
raise ValueError("Wrong mode %s" % mode)
numpts = len(vdiff)
i = -2
if (numpts > min_numpts) and vdiff[-2] < tol:
for i in range(numpts-1, -1, -1):
if vdiff[i] > tol:
break
if (numpts - i -1) < min_numpts: i = -2
return i + 1
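# Worked example for check_conv (the numbers are made up for illustration):
# with values = [1.0, 0.4, 0.1, 0.02, 0.01] and tol = 0.05 (mode="abs", so
# vinf = 0.01), the absolute differences are [0.99, 0.39, 0.09, 0.01, 0.0]
# and the leftmost converged index is 3, i.e. check_conv(values, 0.05) == 3.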
def compute_hints(ecut_list, etotal, atols_mev, pseudo, min_numpts=1, stream=sys.stdout):
de_low, de_normal, de_high = [a / (1000 * Ha_eV) for a in atols_mev]
num_ene = len(etotal)
etotal_inf = etotal[-1]
ihigh = check_conv(etotal, de_high, min_numpts=min_numpts)
inormal = check_conv(etotal, de_normal)
ilow = check_conv(etotal, de_low)
accidx = {"H": ihigh, "N": inormal, "L": ilow}
table = []
app = table.append
app(["iter", "ecut", "etotal", "et-e_inf [meV]", "accuracy",])
for idx, (ec, et) in enumerate(zip(ecut_list, etotal)):
line = "%d %.1f %.7f %.3f" % (idx, ec, et, (et-etotal_inf)* Ha_eV * 1.e+3)
row = line.split() + ["".join(c for c,v in accidx.items() if v == idx)]
app(row)
if stream is not None:
from pymatgen.util.string_utils import pprint_table
stream.write("pseudo: %s\n" % pseudo.name)
pprint_table(table, out=stream)
ecut_high, ecut_normal, ecut_low = 3 * (None,)
exit = (ihigh != -1)
if exit:
ecut_low = ecut_list[ilow]
ecut_normal = ecut_list[inormal]
ecut_high = ecut_list[ihigh]
aug_ratios = [1,]
aug_ratio_low, aug_ratio_normal, aug_ratio_high = 3 * (1,)
data = {
"exit" : ihigh != -1,
"etotal" : list(etotal),
"ecut_list" : ecut_list,
"aug_ratios" : aug_ratios,
"low" : {"ecut": ecut_low, "aug_ratio": aug_ratio_low},
"normal" : {"ecut": ecut_normal, "aug_ratio": aug_ratio_normal},
"high" : {"ecut": ecut_high, "aug_ratio": aug_ratio_high},
"pseudo_name": pseudo.name,
"pseudo_path": pseudo.path,
"atols_mev" : atols_mev,
"dojo_level" : 0,
}
return data
##########################################################################################
def plot_etotal(ecut_list, etotals, aug_ratios, show=True, savefig=None, *args, **kwargs):
"""
Uses Matplotlib to plot the energy curve as function of ecut
Args:
ecut_list:
List of cutoff energies
etotals:
Total energies in Hartree, see aug_ratios
aug_ratios:
            List augmentation ratios. [1,] for norm-conserving, [4, ...] for PAW
            The number of elements in aug_ratios must equal the number of (sub)lists
in etotals. Example:
- NC: etotals = [3.4, 4,5 ...], aug_ratios = [1,]
- PAW: etotals = [[3.4, ...], [3.6, ...]], aug_ratios = [4,6]
show:
True to show the figure
savefig:
            'abc.png' or 'abc.eps' to save the figure to a file.
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
npts = len(ecut_list)
if len(aug_ratios) != 1 and len(aug_ratios) != len(etotals):
raise ValueError("The number of sublists in etotal must equal the number of aug_ratios")
if len(aug_ratios) == 1:
etotals = [etotals,]
lines, legends = [], []
emax = -np.inf
for (aratio, etot) in zip(aug_ratios, etotals):
emev = Ha2meV(etot)
emev_inf = npts * [emev[-1]]
yy = emev - emev_inf
emax = max(emax, np.max(yy))
line, = ax.plot(ecut_list, yy, "-->", linewidth=3.0, markersize=10)
lines.append(line)
legends.append("aug_ratio = %s" % aratio)
ax.legend(lines, legends, 'upper right', shadow=True)
# Set xticks and labels.
ax.grid(True)
ax.set_xlabel("Ecut [Ha]")
ax.set_ylabel("$\Delta$ Etotal [meV]")
ax.set_xticks(ecut_list)
#ax.yaxis.set_view_interval(-10, emax + 0.01 * abs(emax))
ax.yaxis.set_view_interval(-10, 20)
ax.set_title("$\Delta$ Etotal Vs Ecut")
if show:
plt.show()
if savefig is not None:
fig.savefig(savefig)
##########################################################################################
class PseudoConvergence(Workflow):
def __init__(self, workdir, pseudo, ecut_list, atols_mev,
runmode="sequential", spin_mode="polarized", acell=(8, 9, 10), smearing="fermi_dirac:0.1 eV",):
super(PseudoConvergence, self).__init__(workdir, runmode)
# Temporary object used to build the strategy.
generator = PseudoIterativeConvergence(workdir, pseudo, ecut_list, atols_mev,
spin_mode = spin_mode,
acell = acell,
smearing = smearing,
max_niter = len(ecut_list),
)
self.atols_mev = atols_mev
self.pseudo = Pseudo.aspseudo(pseudo)
self.ecut_list = []
for ecut in ecut_list:
strategy = generator.strategy_with_ecut(ecut)
self.ecut_list.append(ecut)
self.register_task(strategy)
def get_results(self, *args, **kwargs):
# Get the results of the tasks.
wf_results = super(PseudoConvergence, self).get_results()
etotal = self.read_etotal()
data = compute_hints(self.ecut_list, etotal, self.atols_mev, self.pseudo)
plot_etotal(data["ecut_list"], data["etotal"], data["aug_ratios"],
show=False, savefig=self.path_in_workdir("etotal.pdf"))
wf_results.update(data)
if not monotonic(etotal, mode="<", atol=1.0e-5):
print("E(ecut) is not decreasing")
wf_results.push_exceptions("E(ecut) is not decreasing")
if kwargs.get("json_dump", True):
wf_results.json_dump(self.path_in_workdir("results.json"))
return wf_results
class PseudoIterativeConvergence(IterativeWork):
def __init__(self, workdir, pseudo, ecut_list_or_slice, atols_mev,
runmode="sequential", spin_mode="polarized", acell=(8, 9, 10), smearing="fermi_dirac:0.1 eV", max_niter=50,):
"""
Args:
workdir:
Working directory.
pseudo:
string or Pseudo instance
ecut_list_or_slice:
List of cutoff energies or slice object (mainly used for infinite iterations).
atols_mev:
                List of absolute tolerances in meV (3 entries corresponding to accuracy ["low", "normal", "high"])
spin_mode:
                Defines how the electronic spin will be treated.
acell:
Lengths of the periodic box in Bohr.
smearing:
                Smearing instance or string in the form "mode:tsmear". Default: Fermi-Dirac with T=0.1 eV
"""
self.pseudo = Pseudo.aspseudo(pseudo)
self.atols_mev = atols_mev
self.spin_mode = spin_mode
self.smearing = Smearing.assmearing(smearing)
self.acell = acell
if isinstance(ecut_list_or_slice, slice):
self.ecut_iterator = iterator_from_slice(ecut_list_or_slice)
else:
self.ecut_iterator = iter(ecut_list_or_slice)
# Construct a generator that returns strategy objects.
def strategy_generator():
for ecut in self.ecut_iterator:
yield self.strategy_with_ecut(ecut)
super(PseudoIterativeConvergence, self).__init__(
workdir, runmode, strategy_generator(), max_niter=max_niter)
if not self.isnc:
raise NotImplementedError("PAW convergence tests are not supported yet")
def strategy_with_ecut(self, ecut):
"Return a Strategy instance with given cutoff energy ecut"
        # Define the system: one atom in a box of lengths acell.
boxed_atom = AbiStructure.boxed_atom(self.pseudo, acell=self.acell)
# Gamma-only sampling.
gamma_only = KSampling.gamma_only()
# Setup electrons.
electrons = Electrons(spin_mode=self.spin_mode, smearing=self.smearing)
# Don't write WFK files.
extra_abivars = {
"ecut" : ecut,
"prtwf": 0,
}
strategy = ScfStrategy(boxed_atom, self.pseudo, gamma_only,
spin_mode=self.spin_mode, smearing=self.smearing,
charge=0.0, scf_algorithm=None,
use_symmetries=True, **extra_abivars)
return strategy
@property
def ecut_list(self):
"""The list of cutoff energies computed so far"""
return [float(task.strategy.ecut) for task in self]
def check_etotal_convergence(self, *args, **kwargs):
return compute_hints(self.ecut_list, self.read_etotal(), self.atols_mev,
self.pseudo)
def exit_iteration(self, *args, **kwargs):
return self.check_etotal_convergence(self, *args, **kwargs)
def get_results(self, *args, **kwargs):
# Get the results of the tasks.
wf_results = super(PseudoIterativeConvergence, self).get_results()
data = self.check_etotal_convergence()
plot_etotal(data["ecut_list"], data["etotal"], data["aug_ratios"],
show=False, savefig=self.path_in_workdir("etotal.pdf"))
wf_results.update(data)
if not monotonic(data["etotal"], mode="<", atol=1.0e-5):
print("E(ecut) is not decreasing")
wf_results.push_exceptions("E(ecut) is not decreasing")
if kwargs.get("json_dump", True):
wf_results.json_dump(self.path_in_workdir("results.json"))
return wf_results
################################################################################
class BandStructure(Workflow):
def __init__(self, workdir, runmode, scf_strategy, nscf_strategy,
dos_strategy=None):
super(BandStructure, self).__init__(workdir, runmode)
# Register the GS-SCF run.
scf_link = self.register_task(scf_strategy)
# Register the NSCF run and its dependency
self.register_task(nscf_strategy, links=scf_link.produces_exts("_DEN"))
# Add DOS computation
if dos_strategy is not None:
self.register_task(dos_strategy,
links=scf_link.produces_exts("_DEN"))
################################################################################
class Relaxation(Workflow):
def __init__(self, workdir, runmode, relax_strategy):
super(Relaxation, self).__init__(workdir, runmode)
link = self.register_task(relax_strategy)
################################################################################
class DeltaTest(Workflow):
def __init__(self, workdir, runmode, structure_or_cif, pseudos, kppa,
spin_mode="polarized", smearing="fermi_dirac:0.1 eV",
accuracy="normal",
ecut=None, ecutsm=0.05, chksymbreak=0): # FIXME Hack
super(DeltaTest, self).__init__(workdir, runmode)
if isinstance(structure_or_cif, Structure):
structure = structure_or_cif
else:
# Assume CIF file
structure = read_structure(structure_or_cif)
structure = AbiStructure.asabistructure(structure)
smearing = Smearing.assmearing(smearing)
self._input_structure = structure
v0 = structure.volume
self.volumes = v0 * np.arange(90, 112, 2) / 100.
for vol in self.volumes:
new_lattice = structure.lattice.scale(vol)
new_structure = Structure(new_lattice, structure.species,
structure.frac_coords)
new_structure = AbiStructure.asabistructure(new_structure)
extra_abivars = {
"ecutsm": ecutsm,
"prtwf" : 0,
}
if ecut is not None:
extra_abivars.update({"ecut": ecut})
ksampling = KSampling.automatic_density(new_structure, kppa,
chksymbreak=chksymbreak)
scf_strategy = ScfStrategy(new_structure, pseudos, ksampling,
accuracy=accuracy, spin_mode=spin_mode,
smearing=smearing, **extra_abivars)
self.register_task(scf_strategy)
def get_results(self, *args, **kwargs):
num_sites = self._input_structure.num_sites
etotal = Ha2eV(self.read_etotal())
wf_results = super(DeltaTest, self).get_results()
wf_results.update({
"etotal" : list(etotal),
"volumes" : list(self.volumes),
"natom" : num_sites,
"dojo_level": 1,
})
from .eos import EOS
try:
eos_fit = EOS.Murnaghan().fit(self.volumes, etotal)
print(eos_fit)
eos_fit.plot(show=False, savefig=self.path_in_workdir("eos.pdf"))
wf_results.update({
"v0": eos_fit.v0,
"b" : eos_fit.b,
"bp": eos_fit.bp,
})
except EOS.Error as exc:
wf_results.push_exceptions(exc)
if kwargs.get("json_dump", True):
wf_results.json_dump(self.path_in_workdir("results.json"))
# Write data for the computation of the delta factor
with open(self.path_in_workdir("deltadata.txt"), "w") as fh:
fh.write("# Volume/natom [Ang^3] Etotal/natom [eV]\n")
for (v, e) in zip(self.volumes, etotal):
fh.write("%s %s\n" % (v/num_sites, e/num_sites))
return wf_results
################################################################################
class GW_Workflow(Workflow):
def __init__(self, workdir, runmode, scf_strategy, nscf_strategy,
scr_strategy, sigma_strategy):
"""
Workflow for GW calculations.
Args:
workdir:
Working directory of the calculation.
runmode:
Run mode.
scf_strategy:
SCFStrategy instance
nscf_strategy:
NSCFStrategy instance
scr_strategy:
Strategy for the screening run.
sigma_strategy:
Strategy for the self-energy run.
"""
super(GW_Workflow, self).__init__(workdir, runmode)
# Register the GS-SCF run.
scf_link = self.register_task(scf_strategy)
# Construct the input for the NSCF run.
nscf_link = self.register_task(nscf_strategy,
links=scf_link.produces_exts("_DEN"))
# Register the SCR run.
screen_link = self.register_task(scr_strategy,
links=nscf_link.produces_exts("_WFK"))
# Register the SIGMA run.
sigma_links = [nscf_link.produces_exts("_WFK"),
screen_link.produces_exts("_SCR"),]
self.register_task(sigma_strategy, links=sigma_links)
################################################################################
|
[
"pymatgen.core.physical_constants.Ha2meV",
"pymatgen.util.num_utils.chunks",
"pymatgen.core.structure.Structure",
"pymatgen.util.num_utils.iterator_from_slice",
"numpy.arange",
"os.path.exists",
"shutil.move",
"pymatgen.serializers.json_coders.json_pretty_dump",
"functools.wraps",
"numpy.max",
"pymatgen.util.string_utils.pprint_table",
"os.path.dirname",
"pymatgen.io.smartio.read_structure",
"pymatgen.io.abinitio.task.task_factory",
"matplotlib.pyplot.show",
"os.makedirs",
"os.path.join",
"matplotlib.pyplot.figure",
"collections.defaultdict",
"shutil.rmtree",
"os.path.abspath"
] |
[((1455, 1478), 'functools.wraps', 'functools.wraps', (['method'], {}), '(method)\n', (1470, 1478), False, 'import functools\n'), ((24139, 24151), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24149, 24151), True, 'import matplotlib.pyplot as plt\n'), ((9780, 9820), 'pymatgen.serializers.json_coders.json_pretty_dump', 'json_pretty_dump', (['self.to_dict', 'filename'], {}), '(self.to_dict, filename)\n', (9796, 9820), False, 'from pymatgen.serializers.json_coders import MSONable, json_pretty_dump\n'), ((10671, 10695), 'os.path.abspath', 'os.path.abspath', (['workdir'], {}), '(workdir)\n', (10686, 10695), False, 'import os\n'), ((10902, 10931), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (10925, 10931), False, 'import collections\n'), ((11172, 11196), 'pymatgen.util.num_utils.chunks', 'chunks', (['self', 'chunk_size'], {}), '(self, chunk_size)\n', (11178, 11196), False, 'from pymatgen.util.num_utils import iterator_from_slice, chunks\n'), ((12285, 12321), 'os.path.join', 'os.path.join', (['self.workdir', 'filename'], {}), '(self.workdir, filename)\n', (12297, 12321), False, 'import os\n'), ((13529, 13614), 'pymatgen.io.abinitio.task.task_factory', 'task_factory', (['strategy', 'task_workdir', 'self.runmode'], {'task_id': 'task_id', 'links': 'links'}), '(strategy, task_workdir, self.runmode, task_id=task_id, links=links\n )\n', (13541, 13614), False, 'from pymatgen.io.abinitio.task import task_factory, Task\n'), ((14745, 14772), 'shutil.rmtree', 'shutil.rmtree', (['self.workdir'], {}), '(self.workdir)\n', (14758, 14772), False, 'import shutil\n'), ((15353, 15383), 'shutil.move', 'shutil.move', (['self.workdir', 'dst'], {}), '(self.workdir, dst)\n', (15364, 15383), False, 'import shutil\n'), ((22332, 22363), 'pymatgen.util.string_utils.pprint_table', 'pprint_table', (['table'], {'out': 'stream'}), '(table, out=stream)\n', (22344, 22363), False, 'from pymatgen.util.string_utils import pprint_table\n'), ((24549, 24561), 'pymatgen.core.physical_constants.Ha2meV', 'Ha2meV', (['etot'], {}), '(etot)\n', (24555, 24561), False, 'from pymatgen.core.physical_constants import Bohr2Ang, Ang2Bohr, Ha2eV, Ha_eV, Ha2meV\n'), ((25201, 25211), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25209, 25211), True, 'import matplotlib.pyplot as plt\n'), ((13923, 13951), 'os.path.exists', 'os.path.exists', (['self.workdir'], {}), '(self.workdir)\n', (13937, 13951), False, 'import os\n'), ((13965, 13990), 'os.makedirs', 'os.makedirs', (['self.workdir'], {}), '(self.workdir)\n', (13976, 13990), False, 'import os\n'), ((24654, 24664), 'numpy.max', 'np.max', (['yy'], {}), '(yy)\n', (24660, 24664), True, 'import numpy as np\n'), ((28390, 28429), 'pymatgen.util.num_utils.iterator_from_slice', 'iterator_from_slice', (['ecut_list_or_slice'], {}), '(ecut_list_or_slice)\n', (28409, 28429), False, 'from pymatgen.util.num_utils import iterator_from_slice, chunks\n'), ((32513, 32545), 'pymatgen.io.smartio.read_structure', 'read_structure', (['structure_or_cif'], {}), '(structure_or_cif)\n', (32527, 32545), False, 'from pymatgen.io.smartio import read_structure\n'), ((32907, 32971), 'pymatgen.core.structure.Structure', 'Structure', (['new_lattice', 'structure.species', 'structure.frac_coords'], {}), '(new_lattice, structure.species, structure.frac_coords)\n', (32916, 32971), False, 'from pymatgen.core.structure import Structure\n'), ((15308, 15337), 'os.path.dirname', 'os.path.dirname', (['self.workdir'], {}), '(self.workdir)\n', (15323, 15337), False, 'import os\n'), 
((32759, 32780), 'numpy.arange', 'np.arange', (['(90)', '(112)', '(2)'], {}), '(90, 112, 2)\n', (32768, 32780), True, 'import numpy as np\n')]
|
import numpy as np
import cv2
import glob
import itertools
import os
from tqdm import tqdm
from ..models.config import IMAGE_ORDERING
from .augmentation import augment_seg
import random
random.seed(0)
class_colors = [ ( random.randint(0,255),random.randint(0,255),random.randint(0,255) ) for _ in range(5000) ]
def get_pairs_from_paths( images_path , segs_path ):
images = glob.glob( os.path.join(images_path,"*.png") ) + glob.glob( os.path.join(images_path,"*.jpg") ) + glob.glob( os.path.join(images_path,"*.jpeg") )
segmentations = glob.glob( os.path.join(segs_path,"*.png") )
segmentations_d = dict( zip(segmentations,segmentations ))
ret = []
for im in images:
seg_bnme = os.path.basename(im).replace(".jpg" , ".png").replace(".jpeg" , ".png")
seg = os.path.join( segs_path , seg_bnme )
		# the following assertion is commented out because it raised errors for some datasets
#assert ( seg in segmentations_d ), (im + " is present in "+images_path +" but "+seg_bnme+" is not found in "+segs_path + " . Make sure annotation image are in .png" )
ret.append((im , seg) )
return ret
def get_image_arr( path , width , height , imgNorm="sub_mean" , odering='channels_first' ):
if type( path ) is np.ndarray:
img = path
else:
img = cv2.imread(path, 1)
if imgNorm == "sub_and_divide":
img = np.float32(cv2.resize(img, ( width , height ))) / 127.5 - 1
elif imgNorm == "sub_mean":
img = cv2.resize(img, ( width , height ))
img = img.astype(np.float32)
img[:,:,0] -= 103.939
img[:,:,1] -= 116.779
img[:,:,2] -= 123.68
img = img[ : , : , ::-1 ]
elif imgNorm == "divide":
img = cv2.resize(img, ( width , height ))
img = img.astype(np.float32)
img = img/255.0
if odering == 'channels_first':
img = np.rollaxis(img, 2, 0)
return img
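# Example usage of get_image_arr (illustrative only; "frame.jpg" is a hypothetical file):
#   x = get_image_arr("frame.jpg", 224, 224, imgNorm="sub_mean", odering=IMAGE_ORDERING)
# "sub_and_divide" rescales pixels to [-1, 1], "sub_mean" subtracts the per-channel
# means (103.939, 116.779, 123.68) and flips BGR to RGB, and "divide" rescales to
# [0, 1]; with odering='channels_first' the array is rolled to (channels, height, width).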
def get_segmentation_arr( path , nClasses , width , height , no_reshape=False ):
seg_labels = np.zeros(( height , width , nClasses ))
if type( path ) is np.ndarray:
img = path
else:
img = cv2.imread(path, 1)
img = cv2.resize(img, ( width , height ) , interpolation=cv2.INTER_NEAREST )
img = img[:, : , 0]
for c in range(nClasses):
seg_labels[: , : , c ] = (img == c ).astype(int)
if no_reshape:
return seg_labels
seg_labels = np.reshape(seg_labels, ( width*height , nClasses ))
return seg_labels
def verify_segmentation_dataset( images_path , segs_path , n_classes ):
img_seg_pairs = get_pairs_from_paths( images_path , segs_path )
assert len(img_seg_pairs)>0 , "Dataset looks empty or path is wrong "
for im_fn , seg_fn in tqdm(img_seg_pairs) :
img = cv2.imread( im_fn )
seg = cv2.imread( seg_fn )
		assert ( img.shape[0]==seg.shape[0] and img.shape[1]==seg.shape[1] ) , "The sizes of the image and the annotation do not match, or they are corrupt: "+ im_fn + " " + seg_fn
assert ( np.max(seg[:,:,0]) < n_classes) , "The pixel values of seg image should be from 0 to "+str(n_classes-1) + " . Found pixel value "+str(np.max(seg[:,:,0]))
print("Dataset verified! ")
def image_segmentation_generator( images_path , segs_path , batch_size, n_classes , input_height , input_width , output_height , output_width , do_augment=False ):
img_seg_pairs = get_pairs_from_paths( images_path , segs_path )
random.shuffle( img_seg_pairs )
zipped = itertools.cycle( img_seg_pairs )
while True:
X = []
Y = []
for _ in range( batch_size) :
im , seg = next(zipped)
im = cv2.imread(im , 1 )
seg = cv2.imread(seg , 1 )
if do_augment:
				im , seg[:,:,0] = augment_seg( im , seg[:,:,0] )
X.append( get_image_arr(im , input_width , input_height ,odering=IMAGE_ORDERING ) )
Y.append( get_segmentation_arr( seg , n_classes , output_width , output_height ) )
yield np.array(X) , np.array(Y)
|
[
"itertools.cycle",
"numpy.reshape",
"random.shuffle",
"tqdm.tqdm",
"os.path.join",
"numpy.rollaxis",
"random.seed",
"numpy.max",
"numpy.array",
"numpy.zeros",
"os.path.basename",
"cv2.resize",
"cv2.imread",
"random.randint"
] |
[((189, 203), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (200, 203), False, 'import random\n'), ((1887, 1922), 'numpy.zeros', 'np.zeros', (['(height, width, nClasses)'], {}), '((height, width, nClasses))\n', (1895, 1922), True, 'import numpy as np\n'), ((2020, 2085), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(img, (width, height), interpolation=cv2.INTER_NEAREST)\n', (2030, 2085), False, 'import cv2\n'), ((2246, 2296), 'numpy.reshape', 'np.reshape', (['seg_labels', '(width * height, nClasses)'], {}), '(seg_labels, (width * height, nClasses))\n', (2256, 2296), True, 'import numpy as np\n'), ((2558, 2577), 'tqdm.tqdm', 'tqdm', (['img_seg_pairs'], {}), '(img_seg_pairs)\n', (2562, 2577), False, 'from tqdm import tqdm\n'), ((3241, 3270), 'random.shuffle', 'random.shuffle', (['img_seg_pairs'], {}), '(img_seg_pairs)\n', (3255, 3270), False, 'import random\n'), ((3283, 3313), 'itertools.cycle', 'itertools.cycle', (['img_seg_pairs'], {}), '(img_seg_pairs)\n', (3298, 3313), False, 'import itertools\n'), ((224, 246), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (238, 246), False, 'import random\n'), ((246, 268), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (260, 268), False, 'import random\n'), ((268, 290), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (282, 290), False, 'import random\n'), ((565, 597), 'os.path.join', 'os.path.join', (['segs_path', '"""*.png"""'], {}), "(segs_path, '*.png')\n", (577, 597), False, 'import os\n'), ((790, 823), 'os.path.join', 'os.path.join', (['segs_path', 'seg_bnme'], {}), '(segs_path, seg_bnme)\n', (802, 823), False, 'import os\n'), ((1262, 1281), 'cv2.imread', 'cv2.imread', (['path', '(1)'], {}), '(path, 1)\n', (1272, 1281), False, 'import cv2\n'), ((1749, 1771), 'numpy.rollaxis', 'np.rollaxis', (['img', '(2)', '(0)'], {}), '(img, 2, 0)\n', (1760, 1771), True, 'import numpy as np\n'), ((1992, 2011), 'cv2.imread', 'cv2.imread', (['path', '(1)'], {}), '(path, 1)\n', (2002, 2011), False, 'import cv2\n'), ((2588, 2605), 'cv2.imread', 'cv2.imread', (['im_fn'], {}), '(im_fn)\n', (2598, 2605), False, 'import cv2\n'), ((2616, 2634), 'cv2.imread', 'cv2.imread', (['seg_fn'], {}), '(seg_fn)\n', (2626, 2634), False, 'import cv2\n'), ((497, 532), 'os.path.join', 'os.path.join', (['images_path', '"""*.jpeg"""'], {}), "(images_path, '*.jpeg')\n", (509, 532), False, 'import os\n'), ((1421, 1453), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (1431, 1453), False, 'import cv2\n'), ((2819, 2839), 'numpy.max', 'np.max', (['seg[:, :, 0]'], {}), '(seg[:, :, 0])\n', (2825, 2839), True, 'import numpy as np\n'), ((3418, 3435), 'cv2.imread', 'cv2.imread', (['im', '(1)'], {}), '(im, 1)\n', (3428, 3435), False, 'import cv2\n'), ((3447, 3465), 'cv2.imread', 'cv2.imread', (['seg', '(1)'], {}), '(seg, 1)\n', (3457, 3465), False, 'import cv2\n'), ((396, 430), 'os.path.join', 'os.path.join', (['images_path', '"""*.png"""'], {}), "(images_path, '*.png')\n", (408, 430), False, 'import os\n'), ((446, 480), 'os.path.join', 'os.path.join', (['images_path', '"""*.jpg"""'], {}), "(images_path, '*.jpg')\n", (458, 480), False, 'import os\n'), ((1622, 1654), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (1632, 1654), False, 'import cv2\n'), ((2953, 2973), 'numpy.max', 'np.max', (['seg[:, :, 0]'], {}), '(seg[:, :, 0])\n', (2959, 2973), True, 'import numpy as np\n'), 
((3727, 3738), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3735, 3738), True, 'import numpy as np\n'), ((3741, 3752), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (3749, 3752), True, 'import numpy as np\n'), ((1335, 1367), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (1345, 1367), False, 'import cv2\n'), ((710, 730), 'os.path.basename', 'os.path.basename', (['im'], {}), '(im)\n', (726, 730), False, 'import os\n')]
|
# coding: utf-8
#! /usr/bin/env python
# FrequencyJumpLibrary
import numpy as np
from scipy import stats
import math as math
def KM (y, delta_t=1, Moments = [1,2,4,6,8], bandwidth = 1.5, Lowerbound = False, Upperbound = False, Kernel = 'Epanechnikov'): #Kernel-based Regression
Moments = [0] + Moments
length=len(Moments)
n = 5000
Mn = int(n * bandwidth / 10) #Minor n
res = np.zeros([n + Mn, length])
# Epanechnikov kernel: 3/4(1 - x²), x=-1 to x=1
# #Uniform kernel: 1/2, , x=-1 to x=1
Kernel = (3 * (1 - (np.linspace(-1 * bandwidth, 1 * bandwidth, Mn) / bandwidth) ** 2)) / (4 * bandwidth) # Kernel1 = ones([Mn]) / (2 * bandwidth)
yDist = y[1:] - y[:-1]
if (Lowerbound == False):
Min = min(y)
else:
Min = Lowerbound
if (Upperbound == False):
Max = max(y)
else:
Max = Upperbound
space = np.linspace(Min, Max, n + Mn)
b = ((((y[:-1]-Min) / (abs(Max - Min))) * (n))).astype(int)
trueb = np.unique(b[(b>=0)*(b<n)])
for i in trueb:
r = yDist[b==i]
for l in range(length):
res[i:i + Mn, l] += Kernel * (sum(r ** Moments[l]))
res[:, 0][res[:, 0]==0]=1.
for l in range(length-1):
res[:, l+1] = np.divide(res[:, l+1],(res[:, 0] * math.factorial(Moments[l+1]) * (delta_t)))
return res, space
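# Minimal usage sketch (added for illustration; the synthetic random-walk input and
# the parameter choices below are examples, not values from the original library):
if __name__ == "__main__":
    np.random.seed(0)
    y_demo = np.cumsum(np.random.randn(10000))  # toy diffusive trajectory
    res_demo, space_demo = KM(y_demo, delta_t=1, Moments=[1, 2], bandwidth=1.5)
    # Column 0 holds kernel-weighted sample counts; columns 1 and 2 hold the
    # estimated drift and diffusion (first and second Kramers-Moyal coefficients)
    # evaluated on the grid returned in space_demo.
    print(space_demo[:5], res_demo[:5, 1])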
|
[
"math.factorial",
"numpy.zeros",
"numpy.linspace",
"numpy.unique"
] |
[((437, 463), 'numpy.zeros', 'np.zeros', (['[n + Mn, length]'], {}), '([n + Mn, length])\n', (445, 463), True, 'import numpy as np\n'), ((943, 972), 'numpy.linspace', 'np.linspace', (['Min', 'Max', '(n + Mn)'], {}), '(Min, Max, n + Mn)\n', (954, 972), True, 'import numpy as np\n'), ((1049, 1081), 'numpy.unique', 'np.unique', (['b[(b >= 0) * (b < n)]'], {}), '(b[(b >= 0) * (b < n)])\n', (1058, 1081), True, 'import numpy as np\n'), ((1348, 1378), 'math.factorial', 'math.factorial', (['Moments[l + 1]'], {}), '(Moments[l + 1])\n', (1362, 1378), True, 'import math as math\n'), ((587, 633), 'numpy.linspace', 'np.linspace', (['(-1 * bandwidth)', '(1 * bandwidth)', 'Mn'], {}), '(-1 * bandwidth, 1 * bandwidth, Mn)\n', (598, 633), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 10:58:27 2020
Experiments where one marginal is fixed
"""
import os
import numpy as np
from joblib import Parallel, delayed
import torch
import ot
from unbalancedgw.batch_stable_ugw_solver import log_batch_ugw_sinkhorn
from unbalancedgw._batch_utils import compute_batch_flb_plan
import utils
from partial_gw import compute_cost_matrices
folder = "marginals_without_rescaling"
path = os.getcwd() + "/saved_plans"
if not os.path.isdir(path):
os.mkdir(path)
path = path + "/" + folder
if not os.path.isdir(path):
os.mkdir(path)
def euclid_dist(x, y):
"""
Computes the euclidean distance between two pointclouds, returning a
matrix whose coordinates are the distance between two points.
Parameters
----------
x: torch.Tensor of size [size_X, dim]
coordinates of the first group of vectors of R^d.
y: torch.Tensor of size [size_Y, dim]
coordinates of the second group of vectors of R^d.
Returns
-------
torch.Tensor of size [size_X, size_Y]
Matrix of all pairwise distances.
"""
return (x[:, None, :] - y[None, :, :]).norm(p=2, dim=2)
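# Illustrative sanity check for euclid_dist (kept as comments, since importing this
# script creates output directories and the experiments expect GPUs and data files):
#     x = torch.randn(3, 2); y = torch.randn(4, 2)
#     euclid_dist(x, y).shape                               -> torch.Size([3, 4])
#     torch.allclose(euclid_dist(x, y), torch.cdist(x, y))  -> True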
def prepare_initialisation(dataset_p, dataset_u, n_pos, n_unl, prior, nb_try):
"""
Compute the tensor used as initialization for UGW.
The init is obtained by solving partial EMD as in Chapel et al. when the
domains are the same.
Parameters
----------
dataset_p: string
name of the dataset used for positive data
dataset_u: string
name of the dataset used for unlabeled data
n_pos: int
number of positives samples
n_unl: int
number of unlabeled samples
prior: float
proportion of positive samples in the unlabeled dataset
nb_try: int
number of folds to perform PU learning
Returns
-------
init_plan: torch.Tensor of size [nb_try, n_pos, n_unl]
Set of initialization plans used to init UGW.
"""
init_plan = torch.zeros([nb_try, n_pos, n_unl])
for i in range(nb_try):
# Draw dataset
P, U, _ = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos,
n_unl, prior, seed_nb=i)
Ctot, C1, C2, mu, nu = compute_cost_matrices(P, U, prior,
nb_dummies=10)
# Compute init
init_plan[i] = torch.tensor(ot.emd(mu, nu, Ctot)[:n_pos, :])
return init_plan
def compute_plan_ugw(dataset_p, dataset_u, n_pos, n_unl, prior, eps, rho, rho2,
nb_try, device=0):
# Set default type and GPU device
torch.cuda.set_device(device)
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# keep constant to normalize cost, uniform over folds by taking first batch
# P, U, _ = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos, n_unl,
# prior, 0)
# U = torch.tensor(U.values,dtype=torch.float) # Convert to torch
# cst_norm = euclid_dist(U, U).max()
# Draw cost for all seeds as batch
Cx = torch.zeros([nb_try, n_pos, n_pos])
Cy = torch.zeros([nb_try, n_unl, n_unl])
for i in range(nb_try):
P, U, y_u = utils.draw_p_u_dataset_scar(dataset_p, dataset_u, n_pos,
n_unl, prior, seed_nb=i)
P, U = torch.tensor(P.values, dtype=torch.float), \
torch.tensor(U.values, dtype=torch.float)
cx, cy = euclid_dist(P, P), euclid_dist(U, U)
Cx[i], Cy[i] = cx, cy
# Cx[i], Cy[i] = cx / cst_norm, cy / cst_norm
del cx, cy
# Compute init and weights
mu = (torch.ones([n_pos]) / n_pos).expand(nb_try, -1)
nu = (torch.ones([n_unl]) / n_unl).expand(nb_try, -1)
if P.shape[1] == U.shape[1]: # If domains are the same
init_plan = prepare_initialisation(dataset_p, dataset_u, n_pos, n_unl,
prior, nb_try)
else:
_, _, init_plan = compute_batch_flb_plan(
mu, Cx, nu, Cy, eps=eps, rho=rho, rho2=rho2,
nits_sinkhorn=50000, tol_sinkhorn=1e-5)
# Compute the marginal of init and save as file
pi_numpy = init_plan.sum(dim=1).cpu().data.numpy()
fname = f'/ugw_init_{dataset_p}_{n_pos}_{dataset_u}_{n_unl}_' \
f'prior{prior}_eps{eps}_rho{rho}_rho{rho2}_reps{nb_try}.npy'
np.save(path + fname, pi_numpy)
# Set params and start the grid wrt entropic param eps
pi = log_batch_ugw_sinkhorn(mu, Cx, nu, Cy, init=init_plan,
eps=eps, rho=rho, rho2=rho2,
nits_plan=3000, tol_plan=1e-5,
nits_sinkhorn=3000, tol_sinkhorn=1e-6)
if torch.any(torch.isnan(pi)):
raise Exception(f"Solver got NaN plan with params (eps, rho) = "
f"{dataset_p, dataset_u, nb_try, eps, rho, rho2}")
# Compute the marginal and save as file
pi_numpy = pi.sum(dim=1).cpu().data.numpy()
fname = f'/ugw_plan_{dataset_p}_{n_pos}_{dataset_u}_{n_unl}_' \
f'prior{prior}_eps{eps}_rho{rho}_rho{rho2}_reps{nb_try}.npy'
np.save(path + fname, pi_numpy)
print(
f"DONE = Dataset {dataset_p, dataset_u}, eps = {eps}, "
f"rho = {rho, rho2}, reps = {nb_try}")
return
if __name__ == '__main__':
parallel_gpu = True
    # epsilon set to 2**-9 but can be optimized via grid-search
grid_eps = [2. ** k for k in range(-9, -8, 1)]
grid_rho = [2. ** k for k in range(-10, -4, 1)]
nb_try = 40
# List all tasks for the Caltech datasets
list_tasks = []
# # Matching similar features - prior set to 10%
n_pos, n_unl, prior = 100, 100, 0.1
list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr']
list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam',
'decaf_dslr']
list_data = [('surf_Caltech', d) for d in list_surf] + [
('decaf_caltech', d) for d in list_decaf]
list_tasks = list_tasks + [
(data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
for (data_pos, data_unl) in list_data for eps in grid_eps
for rho in grid_rho for rho2 in grid_rho]
# # Matching similar features - prior set to 20%
n_pos, n_unl, prior = 100, 100, 0.2
list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam']
list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam']
list_data = [('surf_Caltech', d) for d in list_surf] + [
('decaf_caltech', d) for d in list_decaf]
list_tasks = list_tasks + [
(data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
for (data_pos, data_unl) in list_data for eps in grid_eps
for rho in grid_rho for rho2 in grid_rho]
# Matching different features - prior set to 10%
n_pos, n_unl, prior = 100, 100, 0.1
list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam', 'surf_dslr']
list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam',
'decaf_dslr']
list_data = [('surf_Caltech', d) for d in list_decaf] + [
('decaf_caltech', d) for d in list_surf]
list_tasks = list_tasks + [
(data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
for (data_pos, data_unl) in list_data for eps in grid_eps
for rho in grid_rho for rho2 in grid_rho]
# # Matching different features - prior set to 20%
n_pos, n_unl, prior = 100, 100, 0.2
list_surf = ['surf_Caltech', 'surf_amazon', 'surf_webcam']
list_decaf = ['decaf_caltech', 'decaf_amazon', 'decaf_webcam']
list_data = [('surf_Caltech', d) for d in list_decaf] + [
('decaf_caltech', d) for d in list_surf]
list_tasks = list_tasks + [
(data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2, nb_try)
for (data_pos, data_unl) in list_data for eps in grid_eps
for rho in grid_rho for rho2 in grid_rho]
if parallel_gpu:
assert torch.cuda.is_available()
list_device = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
total_devices = torch.cuda.device_count()
print(
f"Parallel computation // Total GPUs available = {total_devices}")
pll = Parallel(n_jobs=total_devices)
iterator = (
delayed(compute_plan_ugw)(data_pos, data_unl, n_pos, n_unl, prior,
eps, rho, rho2, nb_try,
device=list_device[k % total_devices])
for
k, (
data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2,
nb_try) in
enumerate(list_tasks))
pll(iterator)
else:
print("Not Parallel")
for (data_pos, data_unl, n_pos, n_unl, prior, eps, rho, rho2,
nb_try) in list_tasks:
compute_plan_ugw(data_pos, data_unl, n_pos, n_unl, prior, eps, rho,
rho2, nb_try)
print(f'{data_pos, data_unl} done.')
|
[
"unbalancedgw._batch_utils.compute_batch_flb_plan",
"unbalancedgw.batch_stable_ugw_solver.log_batch_ugw_sinkhorn",
"torch.cuda.device_count",
"torch.cuda.is_available",
"numpy.save",
"utils.draw_p_u_dataset_scar",
"torch.set_default_tensor_type",
"partial_gw.compute_cost_matrices",
"os.path.isdir",
"os.mkdir",
"ot.emd",
"torch.cuda.set_device",
"os.getcwd",
"joblib.Parallel",
"torch.tensor",
"joblib.delayed",
"torch.isnan",
"torch.zeros",
"torch.ones"
] |
[((461, 472), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (470, 472), False, 'import os\n'), ((497, 516), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (510, 516), False, 'import os\n'), ((522, 536), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (530, 536), False, 'import os\n'), ((571, 590), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (584, 590), False, 'import os\n'), ((596, 610), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (604, 610), False, 'import os\n'), ((1993, 2028), 'torch.zeros', 'torch.zeros', (['[nb_try, n_pos, n_unl]'], {}), '([nb_try, n_pos, n_unl])\n', (2004, 2028), False, 'import torch\n'), ((2637, 2666), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device'], {}), '(device)\n', (2658, 2666), False, 'import torch\n'), ((2671, 2726), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.cuda.FloatTensor"""'], {}), "('torch.cuda.FloatTensor')\n", (2700, 2726), False, 'import torch\n'), ((3103, 3138), 'torch.zeros', 'torch.zeros', (['[nb_try, n_pos, n_pos]'], {}), '([nb_try, n_pos, n_pos])\n', (3114, 3138), False, 'import torch\n'), ((3148, 3183), 'torch.zeros', 'torch.zeros', (['[nb_try, n_unl, n_unl]'], {}), '([nb_try, n_unl, n_unl])\n', (3159, 3183), False, 'import torch\n'), ((4399, 4430), 'numpy.save', 'np.save', (['(path + fname)', 'pi_numpy'], {}), '(path + fname, pi_numpy)\n', (4406, 4430), True, 'import numpy as np\n'), ((4500, 4663), 'unbalancedgw.batch_stable_ugw_solver.log_batch_ugw_sinkhorn', 'log_batch_ugw_sinkhorn', (['mu', 'Cx', 'nu', 'Cy'], {'init': 'init_plan', 'eps': 'eps', 'rho': 'rho', 'rho2': 'rho2', 'nits_plan': '(3000)', 'tol_plan': '(1e-05)', 'nits_sinkhorn': '(3000)', 'tol_sinkhorn': '(1e-06)'}), '(mu, Cx, nu, Cy, init=init_plan, eps=eps, rho=rho,\n rho2=rho2, nits_plan=3000, tol_plan=1e-05, nits_sinkhorn=3000,\n tol_sinkhorn=1e-06)\n', (4522, 4663), False, 'from unbalancedgw.batch_stable_ugw_solver import log_batch_ugw_sinkhorn\n'), ((5171, 5202), 'numpy.save', 'np.save', (['(path + fname)', 'pi_numpy'], {}), '(path + fname, pi_numpy)\n', (5178, 5202), True, 'import numpy as np\n'), ((2098, 2183), 'utils.draw_p_u_dataset_scar', 'utils.draw_p_u_dataset_scar', (['dataset_p', 'dataset_u', 'n_pos', 'n_unl', 'prior'], {'seed_nb': 'i'}), '(dataset_p, dataset_u, n_pos, n_unl, prior,\n seed_nb=i)\n', (2125, 2183), False, 'import utils\n'), ((2257, 2306), 'partial_gw.compute_cost_matrices', 'compute_cost_matrices', (['P', 'U', 'prior'], {'nb_dummies': '(10)'}), '(P, U, prior, nb_dummies=10)\n', (2278, 2306), False, 'from partial_gw import compute_cost_matrices\n'), ((3232, 3317), 'utils.draw_p_u_dataset_scar', 'utils.draw_p_u_dataset_scar', (['dataset_p', 'dataset_u', 'n_pos', 'n_unl', 'prior'], {'seed_nb': 'i'}), '(dataset_p, dataset_u, n_pos, n_unl, prior,\n seed_nb=i)\n', (3259, 3317), False, 'import utils\n'), ((4013, 4125), 'unbalancedgw._batch_utils.compute_batch_flb_plan', 'compute_batch_flb_plan', (['mu', 'Cx', 'nu', 'Cy'], {'eps': 'eps', 'rho': 'rho', 'rho2': 'rho2', 'nits_sinkhorn': '(50000)', 'tol_sinkhorn': '(1e-05)'}), '(mu, Cx, nu, Cy, eps=eps, rho=rho, rho2=rho2,\n nits_sinkhorn=50000, tol_sinkhorn=1e-05)\n', (4035, 4125), False, 'from unbalancedgw._batch_utils import compute_batch_flb_plan\n'), ((4767, 4782), 'torch.isnan', 'torch.isnan', (['pi'], {}), '(pi)\n', (4778, 4782), False, 'import torch\n'), ((7996, 8021), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8019, 8021), False, 'import torch\n'), ((8111, 8136), 
'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8134, 8136), False, 'import torch\n'), ((8245, 8275), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'total_devices'}), '(n_jobs=total_devices)\n', (8253, 8275), False, 'from joblib import Parallel, delayed\n'), ((3377, 3418), 'torch.tensor', 'torch.tensor', (['P.values'], {'dtype': 'torch.float'}), '(P.values, dtype=torch.float)\n', (3389, 3418), False, 'import torch\n'), ((3437, 3478), 'torch.tensor', 'torch.tensor', (['U.values'], {'dtype': 'torch.float'}), '(U.values, dtype=torch.float)\n', (3449, 3478), False, 'import torch\n'), ((2419, 2439), 'ot.emd', 'ot.emd', (['mu', 'nu', 'Ctot'], {}), '(mu, nu, Ctot)\n', (2425, 2439), False, 'import ot\n'), ((3674, 3693), 'torch.ones', 'torch.ones', (['[n_pos]'], {}), '([n_pos])\n', (3684, 3693), False, 'import torch\n'), ((3732, 3751), 'torch.ones', 'torch.ones', (['[n_unl]'], {}), '([n_unl])\n', (3742, 3751), False, 'import torch\n'), ((8309, 8334), 'joblib.delayed', 'delayed', (['compute_plan_ugw'], {}), '(compute_plan_ugw)\n', (8316, 8334), False, 'from joblib import Parallel, delayed\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 1 18:44:04 2018
@author: JavaWizards
"""
import numpy as np
file = "/Users/nuno_chicoria/Downloads/b_should_be_easy.in"
handle = open(file)
R, C, F, N, B, T = handle.readline().split()
rides = []
index = []
for i in range(int(N)):
index.append(i)
for line in handle:
rides.append(line.split())
rides_np = np.asarray(rides)
rides_np = np.column_stack([rides_np, index])
rides_np = rides_np.astype(int)
rides_np = rides_np[rides_np[:,5].argsort()]
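# Each vehicle entry below appears to hold [status, position, pickup, dropoff, assigned ride ids],
# where status "A" = available, "C" = driving to the pickup point and "D" = delivering
# (driving to the dropoff); positions are [row, column] grid coordinates.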
vehicles = {}
for i in range(int(F)):
    vehicles[i] = ["A", [0, 0], [0, 0], [0, 0], []]
for i in range(int(T)):
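    # Keep only rides whose latest-finish deadline has not yet passed.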
rides_np = rides_np[rides_np[:,5] > i]
for item in range(len(vehicles)):
if vehicles[item][0] == "A":
if rides_np.size != 0:
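                # Greedily take the earliest-deadline ride when the car would reach the
                # pickup no earlier than its start time and can still finish by its deadline.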
if abs(vehicles[item][1][0] - rides_np[0, 0]) + abs(vehicles[item][1][1] - rides_np[0, 1]) + i >= rides_np[0, 4]:
if abs(vehicles[item][1][0] - rides_np[0, 0]) + abs(vehicles[item][1][1] - rides_np[0, 1]) + i + abs(rides_np[0,0] - rides_np[0,2]) + abs(rides_np[0,1] - rides_np[0,3]) <= rides_np[0, 5]:
vehicles[item][0] = "C"
vehicles[item][2] = [rides_np[0, 0], rides_np[0, 1]]
vehicles[item][3] = [rides_np[0, 2], rides_np[0, 3]]
vehicles[item][4].append(rides_np[0, 6])
rides_np = np.delete(rides_np, (0), axis=0)
else:
rides_np = np.delete(rides_np, (0), axis=0)
for item in range(len(vehicles)):
if vehicles[item][0] == "C":
if vehicles[item][1][0] < vehicles[item][2][0]:
vehicles[item][1][0] = vehicles[item][1][0] + 1
elif vehicles[item][1][0] > vehicles[item][2][0]:
vehicles[item][1][0] = vehicles[item][1][0] - 1
elif vehicles[item][1][0] == vehicles[item][2][0]:
if vehicles[item][1][1] < vehicles[item][2][1]:
vehicles[item][1][1] = vehicles[item][1][1] + 1
elif vehicles[item][1][1] > vehicles[item][2][1]:
vehicles[item][1][1] = vehicles[item][1][1] - 1
else:
vehicles[item][0] = "D"
for item in range(len(vehicles)):
if vehicles[item][0] == "D":
if vehicles[item][1][0] < vehicles[item][3][0]:
vehicles[item][1][0] += 1
elif vehicles[item][1][0] > vehicles[item][3][0]:
vehicles[item][1][0] -= 1
elif vehicles[item][1][0] == vehicles[item][3][0]:
if vehicles[item][1][1] < vehicles[item][3][1]:
vehicles[item][1][1] += 1
elif vehicles[item][1][1] > vehicles[item][3][1]:
vehicles[item][1][1] -= 1
else:
vehicles[item][0] = "A"
vehicles[item][2] = None
vehicles[item][3] = None
results = open("ghc2018.txt", "w+")
for item in range(len(vehicles)):
if len(vehicles[item][4]) !=0:
results.write(str(len(vehicles[item][4])))
for ride in vehicles[item][4]:
results.write(" ")
results.write(str(ride))
results.write("\n")
results.close()
|
[
"numpy.delete",
"numpy.asarray",
"numpy.column_stack"
] |
[((397, 414), 'numpy.asarray', 'np.asarray', (['rides'], {}), '(rides)\n', (407, 414), True, 'import numpy as np\n'), ((426, 460), 'numpy.column_stack', 'np.column_stack', (['[rides_np, index]'], {}), '([rides_np, index])\n', (441, 460), True, 'import numpy as np\n'), ((1455, 1485), 'numpy.delete', 'np.delete', (['rides_np', '(0)'], {'axis': '(0)'}), '(rides_np, 0, axis=0)\n', (1464, 1485), True, 'import numpy as np\n'), ((1549, 1579), 'numpy.delete', 'np.delete', (['rides_np', '(0)'], {'axis': '(0)'}), '(rides_np, 0, axis=0)\n', (1558, 1579), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""A rate network for neutral hydrogen following
Katz, Weinberg & Hernquist 1996, eq. 28-32."""
import os.path
import math
import numpy as np
import scipy.interpolate as interp
import scipy.optimize
class RateNetwork(object):
"""A rate network for neutral hydrogen following
Katz, Weinberg & Hernquist 1996, astro-ph/9509107, eq. 28-32.
Most internal methods are CamelCapitalized and follow a convention that
they are named like the process and then the ion they refer to.
eg:
CollisionalExciteHe0 is the neutral Helium collisional excitation rate.
RecombHp is the recombination rate for ionized hydrogen.
Externally useful methods (the API) are named like get_*.
These are:
get_temp() - gets the temperature from the density and internal energy.
get_cooling_rate() - gets the total cooling rate from density and internal energy.
get_neutral_fraction() - gets the neutral fraction from the rate network given density and internal energy.
Two useful helper functions:
get_equilib_ne() - gets the equilibrium electron density.
get_ne_by_nh() - gets the above, divided by the hydrogen density (Gadget reports this as ElectronAbundance).
Constructor arguments:
redshift - the redshift at which to evaluate the cooling. Affects the photoionization rate,
the Inverse Compton cooling and the self shielding threshold.
photo_factor - Factor by which to multiply the UVB amplitude.
f_bar - Baryon fraction. Omega_b / Omega_cdm.
converge - Tolerance to which the rate network should be converged.
selfshield - Flag to enable self-shielding following Rahmati 2013
cool - which cooling rate coefficient table to use.
Supported are: KWH (original Gadget rates)
Nyx (rates used in Nyx (Lukic 2015))
Sherwood (rates used in Sherwood simulations (Bolton 2017))
Default is Sherwood
recomb - which recombination rate table to use.
Supported are: C92 (Cen 1992, the Gadget default)
V96 (Verner & Ferland 1996, more accurate rates).
B06 (Badnell 2006 rates, current cloudy defaults. Very similar to V96).
collisional - Flag to enable collisional ionizations.
treecool_file - File to read a UV background from. Matches format used by Gadget.
"""
def __init__(self,redshift, photo_factor = 1., f_bar = 0.17, converge = 1e-7, selfshield=True, cool="Sherwood", recomb="V96", collisional=True, treecool_file="data/TREECOOL_ep_2018p"):
if recomb == "V96":
self.recomb = RecombRatesVerner96()
elif recomb == "B06":
self.recomb = RecombRatesBadnell()
else:
self.recomb = RecombRatesCen92()
self.photo = PhotoRates(treecool_file=treecool_file)
self.photo_factor = photo_factor
self.f_bar = f_bar
if cool == "KWH":
self.cool = CoolingRatesKWH92()
elif cool == "Sherwood":
self.cool = CoolingRatesSherwood()
elif cool == "Nyx":
self.cool = CoolingRatesNyx()
else:
raise ValueError("Not supported")
#Extra helium reionization photoheating model
self.hub = 0.7
self.he_thresh = 10
self.he_amp = 1
self.he_exp = 0
self.he_model_on = False
#proton mass in g
self.protonmass = 1.67262178e-24
self.redshift = redshift
self.converge = converge
self.selfshield = selfshield
self.collisional = collisional
zz = [0, 1, 2, 3, 4, 5, 6, 7, 8]
#Tables for the self-shielding correction. Note these are not well-measured for z > 5!
gray_opac = [2.59e-18,2.37e-18,2.27e-18, 2.15e-18, 2.02e-18, 1.94e-18, 1.82e-18, 1.71e-18, 1.60e-18]
self.Gray_ss = interp.InterpolatedUnivariateSpline(zz, gray_opac)
def get_temp(self, density, ienergy, helium=0.24):
"""Get the equilibrium temperature at given internal energy.
density is gas density in protons/cm^3
Internal energy is in J/kg == 10^-10 ergs/g.
helium is a mass fraction"""
ne = self.get_equilib_ne(density, ienergy, helium)
nh = density * (1-helium)
return self._get_temp(ne/nh, ienergy, helium)
def get_cooling_rate(self, density, ienergy, helium=0.24, photoheating=False):
"""Get the total cooling rate for a temperature and density. Negative means heating."""
ne = self.get_equilib_ne(density, ienergy, helium)
nh = density * (1-helium)
temp = self._get_temp(ne/nh, ienergy, helium)
nH0 = self._nH0(nh, temp, ne)
nHe0 = self._nHe0(nh, temp, ne)
nHp = self._nHp(nh, temp, ne)
nHep = self._nHep(nh, temp, ne)
nHepp = self._nHepp(nh, temp, ne)
#This is the collisional excitation and ionisation rate.
LambdaCollis = ne * (self.cool.CollisionalH0(temp) * nH0 +
self.cool.CollisionalHe0(temp) * nHe0 +
self.cool.CollisionalHeP(temp) * nHep)
LambdaRecomb = ne * (self.cool.RecombHp(temp) * nHp +
self.cool.RecombHeP(temp) * nHep +
self.cool.RecombHePP(temp) * nHepp)
LambdaFF = ne * (self.cool.FreeFree(temp, 1)*(nHp + nHep) + self.cool.FreeFree(temp, 2)*nHepp)
LambdaCmptn = ne * self.cool.InverseCompton(temp, self.redshift)
Lambda = LambdaCollis + LambdaRecomb + LambdaFF + LambdaCmptn
Heating = 0
if photoheating:
Heating = nH0 * self.photo.epsH0(self.redshift)
Heating += nHe0 * self.photo.epsHe0(self.redshift)
Heating += nHep * self.photo.epsHep(self.redshift)
Heating *= self.photo_factor
if self.he_model_on:
Heating *= self._he_reion_factor(density)
return Lambda - Heating
def get_equilib_ne(self, density, ienergy,helium=0.24):
"""Solve the system of equations for photo-ionisation equilibrium,
starting with ne = nH and continuing until convergence.
density is gas density in protons/cm^3
Internal energy is in J/kg == 10^-10 ergs/g.
helium is a mass fraction.
"""
#Get hydrogen number density
nh = density * (1-helium)
rooted = lambda ne: self._ne(nh, self._get_temp(ne/nh, ienergy, helium=helium), ne, helium=helium)
ne = scipy.optimize.fixed_point(rooted, nh,xtol=self.converge)
assert np.all(np.abs(rooted(ne) - ne) < self.converge)
return ne
def get_ne_by_nh(self, density, ienergy, helium=0.24):
"""Same as above, but get electrons per proton."""
return self.get_equilib_ne(density, ienergy, helium)/(density*(1-helium))
def get_neutral_fraction(self, density, ienergy, helium=0.24):
"""Get the neutral hydrogen fraction at a given temperature and density.
density is gas density in protons/cm^3
Internal energy is in J/kg == 10^-10 ergs/g.
helium is a mass fraction.
"""
ne = self.get_equilib_ne(density, ienergy, helium=helium)
nh = density * (1-helium)
temp = self._get_temp(ne/nh, ienergy, helium)
return self._nH0(nh, temp, ne) / nh
def _nH0(self, nh, temp, ne):
"""The neutral hydrogen number density. Eq. 33 of KWH."""
alphaHp = self.recomb.alphaHp(temp)
GammaeH0 = self.collisional * self.recomb.GammaeH0(temp)
photorate = self.photo.gH0(self.redshift)/ne*self.photo_factor*self._self_shield_corr(nh, temp)
return nh * alphaHp/ (alphaHp + GammaeH0 + photorate)
def _nHp(self, nh, temp, ne):
"""The ionised hydrogen number density. Eq. 34 of KWH."""
return nh - self._nH0(nh, temp, ne)
def _nHep(self, nh, temp, ne):
"""The ionised helium number density, divided by the helium number fraction. Eq. 35 of KWH."""
alphaHep = self.recomb.alphaHep(temp) + self.recomb.alphad(temp)
alphaHepp = self.recomb.alphaHepp(temp)
photofac = self.photo_factor*self._self_shield_corr(nh, temp)
GammaHe0 = self.collisional * self.recomb.GammaeHe0(temp) + self.photo.gHe0(self.redshift)/ne*photofac
GammaHep = self.collisional * self.recomb.GammaeHep(temp) + self.photo.gHep(self.redshift)/ne*photofac
return nh / (1 + alphaHep / GammaHe0 + GammaHep/alphaHepp)
def _nHe0(self, nh, temp, ne):
"""The neutral helium number density, divided by the helium number fraction. Eq. 36 of KWH."""
alphaHep = self.recomb.alphaHep(temp) + self.recomb.alphad(temp)
photofac = self.photo_factor*self._self_shield_corr(nh, temp)
GammaHe0 = self.collisional * self.recomb.GammaeHe0(temp) + self.photo.gHe0(self.redshift)/ne*photofac
return self._nHep(nh, temp, ne) * alphaHep / GammaHe0
def _nHepp(self, nh, temp, ne):
"""The doubly ionised helium number density, divided by the helium number fraction. Eq. 37 of KWH."""
photofac = self.photo_factor*self._self_shield_corr(nh, temp)
GammaHep = self.collisional * self.recomb.GammaeHep(temp) + self.photo.gHep(self.redshift)/ne*photofac
alphaHepp = self.recomb.alphaHepp(temp)
return self._nHep(nh, temp, ne) * GammaHep / alphaHepp
def _ne(self, nh, temp, ne, helium=0.24):
"""The electron number density. Eq. 38 of KWH."""
yy = helium / 4 / (1 - helium)
return self._nHp(nh, temp, ne) + yy * self._nHep(nh, temp, ne) + 2* yy * self._nHepp(nh, temp, ne)
def _self_shield_corr(self, nh, temp):
"""Photoionisation rate as a function of density from Rahmati 2012, eq. 14.
Calculates Gamma_{Phot} / Gamma_{UVB}.
Inputs: hydrogen density, temperature
n_H
The coefficients are their best-fit from appendix A."""
if not self.selfshield:
return np.ones_like(nh)
nSSh = 1.003*self._self_shield_dens(self.redshift, temp)
return 0.98*(1+(nh/nSSh)**1.64)**-2.28+0.02*(1+nh/nSSh)**-0.84
    def _self_shield_dens(self,redshift, temp):
        """Calculate the critical self-shielding density. Rahmati 2012 eq. 13.
gray_opac is a parameter of the UVB used.
gray_opac is in cm^2 (2.49e-18 is HM01 at z=3)
temp is particle temperature in K
f_bar is the baryon fraction. 0.17 is roughly 0.045/0.265
Returns density in atoms/cm^3"""
T4 = temp/1e4
G12 = self.photo.gH0(redshift)/1e-12
return 6.73e-3 * (self.Gray_ss(redshift) / 2.49e-18)**(-2./3)*(T4)**0.17*(G12)**(2./3)*(self.f_bar/0.17)**(-1./3)
def _he_reion_factor(self, density):
"""Compute a density dependent correction factor to the heating rate which can model the effect of helium reionization.
Argument: Gas density in protons/cm^3."""
#Newton's constant (cgs units)
gravity = 6.672e-8
#100 km/s/Mpc in h/sec
hubble = 3.2407789e-18
omegab = 0.0483
atime = 1/(1+self.redshift)
rhoc = 3 * (self.hub* hubble)**2 /(8* math.pi * gravity)
overden = self.protonmass * density /(omegab * rhoc * atime**(-3))
if overden >= self.he_thresh:
overden = self.he_thresh
return self.he_amp * overden**self.he_exp
def _get_temp(self, nebynh, ienergy, helium=0.24):
"""Compute temperature (in K) from internal energy and electron density.
Uses: internal energy
electron abundance per H atom (ne/nH)
hydrogen mass fraction (0.76)
Internal energy is in J/kg, internal gadget units, == 10^-10 ergs/g.
Factor to convert U (J/kg) to T (K) : U = N k T / (γ - 1)
T = U (γ-1) μ m_P / k_B
where k_B is the Boltzmann constant
        γ is 5/3, the adiabatic index of a monatomic ideal gas
m_P is the proton mass
μ = 1 / (mean no. molecules per unit atomic weight)
= 1 / (X + Y /4 + E)
where E = Ne * X, and Y = (1-X).
Can neglect metals as they are heavy.
Leading contribution is from electrons, which is already included
[+ Z / (12->16)] from metal species
[+ Z/16*4 ] for OIV from electrons."""
#convert U (J/kg) to T (K) : U = N k T / (γ - 1)
#T = U (γ-1) μ m_P / k_B
#where k_B is the Boltzmann constant
        #γ is 5/3, the adiabatic index of a monatomic ideal gas
#m_P is the proton mass
#μ is 1 / (mean no. molecules per unit atomic weight) calculated in loop.
#Internal energy units are 10^-10 erg/g
hy_mass = 1 - helium
muienergy = 4 / (hy_mass * (3 + 4*nebynh) + 1)*ienergy*1e10
#Boltzmann constant (cgs)
boltzmann=1.38066e-16
gamma=5./3
#So for T in K, boltzmann in erg/K, internal energy has units of erg/g
temp = (gamma-1) * self.protonmass / boltzmann * muienergy
return temp
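# Illustrative use of the public API described in the RateNetwork docstring (kept as
# comments: building the network reads a TREECOOL table from disk, and the density and
# internal-energy values here are arbitrary placeholders rather than reference numbers):
#     rates = RateNetwork(redshift=3.)
#     temp = rates.get_temp(density=1e-4, ienergy=2000.)
#     neutral_frac = rates.get_neutral_fraction(density=1e-4, ienergy=2000.)
# density is in protons/cm^3; ienergy is in the internal energy units documented in
# get_temp/_get_temp above.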
class RecombRatesCen92(object):
"""Recombination rates and collisional ionization rates, as a function of temperature.
This is taken from KWH 06, astro-ph/9509107, Table 2, based on Cen 1992.
Illustris uses these rates."""
def alphaHp(self,temp):
"""Recombination rate for H+, ionized hydrogen, in cm^3/s.
Temp in K."""
return 8.4e-11 / np.sqrt(temp) / np.power(temp/1000, 0.2) / (1+ np.power(temp/1e6, 0.7))
def alphaHep(self,temp):
"""Recombination rate for He+, ionized helium, in cm^3/s.
Temp in K."""
return 1.5e-10 / np.power(temp,0.6353)
def alphad(self, temp):
"""Recombination rate for dielectronic recombination, in cm^3/s.
Temp in K."""
return 1.9e-3 / np.power(temp,1.5) * np.exp(-4.7e5/temp)*(1+0.3*np.exp(-9.4e4/temp))
def alphaHepp(self, temp):
"""Recombination rate for doubly ionized helium, in cm^3/s.
Temp in K."""
return 4 * self.alphaHp(temp)
def GammaeH0(self,temp):
"""Collisional ionization rate for H0 in cm^3/s. Temp in K"""
return 5.85e-11 * np.sqrt(temp) * np.exp(-157809.1/temp) / (1+ np.sqrt(temp/1e5))
    def GammaeHe0(self,temp):
        """Collisional ionization rate for He0 in cm^3/s. Temp in K"""
return 2.38e-11 * np.sqrt(temp) * np.exp(-285335.4/temp) / (1+ np.sqrt(temp/1e5))
    def GammaeHep(self,temp):
        """Collisional ionization rate for He+ in cm^3/s. Temp in K"""
return 5.68e-12 * np.sqrt(temp) * np.exp(-631515.0/temp) / (1+ np.sqrt(temp/1e5))
class RecombRatesVerner96(object):
"""Recombination rates and collisional ionization rates, as a function of temperature.
Recombination rates are the fit from Verner & Ferland 1996 (astro-ph/9509083).
Collisional rates are the fit from Voronov 1997 (http://www.sciencedirect.com/science/article/pii/S0092640X97907324).
In a very photoionised medium this changes the neutral hydrogen abundance by approximately 10% compared to Cen 1992.
These rates are those used by Nyx.
"""
def _Verner96Fit(self, temp, aa, bb, temp0, temp1):
"""Formula used as a fitting function in Verner & Ferland 1996 (astro-ph/9509083)."""
sqrttt0 = np.sqrt(temp/temp0)
sqrttt1 = np.sqrt(temp/temp1)
return aa / ( sqrttt0 * (1 + sqrttt0)**(1-bb)*(1+sqrttt1)**(1+bb) )
def alphaHp(self,temp):
"""Recombination rate for H+, ionized hydrogen, in cm^3/s.
The V&F 96 fitting formula is accurate to < 1% in the worst case.
Temp in K."""
#See line 1 of V&F96 table 1.
return self._Verner96Fit(temp, aa=7.982e-11, bb=0.748, temp0=3.148, temp1=7.036e+05)
def alphaHep(self,temp):
"""Recombination rate for He+, ionized helium, in cm^3/s.
Accurate to ~2% for T < 10^6 and 5% for T< 10^10.
Temp in K."""
#VF96 give two rates. The first is more accurate for T < 10^6, the second is valid up to T = 10^10.
#We use the most accurate allowed. See lines 2 and 3 of Table 1 of VF96.
lowTfit = self._Verner96Fit(temp, aa=3.294e-11, bb=0.6910, temp0=1.554e+01, temp1=3.676e+07)
highTfit = self._Verner96Fit(temp, aa=9.356e-10, bb=0.7892, temp0=4.266e-02, temp1=4.677e+06)
#Note that at 10^6K the two fits differ by ~10%. This may lead one to disbelieve the quoted accuracies!
#We thus switch over at a slightly lower temperature.
#The two fits cross at T ~ 3e5K.
swtmp = 7e5
deltat = 1e5
upper = swtmp + deltat
lower = swtmp - deltat
#In order to avoid a sharp feature at 10^6 K, we linearly interpolate between the two fits around 10^6 K.
interpfit = (lowTfit * (upper - temp) + highTfit * (temp - lower))/(2*deltat)
return (temp < lower)*lowTfit + (temp > upper)*highTfit + (upper > temp)*(temp > lower)*interpfit
def alphad(self, temp):
"""Recombination rate for dielectronic recombination, in cm^3/s.
This is the value from Aldrovandi & Pequignot 73, as used in Nyx, Sherwood and Cen 1992.
It is corrected from the value in Aldrovandi & Pequignot 1973 by Burgess & Tworkowski 1976 (fig1)
by a factor of 0.65. The exponent is also made slightly more accurate.
Temp in K."""
return 1.23e-3 / np.power(temp,1.5) * np.exp(-4.72e5/temp)*(1+0.3*np.exp(-9.4e4/temp))
def alphaHepp(self, temp):
"""Recombination rate for doubly ionized helium, in cm^3/s. Accurate to 2%.
Temp in K."""
#See line 4 of V&F96 table 1.
return self._Verner96Fit(temp, aa=1.891e-10, bb=0.7524, temp0=9.370, temp1=2.774e6)
def _Voronov96Fit(self, temp, dE, PP, AA, XX, KK):
"""Fitting function for collisional rates. Eq. 1 of Voronov 1997. Accurate to 10%,
but data is only accurate to 50%."""
bolevk = 8.61734e-5 # Boltzmann constant in units of eV/K
UU = dE / (bolevk * temp)
return AA * (1 + PP * np.sqrt(UU))/(XX+UU) * UU**KK * np.exp(-UU)
def GammaeH0(self,temp):
"""Collisional ionization rate for H0 in cm^3/s. Temp in K. Voronov 97, Table 1."""
return self._Voronov96Fit(temp, 13.6, 0, 0.291e-07, 0.232, 0.39)
def GammaeHe0(self,temp):
"""Collisional ionization rate for He0 in cm^3/s. Temp in K. Voronov 97, Table 1."""
return self._Voronov96Fit(temp, 24.6, 0, 0.175e-07, 0.180, 0.35)
    def GammaeHep(self,temp):
        """Collisional ionization rate for He+ in cm^3/s. Temp in K. Voronov 97, Table 1."""
return self._Voronov96Fit(temp, 54.4, 1, 0.205e-08, 0.265, 0.25)
class RecombRatesBadnell(RecombRatesVerner96):
"""Recombination rates and collisional ionization rates, as a function of temperature.
Recombination rates are the fit from Badnell's website: http://amdpp.phys.strath.ac.uk/tamoc/RR/#partial.
"""
def _RecombRateFit_lowcharge_ion(self, temp, aa, bb, cc, temp0, temp1, temp2):
"""Formula used as a fitting function in Verner & Ferland 1996 (astro-ph/9509083)/ See http://amdpp.phys.strath.ac.uk/tamoc/RR/#partial."""
sqrttt0 = np.sqrt(temp/temp0)
sqrttt1 = np.sqrt(temp/temp1)
BB = bb + cc*np.exp(-temp2/temp)
return aa / ( sqrttt0 * (1 + sqrttt0)**(1-BB)*(1+sqrttt1)**(1+BB) )
def alphaHp(self,temp):
"""Recombination rate for H+, ionized hydrogen, in cm^3/s.
Temp in K."""
#See line 1 of V&F96 table 1.
return self._Verner96Fit(temp, aa=8.318e-11, bb=0.7472, temp0=2.965, temp1=7.001e5)
    def alphaHep(self,temp):
        """Recombination rate for He+, singly ionized helium, in cm^3/s.
Temp in K."""
#See line 1 of V&F96 table 1.
return self._Verner96Fit(temp, aa=1.818E-10, bb=0.7492, temp0=10.17, temp1=2.786e6)
def alphaHepp(self, temp):
"""Recombination rate for doubly ionized helium, in cm^3/s.
Temp in K."""
#See line 4 of V&F96 table 1.
return self._RecombRateFit_lowcharge_ion(temp, aa=5.235E-11, bb=0.6988, cc=0.0829, temp0=7.301, temp1=4.475e6, temp2 = 1.682e5)
class PhotoRates(object):
"""The photoionization rates for a given species.
Eq. 29 of KWH 96. This is loaded from a TREECOOL table."""
def __init__(self, treecool_file="data/TREECOOL_ep_2018p"):
#Format of the treecool table:
# log_10(1+z), Gamma_HI, Gamma_HeI, Gamma_HeII, Qdot_HI, Qdot_HeI, Qdot_HeII,
# where 'Gamma' is the photoionization rate and 'Qdot' is the photoheating rate.
# The Gamma's are in units of s^-1, and the Qdot's are in units of erg s^-1.
try:
data = np.loadtxt(treecool_file)
except OSError:
treefile = os.path.join(os.path.dirname(os.path.realpath(__file__)), treecool_file)
data = np.loadtxt(treefile)
redshifts = data[:,0]
photo_rates = data[:,1:4]
photo_heat = data[:,4:7]
assert np.shape(redshifts)[0] == np.shape(photo_rates)[0]
self.Gamma_HI = interp.InterpolatedUnivariateSpline(redshifts, photo_rates[:,0])
self.Gamma_HeI = interp.InterpolatedUnivariateSpline(redshifts, photo_rates[:,1])
self.Gamma_HeII = interp.InterpolatedUnivariateSpline(redshifts, photo_rates[:,2])
self.Eps_HI = interp.InterpolatedUnivariateSpline(redshifts, photo_heat[:,0])
self.Eps_HeI = interp.InterpolatedUnivariateSpline(redshifts, photo_heat[:,1])
self.Eps_HeII = interp.InterpolatedUnivariateSpline(redshifts, photo_heat[:,2])
def gHe0(self,redshift):
"""Get photo rate for neutral Helium"""
log1z = np.log10(1+redshift)
return self.Gamma_HeI(log1z)
def gHep(self,redshift):
"""Get photo rate for singly ionized Helium"""
log1z = np.log10(1+redshift)
return self.Gamma_HeII(log1z)
def gH0(self,redshift):
"""Get photo rate for neutral Hydrogen"""
log1z = np.log10(1+redshift)
return self.Gamma_HI(log1z)
def epsHe0(self,redshift):
"""Get photo heating rate for neutral Helium"""
log1z = np.log10(1+redshift)
return self.Eps_HeI(log1z)
def epsHep(self,redshift):
"""Get photo heating rate for singly ionized Helium"""
log1z = np.log10(1+redshift)
return self.Eps_HeII(log1z)
def epsH0(self,redshift):
"""Get photo heating rate for neutral Hydrogen"""
log1z = np.log10(1+redshift)
return self.Eps_HI(log1z)
class CoolingRatesKWH92(object):
"""The cooling rates from KWH92, in erg s^-1 cm^-3 (cgs).
All rates are divided by the abundance of the ions involved in the interaction.
So we are computing the cooling rate divided by n_e n_X. Temperatures in K.
None of these rates are original to KWH92, but are taken from Cen 1992,
and originally from older references. The hydrogen rates in particular are probably inaccurate.
Cen 1992 modified (arbitrarily) the excitation and ionisation rates for high temperatures.
There is no collisional excitation rate for He0 - not sure why.
References:
Black 1981, from Lotz 1967, Seaton 1959, Burgess & Seaton 1960.
Recombination rates are from Spitzer 1978.
Free-free: Spitzer 1978.
Collisional excitation and ionisation cooling rates are merged.
"""
def __init__(self, tcmb=2.7255, t5_corr=1e5, recomb=None):
self.tcmb = tcmb
if recomb is None:
self.recomb = RecombRatesCen92()
else:
self.recomb = recomb
self.t5_corr = t5_corr
#1 eV in ergs
self.eVinergs = 1.60218e-12
#boltzmann constant in erg/K
self.kB = 1.38064852e-16
def _t5(self, temp):
"""Commonly used Cen 1992 correction factor for large temperatures.
This is implemented so that the cooling rates have the right
asymptotic behaviour. However, Cen erroneously imposes this correction at T=1e5,
which is too small: the Black 1981 rates these are based on should be good
until 5e5 at least, where the correction factor has a 10% effect already.
More modern tables thus impose it at T=5e7, which is still arbitrary but should be harmless.
"""
        return 1+(temp/self.t5_corr)**0.5
def CollisionalExciteH0(self, temp):
"""Collisional excitation cooling rate for n_H0 and n_e. Gadget calls this BetaH0."""
return 7.5e-19 * np.exp(-118348.0/temp) /self._t5(temp)
def CollisionalExciteHeP(self, temp):
"""Collisional excitation cooling rate for n_He+ and n_e. Gadget calls this BetaHep."""
return 5.54e-17 * temp**(-0.397)*np.exp(-473638./temp)/self._t5(temp)
def CollisionalExciteHe0(self, temp):
"""This is listed in Cen 92 but neglected in KWH 97, presumably because it is very small."""
#return 0
return 9.1e-27 * temp**(-0.1687) * np.exp(-473638/temp) / self._t5(temp)
def CollisionalIonizeH0(self, temp):
"""Collisional ionisation cooling rate for n_H0 and n_e. Gadget calls this GammaeH0."""
#Ionisation potential of H0
return 13.5984 * self.eVinergs * self.recomb.GammaeH0(temp)
    def CollisionalIonizeHe0(self, temp):
        """Collisional ionisation cooling rate for n_He0 and n_e. Gadget calls this GammaeHe0."""
return 24.5874 * self.eVinergs * self.recomb.GammaeHe0(temp)
    def CollisionalIonizeHeP(self, temp):
        """Collisional ionisation cooling rate for n_He+ and n_e. Gadget calls this GammaeHep."""
return 54.417760 * self.eVinergs * self.recomb.GammaeHep(temp)
def CollisionalH0(self, temp):
"""Total collisional cooling for H0"""
return self.CollisionalExciteH0(temp) + self.CollisionalIonizeH0(temp)
    def CollisionalHe0(self, temp):
        """Total collisional cooling for He0"""
return self.CollisionalExciteHe0(temp) + self.CollisionalIonizeHe0(temp)
    def CollisionalHeP(self, temp):
        """Total collisional cooling for He+"""
return self.CollisionalExciteHeP(temp) + self.CollisionalIonizeHeP(temp)
def RecombHp(self, temp):
"""Recombination cooling rate for H+ and e. Gadget calls this AlphaHp."""
return 0.75 * self.kB * temp * self.recomb.alphaHp(temp)
def RecombHeP(self, temp):
"""Recombination cooling rate for He+ and e. Gadget calls this AlphaHep."""
#I'm not sure why they use 0.75 kT as the free energy of an electron.
#I would guess this is explained in Spitzer 1978.
return 0.75 * self.kB * temp * self.recomb.alphaHep(temp)+ self._RecombDielect(temp)
def RecombHePP(self, temp):
"""Recombination cooling rate for He++ and e. Gadget calls this AlphaHepp."""
return 0.75 * self.kB * temp * self.recomb.alphaHepp(temp)
def _RecombDielect(self, temp):
"""Dielectric recombination rate for He+ and e. Gadget calls this Alphad."""
#What is this magic number?
return 6.526e-11*self.recomb.alphad(temp)
def FreeFree(self, temp, zz):
"""Free-free cooling rate for electrons scattering on ions without being captured.
Factors here are n_e and total ionized species:
(FreeFree(zz=1)*(n_H+ + n_He+) + FreeFree(zz=2)*n_He++)"""
return 1.426e-27*np.sqrt(temp)*zz**2*self._gff(temp,zz)
def _gff(self, temp, zz):
"""Formula for the Gaunt factor. KWH takes this from Spitzer 1978."""
_ = zz
return 1.1+0.34*np.exp(-(5.5 - np.log10(temp))**2/3.)
def InverseCompton(self, temp, redshift):
"""Cooling rate for inverse Compton from the microwave background.
Multiply this only by n_e. Note the CMB temperature is hardcoded in KWH92 to 2.7."""
tcmb_red = self.tcmb * (1+redshift)
#Thompson cross-section in cm^2
sigmat = 6.6524e-25
#Radiation density constant, 4 sigma_stefan-boltzmann / c in erg cm^-3 K^-4
rad_dens = 7.5657e-15
#Electron mass in g
me = 9.10938e-28
#Speed of light in cm/s
cc = 2.99792e10
return 4 * sigmat * rad_dens / (me*cc) * tcmb_red**4 * self.kB * (temp - tcmb_red)
class CoolingRatesSherwood(CoolingRatesKWH92):
"""The cooling rates used in the Sherwood simulation, Bolton et al 2017, in erg s^-1 cm^-3 (cgs).
Differences from KWH92 are updated recombination and collisional ionization rates, and the use of a
larger temperature correction factor than Cen 92.
"""
def __init__(self, tcmb=2.7255, recomb=None):
        CoolingRatesKWH92.__init__(self, tcmb = tcmb, t5_corr = 5e7, recomb=RecombRatesVerner96())
class CoolingRatesNyx(CoolingRatesKWH92):
"""The cooling rates used in the Nyx paper Lukic 2014, 1406.6361, in erg s^-1 cm^-3 (cgs).
All rates are divided by the abundance of the ions involved in the interaction.
So we are computing the cooling rate divided by n_e n_X. Temperatures in K.
Major differences from KWH are the use of the Scholz & Walter 1991
hydrogen collisional cooling rates, a less aggressive high temperature correction for helium, and
Shapiro & Kang 1987 for free free.
Older Black 1981 recombination cooling rates are used!
They use the recombination rates from Verner & Ferland 96, but do not change the cooling rates to match.
Ditto the ionization rates from Voronov 1997: they should also use these rates for collisional ionisation,
although this is harder because Sholz & Walter don't break their rates into ionization and excitation.
References:
Scholz & Walters 1991 (0.45% accuracy)
Black 1981 (recombination and helium)
Shapiro & Kang 1987
"""
def __init__(self, tcmb=2.7255, recomb=None):
        CoolingRatesKWH92.__init__(self, tcmb = tcmb, t5_corr = 5e7, recomb=recomb)
def CollisionalH0(self, temp):
"""Collisional cooling rate for n_H0 and n_e. Gadget calls this BetaH0 + GammaeH0.
Formula from Eq. 23, Table 4 of Scholz & Walters, claimed good to 0.45 %.
Note though that they have two datasets which differ by a factor of two.
Differs from Cen 92 by a factor of two."""
#Technically only good for T > 2000.
y = np.log(temp)
#Constant is 0.75/k_B in Rydberg
Ryd = 2.1798741e-11
tot = -0.75/self.kB*Ryd/temp
coeffslowT = [213.7913, 113.9492, 25.06062, 2.762755, 0.1515352, 3.290382e-3]
coeffshighT = [271.25446, 98.019455, 14.00728, 0.9780842, 3.356289e-2, 4.553323e-4]
for j in range(6):
tot += ((temp < 1e5)*coeffslowT[j]+(temp >=1e5)*coeffshighT[j])*(-y)**j
return 1e-20 * np.exp(tot)
def RecombHp(self, temp):
"""Recombination cooling rate for H+ and e. Gadget calls this AlphaHp.
Differs by O(10%) until 3x10^6."""
return 2.851e-27 * np.sqrt(temp) * (5.914 - 0.5 * np.log(temp) + 0.01184 * temp**(1./3))
    def RecombHePP(self, temp):
        """Recombination cooling rate for He++ and e. Gadget calls this AlphaHepp.
Differs from Cen 92 by 10% until ~10^7"""
return 1.140e-26 * np.sqrt(temp) * (6.607 - 0.5 * np.log(temp) + 7.459e-3 * temp**(1./3))
def _gff(self, temp, zz):
"""Formula for the Gaunt factor from Shapiro & Kang 1987. ZZ is 1 for H+ and He+ and 2 for He++.
This is almost identical to the KWH rate but not continuous."""
#This is not continuous. Check the original reference.
little = (temp/zz**2 <= 3.2e5)
lt = np.log10(temp/zz**2)
return little * (0.79464 + 0.1243*lt) + np.logical_not(little) * ( 2.13164 - 0.1240 * lt)
|
[
"numpy.ones_like",
"numpy.shape",
"numpy.log10",
"numpy.sqrt",
"numpy.power",
"numpy.log",
"numpy.logical_not",
"numpy.exp",
"scipy.interpolate.InterpolatedUnivariateSpline",
"numpy.loadtxt"
] |
[((3983, 4033), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['zz', 'gray_opac'], {}), '(zz, gray_opac)\n', (4018, 4033), True, 'import scipy.interpolate as interp\n'), ((15357, 15378), 'numpy.sqrt', 'np.sqrt', (['(temp / temp0)'], {}), '(temp / temp0)\n', (15364, 15378), True, 'import numpy as np\n'), ((15395, 15416), 'numpy.sqrt', 'np.sqrt', (['(temp / temp1)'], {}), '(temp / temp1)\n', (15402, 15416), True, 'import numpy as np\n'), ((19238, 19259), 'numpy.sqrt', 'np.sqrt', (['(temp / temp0)'], {}), '(temp / temp0)\n', (19245, 19259), True, 'import numpy as np\n'), ((19276, 19297), 'numpy.sqrt', 'np.sqrt', (['(temp / temp1)'], {}), '(temp / temp1)\n', (19283, 19297), True, 'import numpy as np\n'), ((21120, 21185), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['redshifts', 'photo_rates[:, 0]'], {}), '(redshifts, photo_rates[:, 0])\n', (21155, 21185), True, 'import scipy.interpolate as interp\n'), ((21210, 21275), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['redshifts', 'photo_rates[:, 1]'], {}), '(redshifts, photo_rates[:, 1])\n', (21245, 21275), True, 'import scipy.interpolate as interp\n'), ((21301, 21366), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['redshifts', 'photo_rates[:, 2]'], {}), '(redshifts, photo_rates[:, 2])\n', (21336, 21366), True, 'import scipy.interpolate as interp\n'), ((21388, 21452), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['redshifts', 'photo_heat[:, 0]'], {}), '(redshifts, photo_heat[:, 0])\n', (21423, 21452), True, 'import scipy.interpolate as interp\n'), ((21475, 21539), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['redshifts', 'photo_heat[:, 1]'], {}), '(redshifts, photo_heat[:, 1])\n', (21510, 21539), True, 'import scipy.interpolate as interp\n'), ((21563, 21627), 'scipy.interpolate.InterpolatedUnivariateSpline', 'interp.InterpolatedUnivariateSpline', (['redshifts', 'photo_heat[:, 2]'], {}), '(redshifts, photo_heat[:, 2])\n', (21598, 21627), True, 'import scipy.interpolate as interp\n'), ((21721, 21743), 'numpy.log10', 'np.log10', (['(1 + redshift)'], {}), '(1 + redshift)\n', (21729, 21743), True, 'import numpy as np\n'), ((21880, 21902), 'numpy.log10', 'np.log10', (['(1 + redshift)'], {}), '(1 + redshift)\n', (21888, 21902), True, 'import numpy as np\n'), ((22034, 22056), 'numpy.log10', 'np.log10', (['(1 + redshift)'], {}), '(1 + redshift)\n', (22042, 22056), True, 'import numpy as np\n'), ((22195, 22217), 'numpy.log10', 'np.log10', (['(1 + redshift)'], {}), '(1 + redshift)\n', (22203, 22217), True, 'import numpy as np\n'), ((22362, 22384), 'numpy.log10', 'np.log10', (['(1 + redshift)'], {}), '(1 + redshift)\n', (22370, 22384), True, 'import numpy as np\n'), ((22524, 22546), 'numpy.log10', 'np.log10', (['(1 + redshift)'], {}), '(1 + redshift)\n', (22532, 22546), True, 'import numpy as np\n'), ((30270, 30282), 'numpy.log', 'np.log', (['temp'], {}), '(temp)\n', (30276, 30282), True, 'import numpy as np\n'), ((31548, 31572), 'numpy.log10', 'np.log10', (['(temp / zz ** 2)'], {}), '(temp / zz ** 2)\n', (31556, 31572), True, 'import numpy as np\n'), ((10079, 10095), 'numpy.ones_like', 'np.ones_like', (['nh'], {}), '(nh)\n', (10091, 10095), True, 'import numpy as np\n'), ((13713, 13735), 'numpy.power', 'np.power', (['temp', '(0.6353)'], {}), '(temp, 0.6353)\n', (13721, 13735), True, 'import 
numpy as np\n'), ((18129, 18140), 'numpy.exp', 'np.exp', (['(-UU)'], {}), '(-UU)\n', (18135, 18140), True, 'import numpy as np\n'), ((20747, 20772), 'numpy.loadtxt', 'np.loadtxt', (['treecool_file'], {}), '(treecool_file)\n', (20757, 20772), True, 'import numpy as np\n'), ((30701, 30712), 'numpy.exp', 'np.exp', (['tot'], {}), '(tot)\n', (30707, 30712), True, 'import numpy as np\n'), ((13514, 13540), 'numpy.power', 'np.power', (['(temp / 1000)', '(0.2)'], {}), '(temp / 1000, 0.2)\n', (13522, 13540), True, 'import numpy as np\n'), ((13545, 13576), 'numpy.power', 'np.power', (['(temp / 1000000.0)', '(0.7)'], {}), '(temp / 1000000.0, 0.7)\n', (13553, 13576), True, 'import numpy as np\n'), ((13904, 13928), 'numpy.exp', 'np.exp', (['(-470000.0 / temp)'], {}), '(-470000.0 / temp)\n', (13910, 13928), True, 'import numpy as np\n'), ((14254, 14278), 'numpy.exp', 'np.exp', (['(-157809.1 / temp)'], {}), '(-157809.1 / temp)\n', (14260, 14278), True, 'import numpy as np\n'), ((14283, 14307), 'numpy.sqrt', 'np.sqrt', (['(temp / 100000.0)'], {}), '(temp / 100000.0)\n', (14290, 14307), True, 'import numpy as np\n'), ((14445, 14469), 'numpy.exp', 'np.exp', (['(-285335.4 / temp)'], {}), '(-285335.4 / temp)\n', (14451, 14469), True, 'import numpy as np\n'), ((14474, 14498), 'numpy.sqrt', 'np.sqrt', (['(temp / 100000.0)'], {}), '(temp / 100000.0)\n', (14481, 14498), True, 'import numpy as np\n'), ((14636, 14660), 'numpy.exp', 'np.exp', (['(-631515.0 / temp)'], {}), '(-631515.0 / temp)\n', (14642, 14660), True, 'import numpy as np\n'), ((14665, 14689), 'numpy.sqrt', 'np.sqrt', (['(temp / 100000.0)'], {}), '(temp / 100000.0)\n', (14672, 14689), True, 'import numpy as np\n'), ((17458, 17482), 'numpy.exp', 'np.exp', (['(-472000.0 / temp)'], {}), '(-472000.0 / temp)\n', (17464, 17482), True, 'import numpy as np\n'), ((19317, 19338), 'numpy.exp', 'np.exp', (['(-temp2 / temp)'], {}), '(-temp2 / temp)\n', (19323, 19338), True, 'import numpy as np\n'), ((20912, 20932), 'numpy.loadtxt', 'np.loadtxt', (['treefile'], {}), '(treefile)\n', (20922, 20932), True, 'import numpy as np\n'), ((21045, 21064), 'numpy.shape', 'np.shape', (['redshifts'], {}), '(redshifts)\n', (21053, 21064), True, 'import numpy as np\n'), ((21071, 21092), 'numpy.shape', 'np.shape', (['photo_rates'], {}), '(photo_rates)\n', (21079, 21092), True, 'import numpy as np\n'), ((24528, 24552), 'numpy.exp', 'np.exp', (['(-118348.0 / temp)'], {}), '(-118348.0 / temp)\n', (24534, 24552), True, 'import numpy as np\n'), ((24747, 24771), 'numpy.exp', 'np.exp', (['(-473638.0 / temp)'], {}), '(-473638.0 / temp)\n', (24753, 24771), True, 'import numpy as np\n'), ((24989, 25011), 'numpy.exp', 'np.exp', (['(-473638 / temp)'], {}), '(-473638 / temp)\n', (24995, 25011), True, 'import numpy as np\n'), ((30893, 30906), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (30900, 30906), True, 'import numpy as np\n'), ((31154, 31167), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (31161, 31167), True, 'import numpy as np\n'), ((31617, 31639), 'numpy.logical_not', 'np.logical_not', (['little'], {}), '(little)\n', (31631, 31639), True, 'import numpy as np\n'), ((13498, 13511), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (13505, 13511), True, 'import numpy as np\n'), ((13883, 13902), 'numpy.power', 'np.power', (['temp', '(1.5)'], {}), '(temp, 1.5)\n', (13891, 13902), True, 'import numpy as np\n'), ((13931, 13954), 'numpy.exp', 'np.exp', (['(-94000.0 / temp)'], {}), '(-94000.0 / temp)\n', (13937, 13954), True, 'import numpy as np\n'), ((14238, 14251), 
'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (14245, 14251), True, 'import numpy as np\n'), ((14429, 14442), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (14436, 14442), True, 'import numpy as np\n'), ((14620, 14633), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (14627, 14633), True, 'import numpy as np\n'), ((17437, 17456), 'numpy.power', 'np.power', (['temp', '(1.5)'], {}), '(temp, 1.5)\n', (17445, 17456), True, 'import numpy as np\n'), ((17486, 17509), 'numpy.exp', 'np.exp', (['(-94000.0 / temp)'], {}), '(-94000.0 / temp)\n', (17492, 17509), True, 'import numpy as np\n'), ((27376, 27389), 'numpy.sqrt', 'np.sqrt', (['temp'], {}), '(temp)\n', (27383, 27389), True, 'import numpy as np\n'), ((30924, 30936), 'numpy.log', 'np.log', (['temp'], {}), '(temp)\n', (30930, 30936), True, 'import numpy as np\n'), ((31185, 31197), 'numpy.log', 'np.log', (['temp'], {}), '(temp)\n', (31191, 31197), True, 'import numpy as np\n'), ((18097, 18108), 'numpy.sqrt', 'np.sqrt', (['UU'], {}), '(UU)\n', (18104, 18108), True, 'import numpy as np\n'), ((27578, 27592), 'numpy.log10', 'np.log10', (['temp'], {}), '(temp)\n', (27586, 27592), True, 'import numpy as np\n')]
|
"""Test functions for pem.fluid.ecl module
"""
import pytest
from pytest import approx
import numpy as np
import digirock.fluids.ecl as fluid_ecl
from inspect import getmembers, isfunction
@pytest.fixture
def tol():
return {
"rel": 0.05, # relative testing tolerance in percent
"abs": 0.00001, # absolute testing tolerance
}
@pytest.mark.parametrize(
"pres, extrap, ans",
[
(325, "const", 1.4615),
(325, "pchip", 1.4615),
(np.r_[325, 375], "const", np.r_[1.4615, 1.4505]),
(np.r_[325, 375], "pchip", np.r_[1.4615, 1.4505]),
],
)
def test_oil_fvf_table(test_data, pres, ans, extrap, tol):
tab = np.loadtxt(test_data / "PVT_BO.inc")
assert np.allclose(
fluid_ecl.oil_fvf_table(tab[:, 0], tab[:, 1], pres, extrap=extrap),
ans,
rtol=tol["rel"],
)
def test_oil_fvf_table_bad_pchi(test_data):
tab = np.loadtxt(test_data / "PVT_BO.inc")
# test bad extrap
with pytest.raises(ValueError):
assert fluid_ecl.oil_fvf_table(
tab[:, 0], tab[:, 1], 235, extrap="Unknown Extrap"
)
@pytest.mark.parametrize(
"pres, extrap, ans",
[
(325, "const", 1.4615),
(325, "pchip", 1.4615),
(np.r_[325, 375], "const", np.r_[1.4615, 1.4505]),
(np.r_[325, 375], "pchip", np.r_[1.4615, 1.4505]),
],
)
def test_oil_fvf_table(test_data, pres, ans, extrap, tol):
tab = np.loadtxt(test_data / "PVT_BO.inc")
assert np.allclose(
fluid_ecl.oil_fvf_table(tab[:, 0], tab[:, 1], pres, extrap=extrap),
ans,
rtol=tol["rel"],
)
@pytest.mark.parametrize("api,ans", ((20, 0.933993399339934), (45, 0.8016997167138812)))
def test_e100_oil_density(api, ans, tol):
assert fluid_ecl.e100_oil_density(api) == approx(ans)
assert np.allclose(
fluid_ecl.e100_oil_density(np.r_[api, api]), np.r_[ans, ans], atol=tol["abs"]
)
|
[
"pytest.approx",
"digirock.fluids.ecl.oil_fvf_table",
"pytest.mark.parametrize",
"digirock.fluids.ecl.e100_oil_density",
"pytest.raises",
"numpy.loadtxt"
] |
[((375, 580), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pres, extrap, ans"""', "[(325, 'const', 1.4615), (325, 'pchip', 1.4615), (np.r_[325, 375], 'const',\n np.r_[1.4615, 1.4505]), (np.r_[325, 375], 'pchip', np.r_[1.4615, 1.4505])]"], {}), "('pres, extrap, ans', [(325, 'const', 1.4615), (325,\n 'pchip', 1.4615), (np.r_[325, 375], 'const', np.r_[1.4615, 1.4505]), (\n np.r_[325, 375], 'pchip', np.r_[1.4615, 1.4505])])\n", (398, 580), False, 'import pytest\n'), ((1166, 1371), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pres, extrap, ans"""', "[(325, 'const', 1.4615), (325, 'pchip', 1.4615), (np.r_[325, 375], 'const',\n np.r_[1.4615, 1.4505]), (np.r_[325, 375], 'pchip', np.r_[1.4615, 1.4505])]"], {}), "('pres, extrap, ans', [(325, 'const', 1.4615), (325,\n 'pchip', 1.4615), (np.r_[325, 375], 'const', np.r_[1.4615, 1.4505]), (\n np.r_[325, 375], 'pchip', np.r_[1.4615, 1.4505])])\n", (1189, 1371), False, 'import pytest\n'), ((1684, 1776), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""api,ans"""', '((20, 0.933993399339934), (45, 0.8016997167138812))'], {}), "('api,ans', ((20, 0.933993399339934), (45, \n 0.8016997167138812)))\n", (1707, 1776), False, 'import pytest\n'), ((701, 737), 'numpy.loadtxt', 'np.loadtxt', (["(test_data / 'PVT_BO.inc')"], {}), "(test_data / 'PVT_BO.inc')\n", (711, 737), True, 'import numpy as np\n'), ((947, 983), 'numpy.loadtxt', 'np.loadtxt', (["(test_data / 'PVT_BO.inc')"], {}), "(test_data / 'PVT_BO.inc')\n", (957, 983), True, 'import numpy as np\n'), ((1492, 1528), 'numpy.loadtxt', 'np.loadtxt', (["(test_data / 'PVT_BO.inc')"], {}), "(test_data / 'PVT_BO.inc')\n", (1502, 1528), True, 'import numpy as np\n'), ((772, 838), 'digirock.fluids.ecl.oil_fvf_table', 'fluid_ecl.oil_fvf_table', (['tab[:, 0]', 'tab[:, 1]', 'pres'], {'extrap': 'extrap'}), '(tab[:, 0], tab[:, 1], pres, extrap=extrap)\n', (795, 838), True, 'import digirock.fluids.ecl as fluid_ecl\n'), ((1017, 1042), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1030, 1042), False, 'import pytest\n'), ((1060, 1135), 'digirock.fluids.ecl.oil_fvf_table', 'fluid_ecl.oil_fvf_table', (['tab[:, 0]', 'tab[:, 1]', '(235)'], {'extrap': '"""Unknown Extrap"""'}), "(tab[:, 0], tab[:, 1], 235, extrap='Unknown Extrap')\n", (1083, 1135), True, 'import digirock.fluids.ecl as fluid_ecl\n'), ((1563, 1629), 'digirock.fluids.ecl.oil_fvf_table', 'fluid_ecl.oil_fvf_table', (['tab[:, 0]', 'tab[:, 1]', 'pres'], {'extrap': 'extrap'}), '(tab[:, 0], tab[:, 1], pres, extrap=extrap)\n', (1586, 1629), True, 'import digirock.fluids.ecl as fluid_ecl\n'), ((1827, 1858), 'digirock.fluids.ecl.e100_oil_density', 'fluid_ecl.e100_oil_density', (['api'], {}), '(api)\n', (1853, 1858), True, 'import digirock.fluids.ecl as fluid_ecl\n'), ((1862, 1873), 'pytest.approx', 'approx', (['ans'], {}), '(ans)\n', (1868, 1873), False, 'from pytest import approx\n'), ((1908, 1951), 'digirock.fluids.ecl.e100_oil_density', 'fluid_ecl.e100_oil_density', (['np.r_[api, api]'], {}), '(np.r_[api, api])\n', (1934, 1951), True, 'import digirock.fluids.ecl as fluid_ecl\n')]
|
'''
Created on: see version log.
@author: rigonz
coding: utf-8
IMPORTANT: requires py3.6 (rasterio)
Script that:
1) reads a series of raster files,
2) runs some checks,
3) makes charts showing the results.
The input data corresponds to a region of the world (ESP) and represents
the population density (pop/km2).
Each file comes from a different data provider, or from different calculation conditions.
The checks consist of verifying that the input files refer to the same region,
and of computing some intercomparison indicators.
The charts show the correlation among the different input data, as tuples
associated with the same geographical location.
Version log.
R0 (20210512):
First trials, seems to work well.
'''
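# Note: the "intercomparison indicators" mentioned above are pairwise Pearson
# correlation coefficients (np.corrcoef) computed on co-located cells after all
# rasters are resampled onto a common grid.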
# %% Imports.
import rasterio # IMPORTANT: requires py3.6
import numpy as np
from matplotlib import pyplot as plt
# %% Directories.
RootDirIn = 'D:/0 DOWN/zz EXTSave/GIS/POP/EUR/SHP/'
# Filenames:
FileNameI1 = RootDirIn + 'WP/ESP_clip_pd_2020_1km_UNadj.tif'
FileNameI2 = RootDirIn + 'WP/ESP_clip_ppp_2020_1km_Aggregated_UNadj_d.tif'
FileNameI3 = RootDirIn + 'GPW/ESP_clip gpw_v4_population_density_rev11_2020_30_sec.tif'
FileNameI4 = RootDirIn + 'GPW/ESP_clip gpw_v4_population_density_adjusted_to_2015_unwpp_country_totals_rev11_2020_30_sec.tif'
# %% Read data.
# Open files:
print('Opening and reading the files...')
ds1 = rasterio.open(FileNameI1)
ds2 = rasterio.open(FileNameI2)
ds3 = rasterio.open(FileNameI3)
ds4 = rasterio.open(FileNameI4)
# Read data:
band1 = ds1.read(1)
band2 = ds2.read(1)
band3 = ds3.read(1)
band4 = ds4.read(1)
# %% Check the datasets.
print('Checking the data...')
# Bounds:
if not(ds1.bounds == ds2.bounds and ds2.bounds == ds3.bounds and
ds3.bounds == ds4.bounds):
print('WARNING: bounds are not the same:')
print(ds1.bounds)
print(ds2.bounds)
print(ds3.bounds)
print(ds4.bounds)
# Width and height:
if not(ds1.width == ds2.width and ds2.width == ds3.width and
ds3.width == ds4.width):
print('WARNING: widths are not the same:')
print(ds1.width)
print(ds2.width)
print(ds3.width)
print(ds4.width)
if not(ds1.height == ds2.height and ds2.height == ds3.height and
ds3.height == ds4.height):
print('WARNING: heights are not the same:')
print(ds1.height)
print(ds2.height)
print(ds3.height)
print(ds4.height)
# Bands:
if not(ds1.indexes[0] == ds2.indexes[0] and ds2.indexes[0] == ds3.indexes[0]
and ds3.indexes[0] == ds4.indexes[0]):
print('WARNING: bands are not the same:')
print(ds1.indexes[0])
print(ds2.indexes[0])
print(ds3.indexes[0])
print(ds4.indexes[0])
# Dimensions:
if not(ds1.shape == ds2.shape and ds2.shape == ds3.shape and
ds3.shape == ds4.shape):
print('WARNING: shapes are not the same:')
print(ds1.shape)
print(ds2.shape)
print(ds3.shape)
print(ds4.shape)
# CRS:
try:
if (ds1.crs.data['init'] != 'epsg:4326' or
ds2.crs.data['init'] != 'epsg:4326' or
ds3.crs.data['init'] != 'epsg:4326' or
ds4.crs.data['init'] != 'epsg:4326'):
print('WARNING: CRS is not EPSG:4326.')
except:
print('WARNING: CRS is not available or is not EPSG:4326:')
# %% Create new bands.
print('Checking the new bands...')
# Remain within the boundaries of data:
left = max(ds1.bounds.left, ds2.bounds.left, ds3.bounds.left, ds4.bounds.left)
top = min(ds1.bounds.top, ds2.bounds.top, ds3.bounds.top, ds4.bounds.top)
right = min(ds1.bounds.right, ds2.bounds.right, ds3.bounds.right, ds4.bounds.right)
bottom = max(ds1.bounds.bottom, ds2.bounds.bottom, ds3.bounds.bottom, ds4.bounds.bottom)
res = 1 / 120. # 30 arc-sec, approx 1 km; should be min() of the input resolutions, etc.
height = int(np.ceil((top - bottom) / res + 1))
width = int(np.ceil((right - left) / res + 1))
res_x = (right - left) / (width - 1)
res_y = (top - bottom) / (height - 1)
# Check (valid for east + north hemispheres only!):
if right > min(ds1.bounds.right, ds2.bounds.right, ds3.bounds.right, ds4.bounds.right):
print('WARNING: right boundary exceeded.')
if bottom > max(ds1.bounds.bottom, ds2.bounds.bottom, ds3.bounds.bottom, ds4.bounds.bottom):
print('WARNING: bottom boundary exceeded.')
# Create new bands:
print('Creating the new bands...')
b1 = np.full((height, width), 0.)
b2 = np.full((height, width), 0.)
b3 = np.full((height, width), 0.)
b4 = np.full((height, width), 0.)
# Populate the new bands:
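# Each target cell centre (x, y) is looked up in every source raster with
# DatasetReader.index(x, y), which returns the row/col of the pixel containing
# that point, i.e. nearest-neighbour resampling onto the common grid.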
count = 0
for i in range(0, height-1, 1):
for j in range(0, width-1, 1):
x, y = (left + j * res_x, top - i * res_y)
row, col = ds1.index(x, y)
b1[i, j] = band1[row, col]
row, col = ds2.index(x, y)
b2[i, j] = band2[row, col]
row, col = ds3.index(x, y)
b3[i, j] = band3[row, col]
row, col = ds4.index(x, y)
b4[i, j] = band4[row, col]
# Show the progress:
if count % height % 50 == 0:
print('Progress... {:4.1f}%'.format(count/height*100))
count += 1
# %% Flatten and clear nodata.
print('Preparing the new bands...')
b1f = b1.flatten()
b2f = b2.flatten()
b3f = b3.flatten()
b4f = b4.flatten()
# Remove only nodata, retain 0s:
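# Densities are non-negative, so a negative value in any band marks nodata;
# one shared mask keeps the four flattened bands aligned cell by cell.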
b_mask = np.array(np.array([b1f, b2f, b3f, b4f]).min(axis=0) < 0)
b1fm = np.delete(b1f, b_mask)
b2fm = np.delete(b2f, b_mask)
b3fm = np.delete(b3f, b_mask)
b4fm = np.delete(b4f, b_mask)
# %% Compute correlations.
print('Pearson coeff. after removing the no-data:')
print('DS1-2 = {:4.3f}.'.format(np.corrcoef(b1fm, b2fm)[0, 1]))
print('DS1-3 = {:4.3f}.'.format(np.corrcoef(b1fm, b3fm)[0, 1]))
print('DS1-4 = {:4.3f}.'.format(np.corrcoef(b1fm, b4fm)[0, 1]))
print('DS2-3 = {:4.3f}.'.format(np.corrcoef(b2fm, b3fm)[0, 1]))
print('DS2-4 = {:4.3f}.'.format(np.corrcoef(b2fm, b4fm)[0, 1]))
print('DS3-4 = {:4.3f}.'.format(np.corrcoef(b3fm, b4fm)[0, 1]))
# %% Draw histograms.
# Auxiliaries:
color = ['k', 'r', 'b', 'g']
label = ['DS1', 'DS2', 'DS3', 'DS4']
# Plot:
plt.hist([b1fm, b2fm, b3fm, b4fm], bins=20, color=color[0:4], label=label)
# Etc:
plt.title('DS>=0', loc='right')
plt.xlabel('pop. density, hab/km2')
plt.ylabel('count')
plt.grid(True)
plt.legend()
plt.show()
# Zoom at the right tail:
# Plot:
plt.hist([b1fm, b2fm, b3fm, b4fm], bins=20, color=color[0:4], label=label)
# Etc:
plt.title('DS>=0', loc='right')
plt.xlabel('pop. density, hab/km2')
plt.ylabel('count')
plt.grid(True)
plt.legend()
# plt.xlim(1500, 40000)
plt.ylim(0, 7500)
plt.show()
# %% Draw chart.
# Auxiliaries:
color = ['k', 'r', 'b', 'g']
# Plot:
plt.figure(1, figsize=(4, 4), dpi=300)
# plt.scatter(b1fm, b3fm, color=color[0], s=1.0, label='1-3', alpha=0.1)
# plt.scatter(b1fm, b4fm, color=color[1], s=1.0, label='1-4', alpha=0.1)
plt.scatter(b2fm, b3fm, color=color[2], s=1.0, label='2-3', alpha=0.1)
# Titles:
plt.title('PD>=0', loc='right')
plt.xlabel('pop. density, hab/km2')
plt.ylabel('pop. density, hab/km2')
# Etc:
plt.grid(True)
plt.legend()
plt.tight_layout()
# Take a look:
plt.show()
# %% Draw heatmap.
# Remove 0s:
b_mask = np.array(np.array([b1f, b2f, b3f, b4f]).min(axis=0) <= 0)
b1fm = np.delete(b1f, b_mask)
b2fm = np.delete(b2f, b_mask)
b3fm = np.delete(b3f, b_mask)
b4fm = np.delete(b4f, b_mask)
# Plot:
plt.hist2d(np.log10(b2fm), np.log10(b3fm), bins=100, cmap='binary')
# Colorbar:
cb = plt.colorbar()
cb.set_label('Number of entries')
# Etc:
plt.title('PD>0', loc='right')
plt.xlabel('log10_DS2 pop. density, hab/km2')
plt.ylabel('log10_DS3 pop. density, hab/km2')
plt.tight_layout()
plt.show()
# %% Script done.
print('\nScript completed. Thanks!')
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.hist",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"numpy.ceil",
"numpy.corrcoef",
"rasterio.open",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.full"
] |
[((1327, 1352), 'rasterio.open', 'rasterio.open', (['FileNameI1'], {}), '(FileNameI1)\n', (1340, 1352), False, 'import rasterio\n'), ((1359, 1384), 'rasterio.open', 'rasterio.open', (['FileNameI2'], {}), '(FileNameI2)\n', (1372, 1384), False, 'import rasterio\n'), ((1391, 1416), 'rasterio.open', 'rasterio.open', (['FileNameI3'], {}), '(FileNameI3)\n', (1404, 1416), False, 'import rasterio\n'), ((1423, 1448), 'rasterio.open', 'rasterio.open', (['FileNameI4'], {}), '(FileNameI4)\n', (1436, 1448), False, 'import rasterio\n'), ((4218, 4247), 'numpy.full', 'np.full', (['(height, width)', '(0.0)'], {}), '((height, width), 0.0)\n', (4225, 4247), True, 'import numpy as np\n'), ((4252, 4281), 'numpy.full', 'np.full', (['(height, width)', '(0.0)'], {}), '((height, width), 0.0)\n', (4259, 4281), True, 'import numpy as np\n'), ((4286, 4315), 'numpy.full', 'np.full', (['(height, width)', '(0.0)'], {}), '((height, width), 0.0)\n', (4293, 4315), True, 'import numpy as np\n'), ((4320, 4349), 'numpy.full', 'np.full', (['(height, width)', '(0.0)'], {}), '((height, width), 0.0)\n', (4327, 4349), True, 'import numpy as np\n'), ((5172, 5194), 'numpy.delete', 'np.delete', (['b1f', 'b_mask'], {}), '(b1f, b_mask)\n', (5181, 5194), True, 'import numpy as np\n'), ((5202, 5224), 'numpy.delete', 'np.delete', (['b2f', 'b_mask'], {}), '(b2f, b_mask)\n', (5211, 5224), True, 'import numpy as np\n'), ((5232, 5254), 'numpy.delete', 'np.delete', (['b3f', 'b_mask'], {}), '(b3f, b_mask)\n', (5241, 5254), True, 'import numpy as np\n'), ((5262, 5284), 'numpy.delete', 'np.delete', (['b4f', 'b_mask'], {}), '(b4f, b_mask)\n', (5271, 5284), True, 'import numpy as np\n'), ((5862, 5936), 'matplotlib.pyplot.hist', 'plt.hist', (['[b1fm, b2fm, b3fm, b4fm]'], {'bins': '(20)', 'color': 'color[0:4]', 'label': 'label'}), '([b1fm, b2fm, b3fm, b4fm], bins=20, color=color[0:4], label=label)\n', (5870, 5936), True, 'from matplotlib import pyplot as plt\n'), ((5945, 5976), 'matplotlib.pyplot.title', 'plt.title', (['"""DS=>0"""'], {'loc': '"""right"""'}), "('DS=>0', loc='right')\n", (5954, 5976), True, 'from matplotlib import pyplot as plt\n'), ((5977, 6012), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""pop. density, hab/km2"""'], {}), "('pop. density, hab/km2')\n", (5987, 6012), True, 'from matplotlib import pyplot as plt\n'), ((6013, 6032), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""count"""'], {}), "('count')\n", (6023, 6032), True, 'from matplotlib import pyplot as plt\n'), ((6033, 6047), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6041, 6047), True, 'from matplotlib import pyplot as plt\n'), ((6048, 6060), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6058, 6060), True, 'from matplotlib import pyplot as plt\n'), ((6061, 6071), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6069, 6071), True, 'from matplotlib import pyplot as plt\n'), ((6107, 6181), 'matplotlib.pyplot.hist', 'plt.hist', (['[b1fm, b2fm, b3fm, b4fm]'], {'bins': '(20)', 'color': 'color[0:4]', 'label': 'label'}), '([b1fm, b2fm, b3fm, b4fm], bins=20, color=color[0:4], label=label)\n', (6115, 6181), True, 'from matplotlib import pyplot as plt\n'), ((6190, 6221), 'matplotlib.pyplot.title', 'plt.title', (['"""DS>=0"""'], {'loc': '"""right"""'}), "('DS>=0', loc='right')\n", (6199, 6221), True, 'from matplotlib import pyplot as plt\n'), ((6222, 6257), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""pop. density, hab/km2"""'], {}), "('pop. 
density, hab/km2')\n", (6232, 6257), True, 'from matplotlib import pyplot as plt\n'), ((6258, 6277), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""count"""'], {}), "('count')\n", (6268, 6277), True, 'from matplotlib import pyplot as plt\n'), ((6278, 6292), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6286, 6292), True, 'from matplotlib import pyplot as plt\n'), ((6293, 6305), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6303, 6305), True, 'from matplotlib import pyplot as plt\n'), ((6330, 6347), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(7500)'], {}), '(0, 7500)\n', (6338, 6347), True, 'from matplotlib import pyplot as plt\n'), ((6348, 6358), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6356, 6358), True, 'from matplotlib import pyplot as plt\n'), ((6431, 6469), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(4, 4)', 'dpi': '(300)'}), '(1, figsize=(4, 4), dpi=300)\n', (6441, 6469), True, 'from matplotlib import pyplot as plt\n'), ((6616, 6686), 'matplotlib.pyplot.scatter', 'plt.scatter', (['b2fm', 'b3fm'], {'color': 'color[2]', 's': '(1.0)', 'label': '"""2-3"""', 'alpha': '(0.1)'}), "(b2fm, b3fm, color=color[2], s=1.0, label='2-3', alpha=0.1)\n", (6627, 6686), True, 'from matplotlib import pyplot as plt\n'), ((6699, 6730), 'matplotlib.pyplot.title', 'plt.title', (['"""PD>=0"""'], {'loc': '"""right"""'}), "('PD>=0', loc='right')\n", (6708, 6730), True, 'from matplotlib import pyplot as plt\n'), ((6731, 6766), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""pop. density, hab/km2"""'], {}), "('pop. density, hab/km2')\n", (6741, 6766), True, 'from matplotlib import pyplot as plt\n'), ((6767, 6802), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""pop. density, hab/km2"""'], {}), "('pop. density, hab/km2')\n", (6777, 6802), True, 'from matplotlib import pyplot as plt\n'), ((6811, 6825), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6819, 6825), True, 'from matplotlib import pyplot as plt\n'), ((6826, 6838), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6836, 6838), True, 'from matplotlib import pyplot as plt\n'), ((6839, 6857), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6855, 6857), True, 'from matplotlib import pyplot as plt\n'), ((6874, 6884), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6882, 6884), True, 'from matplotlib import pyplot as plt\n'), ((6992, 7014), 'numpy.delete', 'np.delete', (['b1f', 'b_mask'], {}), '(b1f, b_mask)\n', (7001, 7014), True, 'import numpy as np\n'), ((7022, 7044), 'numpy.delete', 'np.delete', (['b2f', 'b_mask'], {}), '(b2f, b_mask)\n', (7031, 7044), True, 'import numpy as np\n'), ((7052, 7074), 'numpy.delete', 'np.delete', (['b3f', 'b_mask'], {}), '(b3f, b_mask)\n', (7061, 7074), True, 'import numpy as np\n'), ((7082, 7104), 'numpy.delete', 'np.delete', (['b4f', 'b_mask'], {}), '(b4f, b_mask)\n', (7091, 7104), True, 'import numpy as np\n'), ((7200, 7214), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (7212, 7214), True, 'from matplotlib import pyplot as plt\n'), ((7257, 7287), 'matplotlib.pyplot.title', 'plt.title', (['"""PD>0"""'], {'loc': '"""right"""'}), "('PD>0', loc='right')\n", (7266, 7287), True, 'from matplotlib import pyplot as plt\n'), ((7288, 7333), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log10_DS2 pop. density, hab/km2"""'], {}), "('log10_DS2 pop. 
density, hab/km2')\n", (7298, 7333), True, 'from matplotlib import pyplot as plt\n'), ((7334, 7379), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log10_DS3 pop. density, hab/km2"""'], {}), "('log10_DS3 pop. density, hab/km2')\n", (7344, 7379), True, 'from matplotlib import pyplot as plt\n'), ((7380, 7398), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7396, 7398), True, 'from matplotlib import pyplot as plt\n'), ((7399, 7409), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7407, 7409), True, 'from matplotlib import pyplot as plt\n'), ((3670, 3703), 'numpy.ceil', 'np.ceil', (['((top - bottom) / res + 1)'], {}), '((top - bottom) / res + 1)\n', (3677, 3703), True, 'import numpy as np\n'), ((3717, 3750), 'numpy.ceil', 'np.ceil', (['((right - left) / res + 1)'], {}), '((right - left) / res + 1)\n', (3724, 3750), True, 'import numpy as np\n'), ((7125, 7139), 'numpy.log10', 'np.log10', (['b2fm'], {}), '(b2fm)\n', (7133, 7139), True, 'import numpy as np\n'), ((7141, 7155), 'numpy.log10', 'np.log10', (['b3fm'], {}), '(b3fm)\n', (7149, 7155), True, 'import numpy as np\n'), ((5397, 5420), 'numpy.corrcoef', 'np.corrcoef', (['b1fm', 'b2fm'], {}), '(b1fm, b2fm)\n', (5408, 5420), True, 'import numpy as np\n'), ((5461, 5484), 'numpy.corrcoef', 'np.corrcoef', (['b1fm', 'b3fm'], {}), '(b1fm, b3fm)\n', (5472, 5484), True, 'import numpy as np\n'), ((5525, 5548), 'numpy.corrcoef', 'np.corrcoef', (['b1fm', 'b4fm'], {}), '(b1fm, b4fm)\n', (5536, 5548), True, 'import numpy as np\n'), ((5589, 5612), 'numpy.corrcoef', 'np.corrcoef', (['b2fm', 'b3fm'], {}), '(b2fm, b3fm)\n', (5600, 5612), True, 'import numpy as np\n'), ((5653, 5676), 'numpy.corrcoef', 'np.corrcoef', (['b2fm', 'b4fm'], {}), '(b2fm, b4fm)\n', (5664, 5676), True, 'import numpy as np\n'), ((5717, 5740), 'numpy.corrcoef', 'np.corrcoef', (['b3fm', 'b4fm'], {}), '(b3fm, b4fm)\n', (5728, 5740), True, 'import numpy as np\n'), ((5117, 5147), 'numpy.array', 'np.array', (['[b1f, b2f, b3f, b4f]'], {}), '([b1f, b2f, b3f, b4f])\n', (5125, 5147), True, 'import numpy as np\n'), ((6936, 6966), 'numpy.array', 'np.array', (['[b1f, b2f, b3f, b4f]'], {}), '([b1f, b2f, b3f, b4f])\n', (6944, 6966), True, 'import numpy as np\n')]
|
import utils
from utils import format
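# Note: utils.format (shadowing the builtin) renders the '{{ ... }}' placeholders
# used in the templates and inline strings below.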
import os
import tempfile
import urllib.request
import shutil
import zipfile
spire_dir = r"D:\Games\Slay the Spire Modded"
mod_dir = os.path.join("cache", "mod")
def build():
# STEP: clone FruityMod
if not os.path.exists(mod_dir):
print("Downloading {}".format("FruityMod"))
fruity_url = r"https://github.com/gskleres/FruityMod-StS/archive/v0.6.2b.zip"
utils.mkdir("cache")
download_file = tempfile.NamedTemporaryFile(suffix=".zip", dir="cache", delete=False).name
with urllib.request.urlopen(fruity_url) as response, open(download_file, "wb") as out_file:
shutil.copyfileobj(response, out_file)
utils.unzip(download_file, mod_dir, shift=1, remove=True)
# STEP: fetch libs
mod_jar = os.path.join(spire_dir, "ModTheSpire.jar")
if not os.path.exists(mod_jar):
print("Downloading ModTheSpire")
download_file = tempfile.NamedTemporaryFile(suffix=".zip", dir="..", delete=False).name
urllib.request.urlretrieve("https://github.com/kiooeht/ModTheSpire/releases/download/v2.6.0/ModTheSpire.zip", download_file)
with zipfile.ZipFile(download_file, "r") as archive, open(mod_jar, "wb") as file:
jar_data = archive.read("ModTheSpire.jar")
file.write(jar_data)
os.remove(download_file)
base_jar = os.path.join(spire_dir, "mods", "BaseMod.jar")
if not os.path.exists(base_jar):
print("Downloading BaseMod")
urllib.request.urlretrieve("https://github.com/daviscook477/BaseMod/releases/download/v2.9.1/BaseMod.jar", base_jar)
from spire import name_id
import textwrap
import io
import json
print("Generating data")
image_dir = os.path.join("assets", "images")
if os.path.exists(os.path.join("cache", "DEBUG")):
image_dir = os.path.join("todo", "images")
# STEP: generate cards
from engi_mod import cards
with open(os.path.join("templates", "card.java"), encoding="utf-8") as file:
card_template = file.read()
for card in cards:
with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\cards".split("\\"), name_id(card["name"]) + ".java"), "w", encoding="utf-8") as file:
file.write(format(card_template, card))
# STEP: patch code
templates_cache = os.path.join("cache", "templates")
if not os.path.exists(templates_cache):
utils.mkdir(templates_cache)
shutil.copy(os.path.join(mod_dir, *r"src\main\java\fruitymod\FruityMod.java".split("\\")), os.path.join(templates_cache, "FruiyMod.java"))
shutil.copy(os.path.join(mod_dir, *r"src\main\java\fruitymod\characters\TheSeeker.java".split("\\")), os.path.join(templates_cache, "TheSeeker.java"))
shutil.copy(os.path.join(mod_dir, *r"src\main\resources\localization\FruityMod-CardStrings.json".split("\\")), os.path.join(templates_cache, "FruityMod-CardStrings.json"))
image_code = io.StringIO()
add_code = io.StringIO()
unlock_code = io.StringIO()
for card in cards:
id = name_id(card["name"], upper=True).lower()
image_file = os.path.join(image_dir, id + ".png")
image_file = "cards/{}.png".format(id if os.path.exists(image_file) else "runic_binding")
image_code.write(format(
'public static final String {{ name_id(card["name"], upper=True) }} = "{{ image_file }}";'
) + "\n")
if card["rarity"] != "special":
add_code.write(format(
'BaseMod.addCard(new {{ name_id(card["name"]) }}());'
) + "\n")
unlock_code.write(format(
'UnlockTracker.unlockCard("{{ card["name"] }}");'
) + "\n")
with open(os.path.join(templates_cache, "FruiyMod.java"), encoding="utf-8") as file:
fruity_lines = [line for line in file]
for i, line in enumerate(fruity_lines):
if "public static final String PHASE_COIL" in line:
fruity_lines.insert(i + 1, "\n" + textwrap.indent(image_code.getvalue(), " " * 4))
break
for i, line in enumerate(fruity_lines):
if "BaseMod.addCard(new Nexus())" in line:
fruity_lines.insert(i + 1, "\n" + textwrap.indent(add_code.getvalue(), " " * 4 * 2))
fruity_lines.insert(i + 2, "\n" + textwrap.indent(unlock_code.getvalue(), " " * 4 * 2))
break
with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\FruityMod.java".split("\\")), "w", encoding="utf-8") as file:
file.write("".join(fruity_lines))
with open(os.path.join(templates_cache, "TheSeeker.java"), encoding="utf-8") as file:
seeker_lines = [line for line in file]
# STEP: starting relic
from engi_mod import relic
for i, line in enumerate(seeker_lines):
if "Arcanosphere" in line:
del seeker_lines[i:i+2]
seeker_lines.insert(i, "\n{}\n\n".format(textwrap.indent(textwrap.dedent(format("""
retVal.add("{{ relic }}");
UnlockTracker.markRelicAsSeen("{{ relic }}");
""")).strip(), " " * 4 * 2)))
break
# STEP: starting deck
from engi_mod import deck
if not deck:
deck = [card["name"] for card in cards if card["rarity"] != "special"]
for i, line in enumerate(seeker_lines):
if "Strike_P" in line:
for j, line in enumerate(seeker_lines):
if "AstralHaze" in line:
break
del seeker_lines[i:j+1]
seeker_lines.insert(i, "\n{}\n\n".format(textwrap.indent(
"\n".join('retVal.add("{}");'.format(card) for card in deck)
, " " * 4 * 2)))
break
with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\characters\TheSeeker.java".split("\\")), "w", encoding="utf-8") as file:
file.write("".join(seeker_lines))
card_strings = json.load(open(os.path.join(templates_cache, "FruityMod-CardStrings.json"), encoding="utf-8"))
for card in cards:
data = {
"NAME": card["name"],
"DESCRIPTION": card["desc"],
}
desc = card.get("upgrade_desc")
if desc:
data["UPGRADE_DESCRIPTION"] = desc
card_strings[card["name"]] = data
json.dump(card_strings,
open(os.path.join(mod_dir, *r"src\main\resources\localization\FruityMod-CardStrings.json".split("\\")),
"w", encoding="utf-8"), sort_keys=True, indent=4)
# STEP: generate powers
from engi_mod import powers
with open(os.path.join("templates", "power.java"), encoding="utf-8") as file:
power_template = file.read()
for power in powers:
with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\powers".split("\\"), power["id"] + ".java"), "w", encoding="utf-8") as file:
file.write(format(power_template, power))
# STEP: generate actions
from engi_mod import actions
with open(os.path.join("templates", "action.java"), encoding="utf-8") as file:
action_template = file.read()
for action in actions:
with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\actions\unique".split("\\"), action["id"] + ".java"), "w", encoding="utf-8") as file:
file.write(format(action_template, action))
# STEP: generate java files
from engi_mod import javas
with open(os.path.join("templates", "java.java"), encoding="utf-8") as file:
java_template = file.read()
for java in javas:
with open(os.path.join(mod_dir, *r"src\main\java".split("\\"), *java["package"], java["name"] + ".java"), "w", encoding="utf-8") as file:
file.write(format(java_template, java))
# STEP: card images
print("Generating images")
import numpy as np
portrait_masks = {}
for type in "attack skill power".split():
image = utils.open_data(os.path.join("templates", "1024Portraits_{}_mask.png".format(type)))
image = image / 255
image = np.repeat(image[:,:,:1], 4, axis=-1)
portrait_masks[type] = image
for card in cards:
id = name_id(card["name"], upper=True).lower()
image_file = os.path.join(image_dir, id + ".png")
target_p_file = os.path.join(mod_dir, *r"src\main\resources\img\cards".split("\\"), id + "_p" + ".png")
target_file = os.path.join(mod_dir, *r"src\main\resources\img\cards".split("\\"), id + ".png")
if os.path.exists(target_p_file):
continue
if os.path.exists(image_file):
image = utils.open_data(image_file)
from skimage.transform import resize
target = 500, 380
r = image.shape[0] / image.shape[1]
if r >= target[0] / target[1]:
size = np.ceil(target[1] * r).astype("int"), target[1]
x = np.round((size[0] - target[0]) / 2).astype("int")
image = resize(image, size, mode="edge")[x:x+target[0]]
else:
size = target[0], np.ceil(target[0] / r).astype("int")
image = resize(image, size, mode="edge")[:,:target[1]]
image *= portrait_masks[card["type"]]
from PIL import Image
img = Image.fromarray(np.round(image * 255).astype("uint8").transpose((1, 0, 2)))
img.save(target_p_file)
target = 250, 190
image = resize(image, target, mode="edge")
img = Image.fromarray(np.round(image * 255).astype("uint8").transpose((1, 0, 2)))
img.save(target_file)
# STEP: card borders
utils.sync(os.path.join("assets", "512"), os.path.join(mod_dir, *r"src\main\resources\img\512".split("\\")))
utils.sync(os.path.join("assets", "1024"), os.path.join(mod_dir, *r"src\main\resources\img\1024".split("\\")))
# STEP: keywords
from engi_mod import keywords
keyword_code = io.StringIO()
for name, keyword in keywords.items():
words = ", ".join('"{}"'.format(word) for word in [name.lower()] + keyword["words"])
keyword_code.write(format(
'BaseMod.addKeyword(new String[] {"{{ name }}", {{ words }}}, "{{ keyword["desc"] }}");'
) + "\n")
with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\FruityMod.java".split("\\")), encoding="utf-8") as file:
fruity_lines = [line for line in file]
for i, line in enumerate(fruity_lines):
if '{"intangible", "Intangible"}, "All damage and HP loss you suffer is reduced to 1."' in line:
fruity_lines.insert(i + 1, "\n" + textwrap.indent(keyword_code.getvalue(), " " * 4 * 2))
break
with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\FruityMod.java".split("\\")), "w", encoding="utf-8") as file:
file.write("".join(fruity_lines))
# STEP: mod info
old_info = os.path.join(mod_dir, *r"src\main\resources\ModTheSpire.config".split("\\"))
if os.path.exists(old_info):
os.remove(old_info)
from engi_mod import info
json.dump(info, open(os.path.join(mod_dir, *r"src\main\resources\ModTheSpire.json".split("\\")), "w", encoding="utf-8"), indent=4)
# STEP: maven project
pom_template = os.path.join(templates_cache, "pom.xml")
if not os.path.exists(pom_template):
shutil.copy(os.path.join(mod_dir, "pom.xml"), pom_template)
with open(pom_template, encoding="utf-8") as file:
pom = file.read()
pom = pom.replace("${basedir}/../lib/ModTheSpire.jar", "/".join(spire_dir.split(os.path.sep) + ["ModTheSpire.jar"]))
pom = pom.replace("${basedir}/../lib/BaseMod.jar", "/".join(spire_dir.split(os.path.sep) + ["mods", "BaseMod.jar"]))
pom = pom.replace("${basedir}/../lib/desktop-1.0.jar", "/".join(spire_dir.split(os.path.sep) + ["desktop-1.0.jar"]))
jar_file = os.path.join(spire_dir, "mods", "EngiMod.jar")
pom = pom.replace("../_ModTheSpire/mods/FruityMod.jar", "/".join(jar_file.split(os.path.sep)))
with open(os.path.join(mod_dir, "pom.xml"), "w", encoding="utf-8") as file:
file.write(pom)
# STEP: compile
if os.path.exists(jar_file):
os.remove(jar_file)
with utils.cd(mod_dir):
os.system("mvn package")
if not os.path.exists(jar_file):
print("Compilation failed")
return
# STEP: test
with utils.cd(spire_dir):
os.system("ModTheSpire.jar")
if __name__ == "__main__":
build()
|
[
"zipfile.ZipFile",
"utils.cd",
"spire.name_id",
"os.remove",
"os.path.exists",
"numpy.repeat",
"utils.format",
"tempfile.NamedTemporaryFile",
"utils.open_data",
"io.StringIO",
"numpy.round",
"numpy.ceil",
"shutil.copyfileobj",
"skimage.transform.resize",
"engi_mod.keywords.items",
"os.path.join",
"utils.mkdir",
"utils.unzip",
"os.system"
] |
[((172, 200), 'os.path.join', 'os.path.join', (['"""cache"""', '"""mod"""'], {}), "('cache', 'mod')\n", (184, 200), False, 'import os\n'), ((800, 842), 'os.path.join', 'os.path.join', (['spire_dir', '"""ModTheSpire.jar"""'], {}), "(spire_dir, 'ModTheSpire.jar')\n", (812, 842), False, 'import os\n'), ((1375, 1421), 'os.path.join', 'os.path.join', (['spire_dir', '"""mods"""', '"""BaseMod.jar"""'], {}), "(spire_dir, 'mods', 'BaseMod.jar')\n", (1387, 1421), False, 'import os\n'), ((1748, 1780), 'os.path.join', 'os.path.join', (['"""assets"""', '"""images"""'], {}), "('assets', 'images')\n", (1760, 1780), False, 'import os\n'), ((2337, 2371), 'os.path.join', 'os.path.join', (['"""cache"""', '"""templates"""'], {}), "('cache', 'templates')\n", (2349, 2371), False, 'import os\n'), ((2956, 2969), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2967, 2969), False, 'import io\n'), ((2985, 2998), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2996, 2998), False, 'import io\n'), ((3017, 3030), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3028, 3030), False, 'import io\n'), ((9852, 9865), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (9863, 9865), False, 'import io\n'), ((9891, 9907), 'engi_mod.keywords.items', 'keywords.items', ([], {}), '()\n', (9905, 9907), False, 'from engi_mod import keywords\n'), ((10880, 10904), 'os.path.exists', 'os.path.exists', (['old_info'], {}), '(old_info)\n', (10894, 10904), False, 'import os\n'), ((11145, 11185), 'os.path.join', 'os.path.join', (['templates_cache', '"""pom.xml"""'], {}), "(templates_cache, 'pom.xml')\n", (11157, 11185), False, 'import os\n'), ((11754, 11800), 'os.path.join', 'os.path.join', (['spire_dir', '"""mods"""', '"""EngiMod.jar"""'], {}), "(spire_dir, 'mods', 'EngiMod.jar')\n", (11766, 11800), False, 'import os\n'), ((12032, 12056), 'os.path.exists', 'os.path.exists', (['jar_file'], {}), '(jar_file)\n', (12046, 12056), False, 'import os\n'), ((254, 277), 'os.path.exists', 'os.path.exists', (['mod_dir'], {}), '(mod_dir)\n', (268, 277), False, 'import os\n'), ((425, 445), 'utils.mkdir', 'utils.mkdir', (['"""cache"""'], {}), "('cache')\n", (436, 445), False, 'import utils\n'), ((704, 761), 'utils.unzip', 'utils.unzip', (['download_file', 'mod_dir'], {'shift': '(1)', 'remove': '(True)'}), '(download_file, mod_dir, shift=1, remove=True)\n', (715, 761), False, 'import utils\n'), ((854, 877), 'os.path.exists', 'os.path.exists', (['mod_jar'], {}), '(mod_jar)\n', (868, 877), False, 'import os\n'), ((1335, 1359), 'os.remove', 'os.remove', (['download_file'], {}), '(download_file)\n', (1344, 1359), False, 'import os\n'), ((1433, 1457), 'os.path.exists', 'os.path.exists', (['base_jar'], {}), '(base_jar)\n', (1447, 1457), False, 'import os\n'), ((1803, 1833), 'os.path.join', 'os.path.join', (['"""cache"""', '"""DEBUG"""'], {}), "('cache', 'DEBUG')\n", (1815, 1833), False, 'import os\n'), ((1856, 1886), 'os.path.join', 'os.path.join', (['"""todo"""', '"""images"""'], {}), "('todo', 'images')\n", (1868, 1886), False, 'import os\n'), ((2383, 2414), 'os.path.exists', 'os.path.exists', (['templates_cache'], {}), '(templates_cache)\n', (2397, 2414), False, 'import os\n'), ((2424, 2452), 'utils.mkdir', 'utils.mkdir', (['templates_cache'], {}), '(templates_cache)\n', (2435, 2452), False, 'import utils\n'), ((3130, 3166), 'os.path.join', 'os.path.join', (['image_dir', "(id + '.png')"], {}), "(image_dir, id + '.png')\n", (3142, 3166), False, 'import os\n'), ((7976, 8014), 'numpy.repeat', 'np.repeat', (['image[:, :, :1]', '(4)'], {'axis': '(-1)'}), '(image[:, :, 
:1], 4, axis=-1)\n', (7985, 8014), True, 'import numpy as np\n'), ((8149, 8185), 'os.path.join', 'os.path.join', (['image_dir', "(id + '.png')"], {}), "(image_dir, id + '.png')\n", (8161, 8185), False, 'import os\n'), ((8412, 8441), 'os.path.exists', 'os.path.exists', (['target_p_file'], {}), '(target_p_file)\n', (8426, 8441), False, 'import os\n'), ((8475, 8501), 'os.path.exists', 'os.path.exists', (['image_file'], {}), '(image_file)\n', (8489, 8501), False, 'import os\n'), ((9564, 9593), 'os.path.join', 'os.path.join', (['"""assets"""', '"""512"""'], {}), "('assets', '512')\n", (9576, 9593), False, 'import os\n'), ((9677, 9707), 'os.path.join', 'os.path.join', (['"""assets"""', '"""1024"""'], {}), "('assets', '1024')\n", (9689, 9707), False, 'import os\n'), ((10914, 10933), 'os.remove', 'os.remove', (['old_info'], {}), '(old_info)\n', (10923, 10933), False, 'import os\n'), ((11197, 11225), 'os.path.exists', 'os.path.exists', (['pom_template'], {}), '(pom_template)\n', (11211, 11225), False, 'import os\n'), ((12066, 12085), 'os.remove', 'os.remove', (['jar_file'], {}), '(jar_file)\n', (12075, 12085), False, 'import os\n'), ((12095, 12112), 'utils.cd', 'utils.cd', (['mod_dir'], {}), '(mod_dir)\n', (12103, 12112), False, 'import utils\n'), ((12122, 12146), 'os.system', 'os.system', (['"""mvn package"""'], {}), "('mvn package')\n", (12131, 12146), False, 'import os\n'), ((12158, 12182), 'os.path.exists', 'os.path.exists', (['jar_file'], {}), '(jar_file)\n', (12172, 12182), False, 'import os\n'), ((12262, 12281), 'utils.cd', 'utils.cd', (['spire_dir'], {}), '(spire_dir)\n', (12270, 12281), False, 'import utils\n'), ((12291, 12319), 'os.system', 'os.system', (['"""ModTheSpire.jar"""'], {}), "('ModTheSpire.jar')\n", (12300, 12319), False, 'import os\n'), ((470, 539), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".zip"""', 'dir': '"""cache"""', 'delete': '(False)'}), "(suffix='.zip', dir='cache', delete=False)\n", (497, 539), False, 'import tempfile\n'), ((657, 695), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response', 'out_file'], {}), '(response, out_file)\n', (675, 695), False, 'import shutil\n'), ((944, 1010), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".zip"""', 'dir': '""".."""', 'delete': '(False)'}), "(suffix='.zip', dir='..', delete=False)\n", (971, 1010), False, 'import tempfile\n'), ((1162, 1197), 'zipfile.ZipFile', 'zipfile.ZipFile', (['download_file', '"""r"""'], {}), "(download_file, 'r')\n", (1177, 1197), False, 'import zipfile\n'), ((1960, 1998), 'os.path.join', 'os.path.join', (['"""templates"""', '"""card.java"""'], {}), "('templates', 'card.java')\n", (1972, 1998), False, 'import os\n'), ((2552, 2598), 'os.path.join', 'os.path.join', (['templates_cache', '"""FruiyMod.java"""'], {}), "(templates_cache, 'FruiyMod.java')\n", (2564, 2598), False, 'import os\n'), ((2710, 2757), 'os.path.join', 'os.path.join', (['templates_cache', '"""TheSeeker.java"""'], {}), "(templates_cache, 'TheSeeker.java')\n", (2722, 2757), False, 'import os\n'), ((2878, 2937), 'os.path.join', 'os.path.join', (['templates_cache', '"""FruityMod-CardStrings.json"""'], {}), "(templates_cache, 'FruityMod-CardStrings.json')\n", (2890, 2937), False, 'import os\n'), ((3727, 3773), 'os.path.join', 'os.path.join', (['templates_cache', '"""FruiyMod.java"""'], {}), "(templates_cache, 'FruiyMod.java')\n", (3739, 3773), False, 'import os\n'), ((4558, 4605), 'os.path.join', 'os.path.join', (['templates_cache', '"""TheSeeker.java"""'], {}), 
"(templates_cache, 'TheSeeker.java')\n", (4570, 4605), False, 'import os\n'), ((5912, 5971), 'os.path.join', 'os.path.join', (['templates_cache', '"""FruityMod-CardStrings.json"""'], {}), "(templates_cache, 'FruityMod-CardStrings.json')\n", (5924, 5971), False, 'import os\n'), ((6532, 6571), 'os.path.join', 'os.path.join', (['"""templates"""', '"""power.java"""'], {}), "('templates', 'power.java')\n", (6544, 6571), False, 'import os\n'), ((6937, 6977), 'os.path.join', 'os.path.join', (['"""templates"""', '"""action.java"""'], {}), "('templates', 'action.java')\n", (6949, 6977), False, 'import os\n'), ((7358, 7396), 'os.path.join', 'os.path.join', (['"""templates"""', '"""java.java"""'], {}), "('templates', 'java.java')\n", (7370, 7396), False, 'import os\n'), ((8523, 8550), 'utils.open_data', 'utils.open_data', (['image_file'], {}), '(image_file)\n', (8538, 8550), False, 'import utils\n'), ((9360, 9394), 'skimage.transform.resize', 'resize', (['image', 'target'], {'mode': '"""edge"""'}), "(image, target, mode='edge')\n", (9366, 9394), False, 'from skimage.transform import resize\n'), ((11247, 11279), 'os.path.join', 'os.path.join', (['mod_dir', '"""pom.xml"""'], {}), "(mod_dir, 'pom.xml')\n", (11259, 11279), False, 'import os\n'), ((11914, 11946), 'os.path.join', 'os.path.join', (['mod_dir', '"""pom.xml"""'], {}), "(mod_dir, 'pom.xml')\n", (11926, 11946), False, 'import os\n'), ((2262, 2289), 'utils.format', 'format', (['card_template', 'card'], {}), '(card_template, card)\n', (2268, 2289), False, 'from utils import format\n'), ((3067, 3100), 'spire.name_id', 'name_id', (["card['name']"], {'upper': '(True)'}), "(card['name'], upper=True)\n", (3074, 3100), False, 'from spire import name_id\n'), ((3216, 3242), 'os.path.exists', 'os.path.exists', (['image_file'], {}), '(image_file)\n', (3230, 3242), False, 'import os\n'), ((3290, 3398), 'utils.format', 'format', (['"""public static final String {{ name_id(card["name"], upper=True) }} = "{{ image_file }}";"""'], {}), '(\n \'public static final String {{ name_id(card["name"], upper=True) }} = "{{ image_file }}";\'\n )\n', (3296, 3398), False, 'from utils import format\n'), ((6829, 6858), 'utils.format', 'format', (['power_template', 'power'], {}), '(power_template, power)\n', (6835, 6858), False, 'from utils import format\n'), ((7247, 7278), 'utils.format', 'format', (['action_template', 'action'], {}), '(action_template, action)\n', (7253, 7278), False, 'from utils import format\n'), ((7653, 7680), 'utils.format', 'format', (['java_template', 'java'], {}), '(java_template, java)\n', (7659, 7680), False, 'from utils import format\n'), ((8086, 8119), 'spire.name_id', 'name_id', (["card['name']"], {'upper': '(True)'}), "(card['name'], upper=True)\n", (8093, 8119), False, 'from spire import name_id\n'), ((10029, 10135), 'utils.format', 'format', (['"""BaseMod.addKeyword(new String[] {"{{ name }}", {{ words }}}, "{{ keyword["desc"] }}");"""'], {}), '(\n \'BaseMod.addKeyword(new String[] {"{{ name }}", {{ words }}}, "{{ keyword["desc"] }}");\'\n )\n', (10035, 10135), False, 'from utils import format\n'), ((3486, 3547), 'utils.format', 'format', (['"""BaseMod.addCard(new {{ name_id(card["name"]) }}());"""'], {}), '(\'BaseMod.addCard(new {{ name_id(card["name"]) }}());\')\n', (3492, 3547), False, 'from utils import format\n'), ((3616, 3673), 'utils.format', 'format', (['"""UnlockTracker.unlockCard("{{ card["name"] }}");"""'], {}), '(\'UnlockTracker.unlockCard("{{ card["name"] }}");\')\n', (3622, 3673), False, 'from utils import format\n'), ((8887, 
8919), 'skimage.transform.resize', 'resize', (['image', 'size'], {'mode': '"""edge"""'}), "(image, size, mode='edge')\n", (8893, 8919), False, 'from skimage.transform import resize\n'), ((9048, 9080), 'skimage.transform.resize', 'resize', (['image', 'size'], {'mode': '"""edge"""'}), "(image, size, mode='edge')\n", (9054, 9080), False, 'from skimage.transform import resize\n'), ((2173, 2194), 'spire.name_id', 'name_id', (["card['name']"], {}), "(card['name'])\n", (2180, 2194), False, 'from spire import name_id\n'), ((8813, 8848), 'numpy.round', 'np.round', (['((size[0] - target[0]) / 2)'], {}), '((size[0] - target[0]) / 2)\n', (8821, 8848), True, 'import numpy as np\n'), ((8745, 8767), 'numpy.ceil', 'np.ceil', (['(target[1] * r)'], {}), '(target[1] * r)\n', (8752, 8767), True, 'import numpy as np\n'), ((8987, 9009), 'numpy.ceil', 'np.ceil', (['(target[0] / r)'], {}), '(target[0] / r)\n', (8994, 9009), True, 'import numpy as np\n'), ((9213, 9234), 'numpy.round', 'np.round', (['(image * 255)'], {}), '(image * 255)\n', (9221, 9234), True, 'import numpy as np\n'), ((9429, 9450), 'numpy.round', 'np.round', (['(image * 255)'], {}), '(image * 255)\n', (9437, 9450), True, 'import numpy as np\n'), ((4939, 5081), 'utils.format', 'format', (['"""\n retVal.add("{{ relic }}");\n UnlockTracker.markRelicAsSeen("{{ relic }}");\n """'], {}), '(\n """\n retVal.add("{{ relic }}");\n UnlockTracker.markRelicAsSeen("{{ relic }}");\n """\n )\n', (4945, 5081), False, 'from utils import format\n')]
|
# this resizes __1.jpg to `scale` times its original size (the grayscale load is commented out below)
import cv
import numpy
import bSpline
if __name__ == "__main__": # this is not a module
scale = 10
# load image
#cv_img = cv.LoadImage("__1.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE) # CV_LOAD_IMAGE_GRAYSCALE
cv_img = cv.LoadImage("__1.jpg", cv.CV_LOAD_IMAGE_UNCHANGED) # CV_LOAD_IMAGE_UNCHANGED
# width & height
cv_img_width = cv.GetSize(cv_img)[0]
cv_img_height = cv.GetSize(cv_img)[1]
img_tpl = numpy.zeros( ((cv_img_height * scale),(cv_img_width * scale),2) )
for h in range(0,(cv_img_height * scale),1) :
for w in range(0,(cv_img_width * scale),1) :
img_tpl[h][w][0] = (h + 0) / (cv_img_height * scale * 1.0) * cv_img_height
img_tpl[h][w][1] = (w + 0) / (cv_img_width * scale * 1.0) * cv_img_width
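    # img_tpl[h][w] holds the (row, col) position in the source image that each
    # pixel of the scaled-up output maps back to; it is passed to the bSpline routines below.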
##bSpl = bSpline.BSpline() # v4.0
# single picture
##cv_img_out = bSpl.cubic(cv_img, img_tpl) # v4.0
#cv_img_out = bSpline.cubic(cv_img, img_tpl)
#cv.SaveImage("out__1.jpg", cv_img_out)
# multiple pictures
img_beta_f = bSpline.cubic_getBeta(cv_img, img_tpl)
cv_img_out = bSpline.cubic_setBeta(cv_img, img_tpl, img_beta_f)
cv.SaveImage("out__1.01.jpg", cv_img_out)
#cv_img_out = bSpl.cubic_setBeta(cv_img, img_tpl, img_beta_f)
#cv.SaveImage("out__1.02.jpg", cv_img_out)
#cv_img_out = bSpl.cubic_setBeta(cv_img, img_tpl, img_beta_f)
#cv.SaveImage("out__1.03.jpg", cv_img_out)
|
[
"cv.GetSize",
"bSpline.cubic_setBeta",
"cv.SaveImage",
"numpy.zeros",
"bSpline.cubic_getBeta",
"cv.LoadImage"
] |
[((292, 343), 'cv.LoadImage', 'cv.LoadImage', (['"""__1.jpg"""', 'cv.CV_LOAD_IMAGE_UNCHANGED'], {}), "('__1.jpg', cv.CV_LOAD_IMAGE_UNCHANGED)\n", (304, 343), False, 'import cv\n'), ((480, 541), 'numpy.zeros', 'numpy.zeros', (['(cv_img_height * scale, cv_img_width * scale, 2)'], {}), '((cv_img_height * scale, cv_img_width * scale, 2))\n', (491, 541), False, 'import numpy\n'), ((1028, 1066), 'bSpline.cubic_getBeta', 'bSpline.cubic_getBeta', (['cv_img', 'img_tpl'], {}), '(cv_img, img_tpl)\n', (1049, 1066), False, 'import bSpline\n'), ((1083, 1133), 'bSpline.cubic_setBeta', 'bSpline.cubic_setBeta', (['cv_img', 'img_tpl', 'img_beta_f'], {}), '(cv_img, img_tpl, img_beta_f)\n', (1104, 1133), False, 'import bSpline\n'), ((1135, 1176), 'cv.SaveImage', 'cv.SaveImage', (['"""out__1.01.jpg"""', 'cv_img_out'], {}), "('out__1.01.jpg', cv_img_out)\n", (1147, 1176), False, 'import cv\n'), ((406, 424), 'cv.GetSize', 'cv.GetSize', (['cv_img'], {}), '(cv_img)\n', (416, 424), False, 'import cv\n'), ((445, 463), 'cv.GetSize', 'cv.GetSize', (['cv_img'], {}), '(cv_img)\n', (455, 463), False, 'import cv\n')]
|
import lamp.modules
import torch
import numpy as np
from lamp.utils import get_activation_function
class FeedforwardNeuralNetwork(lamp.modules.BaseModule):
def __init__(self, dim_in, dim_out, architecture, dropout, outf=None, dtype = None, device = None):
super(FeedforwardNeuralNetwork, self).__init__()
architecture = [dim_in] + architecture + [dim_out]
self.layers = torch.nn.Sequential()
for n in range(len(architecture)-1):
self.layers.add_module('fc{}'.format(n+1), torch.nn.Linear(architecture[n], architecture[n+1]))
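            # note: when enabled, dropout uses a fixed p=0.5; the `dropout`
            # argument only controls whether the layers are added at all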
if dropout is not None:
self.layers.add_module('dropout{}'.format(n+1), torch.nn.Dropout(p=0.5))
if n != len(architecture) - 2:
self.layers.add_module('activ{}'.format(n+1), torch.nn.ReLU())
else:
if outf is not None:
self.layers.add_module('out_fct', get_activation_function(outf))
self._to(device=device, dtype=dtype)
def forward(self, x):
return self.layers(x)
@classmethod
def FromLinearDecay(cls, dim_in, dim_out, num_hidden_layers, outf = None, dropout=None, dtype=None, device=None):
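        # Hidden widths are spaced linearly between dim_in and dim_out; the two
        # endpoints of np.linspace are dropped here and re-added by __init__.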
architecture = list(np.linspace(dim_in, dim_out, num_hidden_layers+2).astype(int))
architecture_hidden = architecture[1:-1]
return cls(dim_in, dim_out, architecture_hidden, dropout, outf, dtype, device)
|
[
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Sequential",
"lamp.utils.get_activation_function",
"numpy.linspace",
"torch.nn.Linear"
] |
[((404, 425), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (423, 425), False, 'import torch\n'), ((528, 581), 'torch.nn.Linear', 'torch.nn.Linear', (['architecture[n]', 'architecture[n + 1]'], {}), '(architecture[n], architecture[n + 1])\n', (543, 581), False, 'import torch\n'), ((682, 705), 'torch.nn.Dropout', 'torch.nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (698, 705), False, 'import torch\n'), ((813, 828), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (826, 828), False, 'import torch\n'), ((1240, 1291), 'numpy.linspace', 'np.linspace', (['dim_in', 'dim_out', '(num_hidden_layers + 2)'], {}), '(dim_in, dim_out, num_hidden_layers + 2)\n', (1251, 1291), True, 'import numpy as np\n'), ((939, 968), 'lamp.utils.get_activation_function', 'get_activation_function', (['outf'], {}), '(outf)\n', (962, 968), False, 'from lamp.utils import get_activation_function\n')]
|
import numpy as np
import math
from scipy.sparse import csr_matrix, diags
from scipy import linalg
import time
try:
from numba import jit, njit
numbaOn = True
except ModuleNotFoundError:
numbaOn = False
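# When numba is missing, the kernels below fall back to identical pure-Python
# definitions; numbaOn only selects whether @njit is applied.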
if numbaOn:
@njit(["void(float64[:], f8, float64[:], float64[:], f8, f8)"])
def velocityImplNumba(u, t, f, expVec, dxInvHalf, mu0):
n = len(u)
uSq = np.square(u)
f[0] = dxInvHalf * (math.pow(mu0, 2) - uSq[0]) + expVec[0]
for i in range(1,n):
f[i] = dxInvHalf * ( uSq[i-1] - uSq[i] ) + expVec[i]
else:
def velocityImplNumba(u, t, f, expVec, dxInvHalf, mu0):
n = len(u)
uSq = np.square(u)
f[0] = dxInvHalf * (math.pow(mu0, 2) - uSq[0]) + expVec[0]
for i in range(1,n):
f[i] = dxInvHalf * ( uSq[i-1] - uSq[i] ) + expVec[i]
if numbaOn:
@njit(["void(float64[:], float64[:], float64[:], f8)"])
def fillDiag(u, diag, ldiag, dxInv):
n = len(u)
for i in range(n-1):
diag[i] = -dxInv*u[i]
ldiag[i] = dxInv*u[i]
diag[n-1] = -dxInv*u[n-1]
else:
def fillDiag(u, diag, ldiag, dxInv):
n = len(u)
for i in range(n-1):
diag[i] = -dxInv*u[i]
ldiag[i] = dxInv*u[i]
diag[n-1] = -dxInv*u[n-1]
class Burgers1d:
def __init__(self, Ncell):
self.mu_ = np.array([5., 0.02, 0.02])
self.xL_ = 0.
self.xR_ = 100.
self.Ncell_ = Ncell
self.dx_ = 0.
self.dxInv_ = 0.
self.dxInvHalf_ = 0.
self.xGrid_ = np.zeros(self.Ncell_)
self.U0_ = np.zeros(self.Ncell_)
self.expVec_= np.zeros(self.Ncell_)
self.diag_ = np.zeros(self.Ncell_)
self.ldiag_ = np.zeros(self.Ncell_-1)
self.setup()
def setup(self):
self.dx_ = (self.xR_ - self.xL_)/float(self.Ncell_)
self.dxInv_ = (1.0/self.dx_)
self.dxInvHalf_ = 0.5 * self.dxInv_
for i in range(0, self.Ncell_):
self.U0_[i] = 1.
self.xGrid_[i] = self.dx_*i + self.dx_*0.5
self.expVec_ = self.mu_[1] * np.exp( self.mu_[2] * self.xGrid_ )
def createVelocity(self):
return np.zeros(self.Ncell_)
def velocity(self, u, t, f):
velocityImplNumba(u, t, f[:], self.expVec_,
self.dxInvHalf_, self.mu_[0])
def createApplyJacobianResult(self, B):
return np.zeros_like(B)
def applyJacobian(self, u, B, t, result):
J = self.jacobian(u, t)
result[:] = J.dot(B)
def jacobian(self, u, t):
fillDiag(u, self.diag_, self.ldiag_, self.dxInv_)
return diags( [self.ldiag_, self.diag_], [-1,0], format='csr')
|
[
"math.pow",
"numba.njit",
"numpy.square",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"scipy.sparse.diags",
"numpy.zeros_like"
] |
[((227, 289), 'numba.njit', 'njit', (["['void(float64[:], f8, float64[:], float64[:], f8, f8)']"], {}), "(['void(float64[:], f8, float64[:], float64[:], f8, f8)'])\n", (231, 289), False, 'from numba import jit, njit\n'), ((798, 852), 'numba.njit', 'njit', (["['void(float64[:], float64[:], float64[:], f8)']"], {}), "(['void(float64[:], float64[:], float64[:], f8)'])\n", (802, 852), False, 'from numba import jit, njit\n'), ((373, 385), 'numpy.square', 'np.square', (['u'], {}), '(u)\n', (382, 385), True, 'import numpy as np\n'), ((622, 634), 'numpy.square', 'np.square', (['u'], {}), '(u)\n', (631, 634), True, 'import numpy as np\n'), ((1254, 1281), 'numpy.array', 'np.array', (['[5.0, 0.02, 0.02]'], {}), '([5.0, 0.02, 0.02])\n', (1262, 1281), True, 'import numpy as np\n'), ((1434, 1455), 'numpy.zeros', 'np.zeros', (['self.Ncell_'], {}), '(self.Ncell_)\n', (1442, 1455), True, 'import numpy as np\n'), ((1474, 1495), 'numpy.zeros', 'np.zeros', (['self.Ncell_'], {}), '(self.Ncell_)\n', (1482, 1495), True, 'import numpy as np\n'), ((1514, 1535), 'numpy.zeros', 'np.zeros', (['self.Ncell_'], {}), '(self.Ncell_)\n', (1522, 1535), True, 'import numpy as np\n'), ((1554, 1575), 'numpy.zeros', 'np.zeros', (['self.Ncell_'], {}), '(self.Ncell_)\n', (1562, 1575), True, 'import numpy as np\n'), ((1594, 1619), 'numpy.zeros', 'np.zeros', (['(self.Ncell_ - 1)'], {}), '(self.Ncell_ - 1)\n', (1602, 1619), True, 'import numpy as np\n'), ((2001, 2022), 'numpy.zeros', 'np.zeros', (['self.Ncell_'], {}), '(self.Ncell_)\n', (2009, 2022), True, 'import numpy as np\n'), ((2209, 2225), 'numpy.zeros_like', 'np.zeros_like', (['B'], {}), '(B)\n', (2222, 2225), True, 'import numpy as np\n'), ((2418, 2473), 'scipy.sparse.diags', 'diags', (['[self.ldiag_, self.diag_]', '[-1, 0]'], {'format': '"""csr"""'}), "([self.ldiag_, self.diag_], [-1, 0], format='csr')\n", (2423, 2473), False, 'from scipy.sparse import csr_matrix, diags\n'), ((1925, 1958), 'numpy.exp', 'np.exp', (['(self.mu_[2] * self.xGrid_)'], {}), '(self.mu_[2] * self.xGrid_)\n', (1931, 1958), True, 'import numpy as np\n'), ((410, 426), 'math.pow', 'math.pow', (['mu0', '(2)'], {}), '(mu0, 2)\n', (418, 426), False, 'import math\n'), ((659, 675), 'math.pow', 'math.pow', (['mu0', '(2)'], {}), '(mu0, 2)\n', (667, 675), False, 'import math\n')]
|
import time
import argparse
from datetime import datetime
import logging
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel
from data import EqaDataLoader
from metrics import NavMetric
from models import MaskedNLLCriterion
from models import get_state, ensure_shared_grads
from data import load_vocab
from torch.autograd import Variable
from tqdm import tqdm
import time
torch.backends.cudnn.enabled = False
################################################################################################
#make models trained in pytorch 4 compatible with earlier pytorch versions
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
################################################################################################
def eval(rank, args, shared_model):
torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
if args.model_type == 'cnn':
model_kwargs = {}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm-mult+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnMultModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
eval_loader_kwargs = {
'questions_h5': getattr(args, args.eval_split + '_h5'),
'data_json': args.data_json,
'vocab': args.vocab_json,
'target_obj_conn_map_dir': args.target_obj_conn_map_dir,
'map_resolution': args.map_resolution,
'batch_size': 1,
'input_type': args.model_type,
'num_frames': 5,
'split': args.eval_split,
'max_threads_per_gpu': args.max_threads_per_gpu,
'gpu_id': args.gpus[rank % len(args.gpus)],
'to_cache': False,
'overfit': args.overfit,
'max_controller_actions': args.max_controller_actions,
}
eval_loader = EqaDataLoader(**eval_loader_kwargs)
print('eval_loader has %d samples' % len(eval_loader.dataset))
logging.info("EVAL: eval_loader has {} samples".format(len(eval_loader.dataset)))
args.output_log_path = os.path.join(args.log_dir,
'eval_' + str(rank) + '.json')
t, epoch, best_eval_acc = 0, 0, 0.0
max_epochs = args.max_epochs
if args.mode == 'eval':
max_epochs = 1
while epoch < int(max_epochs):
invalids = []
model.load_state_dict(shared_model.state_dict())
model.eval()
# that's a lot of numbers
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
if 'cnn' in args.model_type:
done = False
while done == False:
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, questions, _, img_feats, actions_in, actions_out, action_length = batch
metrics_slug = {}
# evaluate at multiple initializations
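                # i is the number of expert actions between the spawn point and
                # the target, i.e. the agent starts 10, 30 or 50 steps away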
for i in [10, 30, 50]:
t += 1
if action_length[0] + 1 - i - 5 < 0:
invalids.append(idx[0])
continue
ep_inds = [
x for x in range(action_length[0] + 1 - i - 5,
action_length[0] + 1 - i)
]
sub_img_feats = torch.index_select(
img_feats, 1, torch.LongTensor(ep_inds))
init_pos = eval_loader.dataset.episode_pos_queue[
ep_inds[-1]]
h3d = eval_loader.dataset.episode_house
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append(idx[0])
continue
sub_img_feats_var = Variable(sub_img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
# sample actions till max steps or <stop>
# max no. of actions = 100
episode_length = 0
episode_done = True
dists_to_target, pos_queue, actions = [
init_dist_to_target
], [init_pos], []
for step in range(args.max_episode_length):
episode_length += 1
if '+q' in args.model_type:
scores = model(sub_img_feats_var,
questions_var)
else:
scores = model(sub_img_feats_var)
prob = F.softmax(scores, dim=1)
action = int(prob.max(1)[1].data.cpu().numpy()[0])
actions.append(action)
img, _, episode_done = h3d.step(action)
episode_done = episode_done or episode_length >= args.max_episode_length
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224)
.cuda())).view(1, 1, 3200)
sub_img_feats_var = torch.cat(
[sub_img_feats_var, img_feat_var], dim=1)
sub_img_feats_var = sub_img_feats_var[:, -5:, :]
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done == True:
break
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
if inside_room[-1] == True:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
if any([x == True for x in inside_room]) == True:
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
print(metrics.get_stat_string(mode=0))
print('invalids', len(invalids))
logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
logging.info("EVAL: invalids: {}".format(len(invalids)))
# del h3d
eval_loader.dataset._load_envs()
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
elif 'lstm' in args.model_type:
done = False
while done == False:
if args.overfit:
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, questions, answer, _, actions_in, actions_out, action_lengths, _ = batch
question_var = Variable(questions.cuda())
metrics_slug = {}
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if action_lengths[0] - 1 - i < 0:
invalids.append([idx[0], i])
continue
h3d = eval_loader.dataset.episode_house
# forward through lstm till spawn
if len(eval_loader.dataset.episode_pos_queue[:-i]
) > 0:
images = eval_loader.dataset.get_frames(
h3d,
eval_loader.dataset.episode_pos_queue[:-i],
preprocess=True)
raw_img_feats = eval_loader.dataset.cnn(
Variable(torch.FloatTensor(images).cuda()))
actions_in_pruned = actions_in[:, :
action_lengths[0] -
i]
actions_in_var = Variable(actions_in_pruned.cuda())
action_lengths_pruned = action_lengths.clone(
).fill_(action_lengths[0] - i)
img_feats_var = raw_img_feats.view(1, -1, 3200)
if '+q' in args.model_type:
scores, hidden = model(
img_feats_var, question_var,
actions_in_var,
action_lengths_pruned.cpu().numpy())
else:
scores, hidden = model(
img_feats_var, False, actions_in_var,
action_lengths_pruned.cpu().numpy())
try:
init_pos = eval_loader.dataset.episode_pos_queue[
-i]
except:
invalids.append([idx[0], i])
continue
action_in = torch.LongTensor(1, 1).fill_(
actions_in[0,
action_lengths[0] - i]).cuda()
else:
init_pos = eval_loader.dataset.episode_pos_queue[
-i]
hidden = model.nav_rnn.init_hidden(1)
action_in = torch.LongTensor(1, 1).fill_(0).cuda()
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append([idx[0], i])
continue
img = h3d.env.render()
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224).cuda())).view(
1, 1, 3200)
episode_length = 0
episode_done = True
dists_to_target, pos_queue, actions = [
init_dist_to_target
], [init_pos], []
actual_pos_queue = [(h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw)]
for step in range(args.max_episode_length):
episode_length += 1
if '+q' in args.model_type:
scores, hidden = model(
img_feat_var,
question_var,
Variable(action_in),
False,
hidden=hidden,
step=True)
else:
scores, hidden = model(
img_feat_var,
False,
Variable(action_in),
False,
hidden=hidden,
step=True)
prob = F.softmax(scores, dim=1)
action = int(prob.max(1)[1].data.cpu().numpy()[0])
actions.append(action)
img, _, episode_done = h3d.step(action)
episode_done = episode_done or episode_length >= args.max_episode_length
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224)
.cuda())).view(1, 1, 3200)
action_in = torch.LongTensor(
1, 1).fill_(action + 1).cuda()
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
                        if episode_done:
break
actual_pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw])
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
                    if inside_room[-1]:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
                    if any(inside_room):
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
print(metrics.get_stat_string(mode=0))
print('invalids', len(invalids))
logging.info("EVAL: init_steps: {} metrics: {}".format(i, metrics.get_stat_string(mode=0)))
logging.info("EVAL: init_steps: {} invalids: {}".format(i, len(invalids)))
# del h3d
eval_loader.dataset._load_envs()
print("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
logging.info("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
assert len(eval_loader.dataset.pruned_env_set) > 0
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
elif 'pacman' in args.model_type:
done = False
        while not done:
if args.overfit:
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, question, answer, actions, action_length = batch
metrics_slug = {}
h3d = eval_loader.dataset.episode_house
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if i > action_length[0]:
invalids.append([idx[0], i])
continue
question_var = Variable(question.cuda())
controller_step = False
planner_hidden = model.planner_nav_rnn.init_hidden(1)
# get hierarchical action history
(
planner_actions_in, planner_img_feats,
controller_step, controller_action_in,
controller_img_feats, init_pos,
controller_action_counter
) = eval_loader.dataset.get_hierarchical_features_till_spawn(
actions[0, :action_length[0] + 1].numpy(), i, args.max_controller_actions
)
planner_actions_in_var = Variable(
planner_actions_in.cuda())
planner_img_feats_var = Variable(
planner_img_feats.cuda())
# forward planner till spawn to update hidden state
for step in range(planner_actions_in.size(0)):
planner_scores, planner_hidden = model.planner_step(
question_var, planner_img_feats_var[step]
.unsqueeze(0).unsqueeze(0),
planner_actions_in_var[step].view(1, 1),
planner_hidden
)
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append([idx[0], i])
continue
dists_to_target, pos_queue, pred_actions = [
init_dist_to_target
], [init_pos], []
planner_actions, controller_actions = [], []
episode_length = 0
if args.max_controller_actions > 1:
controller_action_counter = controller_action_counter % args.max_controller_actions
controller_action_counter = max(controller_action_counter - 1, 0)
else:
controller_action_counter = 0
first_step = True
first_step_is_controller = controller_step
planner_step = True
action = int(controller_action_in)
for step in range(args.max_episode_length):
if not first_step:
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224,
224).cuda())).view(
1, 1, 3200)
else:
img_feat_var = Variable(controller_img_feats.cuda()).view(1, 1, 3200)
if not first_step or first_step_is_controller:
# query controller to continue or not
controller_action_in = Variable(
torch.LongTensor(1, 1).fill_(action).cuda())
controller_scores = model.controller_step(
img_feat_var, controller_action_in,
planner_hidden[0])
prob = F.softmax(controller_scores, dim=1)
controller_action = int(
prob.max(1)[1].data.cpu().numpy()[0])
if controller_action == 1 and controller_action_counter < args.max_controller_actions - 1:
controller_action_counter += 1
planner_step = False
else:
controller_action_counter = 0
planner_step = True
controller_action = 0
controller_actions.append(controller_action)
first_step = False
if planner_step:
if not first_step:
action_in = torch.LongTensor(
1, 1).fill_(action + 1).cuda()
planner_scores, planner_hidden = model.planner_step(
question_var, img_feat_var,
Variable(action_in), planner_hidden)
prob = F.softmax(planner_scores, dim=1)
action = int(
prob.max(1)[1].data.cpu().numpy()[0])
planner_actions.append(action)
episode_done = action == 3 or episode_length >= args.max_episode_length
episode_length += 1
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done:
break
img, _, _ = h3d.step(action)
first_step = False
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
                    if inside_room[-1]:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
                    if any(inside_room):
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
try:
print(metrics.get_stat_string(mode=0))
logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
except:
pass
print('epoch', epoch)
print('invalids', len(invalids))
logging.info("EVAL: epoch {}".format(epoch))
logging.info("EVAL: invalids {}".format(invalids))
# del h3d
eval_loader.dataset._load_envs()
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
epoch += 1
# checkpoint if best val loss
if metrics.metrics[8][0] > best_eval_acc: # d_D_50
best_eval_acc = metrics.metrics[8][0]
        if epoch % args.eval_every == 0 and args.log:
metrics.dump_log()
model_state = get_state(model)
aad = dict(args.__dict__)
ad = {}
for i in aad:
if i[0] != '_':
ad[i] = aad[i]
checkpoint = {'args': ad, 'state': model_state, 'epoch': epoch}
checkpoint_path = '%s/epoch_%d_d_D_50_%.04f.pt' % (
args.checkpoint_dir, epoch, best_eval_acc)
print('Saving checkpoint to %s' % checkpoint_path)
logging.info("EVAL: Saving checkpoint to {}".format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
print('[best_eval_d_D_50:%.04f]' % best_eval_acc)
logging.info("EVAL: [best_eval_d_D_50:{:.04f}]".format(best_eval_acc))
eval_loader.dataset._load_envs(start_idx=0, in_order=True)
def train(rank, args, shared_model):
torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
if args.model_type == 'cnn':
model_kwargs = {}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm-mult+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnMultModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
lossFn = torch.nn.CrossEntropyLoss().cuda()
optim = torch.optim.Adamax(
filter(lambda p: p.requires_grad, shared_model.parameters()),
lr=args.learning_rate)
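    # NOTE: the optimizer is built over the *shared* model's trainable parameters;
    # each worker computes gradients on its local copy, copies them into the shared
    # model via ensure_shared_grads(model.cpu(), shared_model) and then calls
    # optim.step(), in the spirit of Hogwild-style asynchronous training.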
train_loader_kwargs = {
'questions_h5': args.train_h5,
'data_json': args.data_json,
'vocab': args.vocab_json,
'batch_size': args.batch_size,
'input_type': args.model_type,
'num_frames': 5,
'map_resolution': args.map_resolution,
'split': 'train',
'max_threads_per_gpu': args.max_threads_per_gpu,
'gpu_id': args.gpus[rank % len(args.gpus)],
'to_cache': args.cache,
'overfit': args.overfit,
'max_controller_actions': args.max_controller_actions,
'max_actions': args.max_actions
}
args.output_log_path = os.path.join(args.log_dir,
'train_' + str(rank) + '.json')
if 'pacman' in args.model_type:
metrics = NavMetric(
info={'split': 'train',
'thread': rank},
metric_names=['planner_loss', 'controller_loss'],
log_json=args.output_log_path)
else:
metrics = NavMetric(
info={'split': 'train',
'thread': rank},
metric_names=['loss'],
log_json=args.output_log_path)
train_loader = EqaDataLoader(**train_loader_kwargs)
print('train_loader has %d samples' % len(train_loader.dataset))
logging.info('TRAIN: train loader has {} samples'.format(len(train_loader.dataset)))
t, epoch = 0, 0
while epoch < int(args.max_epochs):
if 'cnn' in args.model_type:
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
            while not done:
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, img_feats, _, actions_out, _ = batch
img_feats_var = Variable(img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
actions_out_var = Variable(actions_out.cuda())
if '+q' in args.model_type:
scores = model(img_feats_var, questions_var)
else:
scores = model(img_feats_var)
loss = lossFn(scores, actions_out_var)
# zero grad
optim.zero_grad()
# update metrics
metrics.update([loss.data[0]])
# backprop and update
loss.backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
                        if args.log:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
                if not all_envs_loaded:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
                        if not args.cache:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
elif 'lstm' in args.model_type:
lossFn = MaskedNLLCriterion().cuda()
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
total_times = []
            while not done:
start_time = time.time()
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, img_feats, actions_in, actions_out, action_lengths, masks = batch
img_feats_var = Variable(img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
actions_in_var = Variable(actions_in.cuda())
actions_out_var = Variable(actions_out.cuda())
action_lengths = action_lengths.cuda()
masks_var = Variable(masks.cuda())
action_lengths, perm_idx = action_lengths.sort(
0, descending=True)
img_feats_var = img_feats_var[perm_idx]
if '+q' in args.model_type:
questions_var = questions_var[perm_idx]
actions_in_var = actions_in_var[perm_idx]
actions_out_var = actions_out_var[perm_idx]
masks_var = masks_var[perm_idx]
if '+q' in args.model_type:
scores, hidden = model(img_feats_var, questions_var,
actions_in_var,
action_lengths.cpu().numpy())
else:
scores, hidden = model(img_feats_var, False,
actions_in_var,
action_lengths.cpu().numpy())
#block out masks
if args.curriculum:
curriculum_length = (epoch+1)*5
for i, action_length in enumerate(action_lengths):
if action_length - curriculum_length > 0:
masks_var[i, :action_length-curriculum_length] = 0
logprob = F.log_softmax(scores, dim=1)
loss = lossFn(
logprob, actions_out_var[:, :action_lengths.max()]
.contiguous().view(-1, 1),
masks_var[:, :action_lengths.max()].contiguous().view(
-1, 1))
# zero grad
optim.zero_grad()
# update metrics
metrics.update([loss.data[0]])
logging.info("TRAIN LSTM loss: {:.6f}".format(loss.data[0]))
# backprop and update
loss.backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
                        if args.log:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
                if not all_envs_loaded:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
                        if not args.cache:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
elif 'pacman' in args.model_type:
planner_lossFn = MaskedNLLCriterion().cuda()
controller_lossFn = MaskedNLLCriterion().cuda()
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
            while not done:
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, planner_img_feats, planner_actions_in, \
planner_actions_out, planner_action_lengths, planner_masks, \
controller_img_feats, controller_actions_in, planner_hidden_idx, \
controller_outs, controller_action_lengths, controller_masks = batch
questions_var = Variable(questions.cuda())
planner_img_feats_var = Variable(planner_img_feats.cuda())
planner_actions_in_var = Variable(
planner_actions_in.cuda())
planner_actions_out_var = Variable(
planner_actions_out.cuda())
planner_action_lengths = planner_action_lengths.cuda()
planner_masks_var = Variable(planner_masks.cuda())
controller_img_feats_var = Variable(
controller_img_feats.cuda())
controller_actions_in_var = Variable(
controller_actions_in.cuda())
planner_hidden_idx_var = Variable(
planner_hidden_idx.cuda())
controller_outs_var = Variable(controller_outs.cuda())
controller_action_lengths = controller_action_lengths.cuda(
)
controller_masks_var = Variable(controller_masks.cuda())
planner_action_lengths, perm_idx = planner_action_lengths.sort(
0, descending=True)
questions_var = questions_var[perm_idx]
planner_img_feats_var = planner_img_feats_var[perm_idx]
planner_actions_in_var = planner_actions_in_var[perm_idx]
planner_actions_out_var = planner_actions_out_var[perm_idx]
planner_masks_var = planner_masks_var[perm_idx]
controller_img_feats_var = controller_img_feats_var[
perm_idx]
controller_actions_in_var = controller_actions_in_var[
perm_idx]
controller_outs_var = controller_outs_var[perm_idx]
planner_hidden_idx_var = planner_hidden_idx_var[perm_idx]
controller_action_lengths = controller_action_lengths[
perm_idx]
controller_masks_var = controller_masks_var[perm_idx]
planner_scores, controller_scores, planner_hidden = model(
questions_var, planner_img_feats_var,
planner_actions_in_var,
planner_action_lengths.cpu().numpy(),
planner_hidden_idx_var, controller_img_feats_var,
controller_actions_in_var, controller_action_lengths)
planner_logprob = F.log_softmax(planner_scores, dim=1)
controller_logprob = F.log_softmax(
controller_scores, dim=1)
planner_loss = planner_lossFn(
planner_logprob,
planner_actions_out_var[:, :planner_action_lengths.max(
)].contiguous().view(-1, 1),
planner_masks_var[:, :planner_action_lengths.max()]
.contiguous().view(-1, 1))
controller_loss = controller_lossFn(
controller_logprob,
controller_outs_var[:, :controller_action_lengths.max(
)].contiguous().view(-1, 1),
controller_masks_var[:, :controller_action_lengths.max(
)].contiguous().view(-1, 1))
# zero grad
optim.zero_grad()
# update metrics
metrics.update(
[planner_loss.data[0], controller_loss.data[0]])
logging.info("TRAINING PACMAN planner-loss: {:.6f} controller-loss: {:.6f}".format(
planner_loss.data[0], controller_loss.data[0]))
# backprop and update
if args.max_controller_actions == 1:
(planner_loss).backward()
else:
(planner_loss + controller_loss).backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
                        if args.log:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
                if not all_envs_loaded:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
                        if not args.cache:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
epoch += 1
if epoch % args.save_every == 0:
model_state = get_state(model)
optimizer_state = optim.state_dict()
aad = dict(args.__dict__)
ad = {}
for i in aad:
if i[0] != '_':
ad[i] = aad[i]
checkpoint = {'args': ad,
'state': model_state,
'epoch': epoch,
'optimizer': optimizer_state}
checkpoint_path = '%s/epoch_%d_thread_%d.pt' % (
args.checkpoint_dir, epoch, rank)
print('Saving checkpoint to %s' % checkpoint_path)
logging.info("TRAIN: Saving checkpoint to {}".format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# data params
parser.add_argument('-train_h5', default='data/train.h5')
parser.add_argument('-val_h5', default='data/val.h5')
parser.add_argument('-test_h5', default='data/test.h5')
parser.add_argument('-data_json', default='data/data.json')
parser.add_argument('-vocab_json', default='data/vocab.json')
parser.add_argument(
'-target_obj_conn_map_dir',
default='data/target-obj-conn-maps/500')
parser.add_argument('-map_resolution', default=500, type=int)
parser.add_argument(
'-mode',
default='train+eval',
type=str,
choices=['train', 'eval', 'train+eval'])
parser.add_argument('-eval_split', default='val', type=str)
# model details
parser.add_argument(
'-model_type',
default='cnn',
choices=['cnn', 'cnn+q', 'lstm', 'lstm+q', 'lstm-mult+q', 'pacman'])
parser.add_argument('-max_episode_length', default=100, type=int)
parser.add_argument('-curriculum', default=0, type=int)
# optim params
parser.add_argument('-batch_size', default=20, type=int)
parser.add_argument('-learning_rate', default=1e-3, type=float)
parser.add_argument('-max_epochs', default=1000, type=int)
parser.add_argument('-overfit', default=False, action='store_true')
# bookkeeping
parser.add_argument('-print_every', default=5, type=int)
parser.add_argument('-eval_every', default=1, type=int)
parser.add_argument('-save_every', default=1000, type=int) #optional if you would like to save specific epochs as opposed to relying on the eval thread
parser.add_argument('-identifier', default='cnn')
parser.add_argument('-num_processes', default=1, type=int)
parser.add_argument('-max_threads_per_gpu', default=10, type=int)
# checkpointing
parser.add_argument('-checkpoint_path', default=False)
parser.add_argument('-checkpoint_dir', default='checkpoints/nav/')
parser.add_argument('-log_dir', default='logs/nav/')
parser.add_argument('-log', default=False, action='store_true')
parser.add_argument('-cache', default=False, action='store_true')
parser.add_argument('-max_controller_actions', type=int, default=5)
parser.add_argument('-max_actions', type=int)
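    # Illustrative invocation (the script name here is hypothetical; flags are the
    # ones defined above, and CUDA_VISIBLE_DEVICES must be set since the GPU list
    # is read from it below):
    #   CUDA_VISIBLE_DEVICES=0 python nav_train.py -model_type pacman \
    #       -mode train+eval -identifier pacman_run -log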
args = parser.parse_args()
args.time_id = time.strftime("%m_%d_%H:%M")
#MAX_CONTROLLER_ACTIONS = args.max_controller_actions
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
if args.curriculum:
assert 'lstm' in args.model_type #TODO: Finish implementing curriculum for other model types
logging.basicConfig(filename=os.path.join(args.log_dir, "run_{}.log".format(
str(datetime.now()).replace(' ', '_'))),
level=logging.INFO,
format='%(asctime)-15s %(message)s')
try:
args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
args.gpus = [int(x) for x in args.gpus]
except KeyError:
print("CPU not supported")
logging.info("CPU not supported")
exit()
    if args.checkpoint_path:
print('Loading checkpoint from %s' % args.checkpoint_path)
logging.info("Loading checkpoint from {}".format(args.checkpoint_path))
args_to_keep = ['model_type']
checkpoint = torch.load(args.checkpoint_path, map_location={
'cuda:0': 'cpu'
})
for i in args.__dict__:
if i not in args_to_keep:
checkpoint['args'][i] = args.__dict__[i]
args = type('new_dict', (object, ), checkpoint['args'])
args.checkpoint_dir = os.path.join(args.checkpoint_dir,
args.time_id + '_' + args.identifier)
args.log_dir = os.path.join(args.log_dir,
args.time_id + '_' + args.identifier)
# if set to overfit; set eval_split to train
    if args.overfit:
args.eval_split = 'train'
print(args.__dict__)
logging.info(args.__dict__)
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
os.makedirs(args.log_dir)
if args.model_type == 'cnn':
model_kwargs = {}
shared_model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
shared_model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
shared_model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
shared_model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
shared_model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
shared_model.share_memory()
    if args.checkpoint_path:
print('Loading params from checkpoint: %s' % args.checkpoint_path)
logging.info("Loading params from checkpoint: {}".format(args.checkpoint_path))
shared_model.load_state_dict(checkpoint['state'])
if args.mode == 'eval':
eval(0, args, shared_model)
elif args.mode == 'train':
if args.num_processes > 1:
processes = []
for rank in range(0, args.num_processes):
# for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
train(0, args, shared_model)
else:
processes = []
# Start the eval thread
p = mp.Process(target=eval, args=(0, args, shared_model))
p.start()
processes.append(p)
# Start the training thread(s)
for rank in range(1, args.num_processes + 1):
# for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model))
p.start()
processes.append(p)
for p in processes:
p.join()
|
[
"metrics.NavMetric",
"models.NavCnnModel",
"torch.nn.CrossEntropyLoss",
"torch.LongTensor",
"models.get_state",
"numpy.array",
"models.MaskedNLLCriterion",
"logging.info",
"torch.nn.functional.softmax",
"os.path.exists",
"argparse.ArgumentParser",
"models.NavPlannerControllerModel",
"data.EqaDataLoader",
"os.path.isdir",
"torch.autograd.Variable",
"torch.multiprocessing.Process",
"data.load_vocab",
"models.NavCnnRnnModel",
"torch.save",
"torch.nn.functional.log_softmax",
"time.time",
"torch._utils._rebuild_tensor",
"torch.cat",
"models.NavCnnRnnMultModel",
"os.makedirs",
"torch.load",
"time.strftime",
"os.path.join",
"tqdm.tqdm",
"datetime.datetime.now",
"torch.FloatTensor"
] |
[((3106, 3141), 'data.EqaDataLoader', 'EqaDataLoader', ([], {}), '(**eval_loader_kwargs)\n', (3119, 3141), False, 'from data import EqaDataLoader\n'), ((34031, 34067), 'data.EqaDataLoader', 'EqaDataLoader', ([], {}), '(**train_loader_kwargs)\n', (34044, 34067), False, 'from data import EqaDataLoader\n'), ((47924, 47949), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (47947, 47949), False, 'import argparse\n'), ((50244, 50272), 'time.strftime', 'time.strftime', (['"""%m_%d_%H:%M"""'], {}), "('%m_%d_%H:%M')\n", (50257, 50272), False, 'import time\n'), ((51601, 51672), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "(args.time_id + '_' + args.identifier)"], {}), "(args.checkpoint_dir, args.time_id + '_' + args.identifier)\n", (51613, 51672), False, 'import os\n'), ((51731, 51795), 'os.path.join', 'os.path.join', (['args.log_dir', "(args.time_id + '_' + args.identifier)"], {}), "(args.log_dir, args.time_id + '_' + args.identifier)\n", (51743, 51795), False, 'import os\n'), ((51972, 51999), 'logging.info', 'logging.info', (['args.__dict__'], {}), '(args.__dict__)\n', (51984, 51999), False, 'import logging\n'), ((1463, 1490), 'models.NavCnnModel', 'NavCnnModel', ([], {}), '(**model_kwargs)\n', (1474, 1490), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((3737, 4121), 'metrics.NavMetric', 'NavMetric', ([], {'info': "{'split': args.eval_split, 'thread': rank}", 'metric_names': "['d_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50', 'd_D_10',\n 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30', 'd_min_50', 'r_T_10',\n 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30', 'r_e_50', 'stop_10', 'stop_30',\n 'stop_50', 'ep_len_10', 'ep_len_30', 'ep_len_50']", 'log_json': 'args.output_log_path'}), "(info={'split': args.eval_split, 'thread': rank}, metric_names=[\n 'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50', 'd_D_10',\n 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30', 'd_min_50', 'r_T_10',\n 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30', 'r_e_50', 'stop_10', 'stop_30',\n 'stop_50', 'ep_len_10', 'ep_len_30', 'ep_len_50'], log_json=args.\n output_log_path)\n", (3746, 4121), False, 'from metrics import NavMetric\n'), ((31682, 31709), 'models.NavCnnModel', 'NavCnnModel', ([], {}), '(**model_kwargs)\n', (31693, 31709), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((33634, 33770), 'metrics.NavMetric', 'NavMetric', ([], {'info': "{'split': 'train', 'thread': rank}", 'metric_names': "['planner_loss', 'controller_loss']", 'log_json': 'args.output_log_path'}), "(info={'split': 'train', 'thread': rank}, metric_names=[\n 'planner_loss', 'controller_loss'], log_json=args.output_log_path)\n", (33643, 33770), False, 'from metrics import NavMetric\n'), ((33851, 33959), 'metrics.NavMetric', 'NavMetric', ([], {'info': "{'split': 'train', 'thread': rank}", 'metric_names': "['loss']", 'log_json': 'args.output_log_path'}), "(info={'split': 'train', 'thread': rank}, metric_names=['loss'],\n log_json=args.output_log_path)\n", (33860, 33959), False, 'from metrics import NavMetric\n'), ((50344, 50371), 'os.path.isdir', 'os.path.isdir', (['args.log_dir'], {}), '(args.log_dir)\n', (50357, 50371), False, 'import os\n'), ((50381, 50406), 'os.makedirs', 'os.makedirs', (['args.log_dir'], {}), '(args.log_dir)\n', (50392, 50406), False, 'import os\n'), ((51294, 51358), 'torch.load', 'torch.load', (['args.checkpoint_path'], {'map_location': "{'cuda:0': 'cpu'}"}), "(args.checkpoint_path, 
map_location={'cuda:0': 'cpu'})\n", (51304, 51358), False, 'import torch\n'), ((52012, 52047), 'os.path.exists', 'os.path.exists', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (52026, 52047), False, 'import os\n'), ((52057, 52089), 'os.makedirs', 'os.makedirs', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (52068, 52089), False, 'import os\n'), ((52098, 52123), 'os.makedirs', 'os.makedirs', (['args.log_dir'], {}), '(args.log_dir)\n', (52109, 52123), False, 'import os\n'), ((52208, 52235), 'models.NavCnnModel', 'NavCnnModel', ([], {}), '(**model_kwargs)\n', (52219, 52235), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((933, 1000), 'torch._utils._rebuild_tensor', 'torch._utils._rebuild_tensor', (['storage', 'storage_offset', 'size', 'stride'], {}), '(storage, storage_offset, size, stride)\n', (961, 1000), False, 'import torch\n'), ((1675, 1702), 'models.NavCnnModel', 'NavCnnModel', ([], {}), '(**model_kwargs)\n', (1686, 1702), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((31894, 31921), 'models.NavCnnModel', 'NavCnnModel', ([], {}), '(**model_kwargs)\n', (31905, 31921), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((32684, 32711), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (32709, 32711), False, 'import torch\n'), ((47169, 47185), 'models.get_state', 'get_state', (['model'], {}), '(model)\n', (47178, 47185), False, 'from models import get_state, ensure_shared_grads\n'), ((47842, 47881), 'torch.save', 'torch.save', (['checkpoint', 'checkpoint_path'], {}), '(checkpoint, checkpoint_path)\n', (47852, 47881), False, 'import torch\n'), ((50997, 51030), 'logging.info', 'logging.info', (['"""CPU not supported"""'], {}), "('CPU not supported')\n", (51009, 51030), False, 'import logging\n'), ((52427, 52454), 'models.NavCnnModel', 'NavCnnModel', ([], {}), '(**model_kwargs)\n', (52438, 52454), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((53900, 53953), 'torch.multiprocessing.Process', 'mp.Process', ([], {'target': 'eval', 'args': '(0, args, shared_model)'}), '(target=eval, args=(0, args, shared_model))\n', (53910, 53953), True, 'import torch.multiprocessing as mp\n'), ((1621, 1648), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (1631, 1648), False, 'from data import load_vocab\n'), ((1783, 1813), 'models.NavCnnRnnModel', 'NavCnnRnnModel', ([], {}), '(**model_kwargs)\n', (1797, 1813), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((4377, 4394), 'tqdm.tqdm', 'tqdm', (['eval_loader'], {}), '(eval_loader)\n', (4381, 4394), False, 'from tqdm import tqdm\n'), ((30672, 30688), 'models.get_state', 'get_state', (['model'], {}), '(model)\n', (30681, 30688), False, 'from models import get_state, ensure_shared_grads\n'), ((31243, 31282), 'torch.save', 'torch.save', (['checkpoint', 'checkpoint_path'], {}), '(checkpoint, checkpoint_path)\n', (31253, 31282), False, 'import torch\n'), ((31840, 31867), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (31850, 31867), False, 'from data import load_vocab\n'), ((32002, 32032), 'models.NavCnnRnnModel', 'NavCnnRnnModel', ([], {}), '(**model_kwargs)\n', (32016, 32032), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, 
NavPlannerControllerModel\n'), ((52366, 52393), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (52376, 52393), False, 'from data import load_vocab\n'), ((52542, 52572), 'models.NavCnnRnnModel', 'NavCnnRnnModel', ([], {}), '(**model_kwargs)\n', (52556, 52572), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((54166, 54223), 'torch.multiprocessing.Process', 'mp.Process', ([], {'target': 'train', 'args': '(rank, args, shared_model)'}), '(target=train, args=(rank, args, shared_model))\n', (54176, 54223), True, 'import torch.multiprocessing as mp\n'), ((1999, 2029), 'models.NavCnnRnnModel', 'NavCnnRnnModel', ([], {}), '(**model_kwargs)\n', (2013, 2029), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((11294, 11311), 'tqdm.tqdm', 'tqdm', (['eval_loader'], {}), '(eval_loader)\n', (11298, 11311), False, 'from tqdm import tqdm\n'), ((32223, 32257), 'models.NavCnnRnnMultModel', 'NavCnnRnnMultModel', ([], {}), '(**model_kwargs)\n', (32241, 32257), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((36992, 37003), 'time.time', 'time.time', ([], {}), '()\n', (37001, 37003), False, 'import time\n'), ((52765, 52795), 'models.NavCnnRnnModel', 'NavCnnRnnModel', ([], {}), '(**model_kwargs)\n', (52779, 52795), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((53587, 53644), 'torch.multiprocessing.Process', 'mp.Process', ([], {'target': 'train', 'args': '(rank, args, shared_model)'}), '(target=train, args=(rank, args, shared_model))\n', (53597, 53644), True, 'import torch.multiprocessing as mp\n'), ((1945, 1972), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (1955, 1972), False, 'from data import load_vocab\n'), ((2220, 2254), 'models.NavCnnRnnMultModel', 'NavCnnRnnMultModel', ([], {}), '(**model_kwargs)\n', (2238, 2254), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((10632, 11016), 'metrics.NavMetric', 'NavMetric', ([], {'info': "{'split': args.eval_split, 'thread': rank}", 'metric_names': "['d_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50', 'd_D_10',\n 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30', 'd_min_50', 'r_T_10',\n 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30', 'r_e_50', 'stop_10', 'stop_30',\n 'stop_50', 'ep_len_10', 'ep_len_30', 'ep_len_50']", 'log_json': 'args.output_log_path'}), "(info={'split': args.eval_split, 'thread': rank}, metric_names=[\n 'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50', 'd_D_10',\n 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30', 'd_min_50', 'r_T_10',\n 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30', 'r_e_50', 'stop_10', 'stop_30',\n 'stop_50', 'ep_len_10', 'ep_len_30', 'ep_len_50'], log_json=args.\n output_log_path)\n", (10641, 11016), False, 'from metrics import NavMetric\n'), ((21204, 21221), 'tqdm.tqdm', 'tqdm', (['eval_loader'], {}), '(eval_loader)\n', (21208, 21221), False, 'from tqdm import tqdm\n'), ((32169, 32196), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (32179, 32196), False, 'from data import load_vocab\n'), ((32443, 32473), 'models.NavCnnRnnModel', 'NavCnnRnnModel', ([], {}), '(**model_kwargs)\n', (32457, 32473), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((36767, 36787), 
'models.MaskedNLLCriterion', 'MaskedNLLCriterion', ([], {}), '()\n', (36785, 36787), False, 'from models import MaskedNLLCriterion\n'), ((39102, 39130), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (39115, 39130), True, 'import torch.nn.functional as F\n'), ((52704, 52731), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (52714, 52731), False, 'from data import load_vocab\n'), ((52930, 52971), 'models.NavPlannerControllerModel', 'NavPlannerControllerModel', ([], {}), '(**model_kwargs)\n', (52955, 52971), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((2166, 2193), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (2176, 2193), False, 'from data import load_vocab\n'), ((2382, 2423), 'models.NavPlannerControllerModel', 'NavPlannerControllerModel', ([], {}), '(**model_kwargs)\n', (2407, 2423), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((5233, 5258), 'torch.LongTensor', 'torch.LongTensor', (['ep_inds'], {}), '(ep_inds)\n', (5249, 5258), False, 'import torch\n'), ((6827, 6851), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (6836, 6851), True, 'import torch.nn.functional as F\n'), ((7530, 7581), 'torch.cat', 'torch.cat', (['[sub_img_feats_var, img_feat_var]'], {'dim': '(1)'}), '([sub_img_feats_var, img_feat_var], dim=1)\n', (7539, 7581), False, 'import torch\n'), ((20542, 20926), 'metrics.NavMetric', 'NavMetric', ([], {'info': "{'split': args.eval_split, 'thread': rank}", 'metric_names': "['d_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50', 'd_D_10',\n 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30', 'd_min_50', 'r_T_10',\n 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30', 'r_e_50', 'stop_10', 'stop_30',\n 'stop_50', 'ep_len_10', 'ep_len_30', 'ep_len_50']", 'log_json': 'args.output_log_path'}), "(info={'split': args.eval_split, 'thread': rank}, metric_names=[\n 'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50', 'd_D_10',\n 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30', 'd_min_50', 'r_T_10',\n 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30', 'r_e_50', 'stop_10', 'stop_30',\n 'stop_50', 'ep_len_10', 'ep_len_30', 'ep_len_50'], log_json=args.\n output_log_path)\n", (20551, 20926), False, 'from metrics import NavMetric\n'), ((32389, 32416), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (32399, 32416), False, 'from data import load_vocab\n'), ((32601, 32642), 'models.NavPlannerControllerModel', 'NavPlannerControllerModel', ([], {}), '(**model_kwargs)\n', (32626, 32642), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((41010, 41030), 'models.MaskedNLLCriterion', 'MaskedNLLCriterion', ([], {}), '()\n', (41028, 41030), False, 'from models import MaskedNLLCriterion\n'), ((41070, 41090), 'models.MaskedNLLCriterion', 'MaskedNLLCriterion', ([], {}), '()\n', (41088, 41090), False, 'from models import MaskedNLLCriterion\n'), ((44370, 44406), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['planner_scores'], {'dim': '(1)'}), '(planner_scores, dim=1)\n', (44383, 44406), True, 'import torch.nn.functional as F\n'), ((44448, 44487), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['controller_scores'], {'dim': '(1)'}), '(controller_scores, dim=1)\n', (44461, 44487), True, 'import torch.nn.functional as F\n'), ((52878, 
52905), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (52888, 52905), False, 'from data import load_vocab\n'), ((2337, 2364), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (2347, 2364), False, 'from data import load_vocab\n'), ((8501, 8526), 'numpy.array', 'np.array', (['dists_to_target'], {}), '(dists_to_target)\n', (8509, 8526), True, 'import numpy as np\n'), ((16354, 16378), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (16363, 16378), True, 'import torch.nn.functional as F\n'), ((32556, 32583), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (32566, 32583), False, 'from data import load_vocab\n'), ((50667, 50681), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (50679, 50681), False, 'from datetime import datetime\n'), ((18083, 18108), 'numpy.array', 'np.array', (['dists_to_target'], {}), '(dists_to_target)\n', (18091, 18108), True, 'import numpy as np\n'), ((15775, 15794), 'torch.autograd.Variable', 'Variable', (['action_in'], {}), '(action_in)\n', (15783, 15794), False, 'from torch.autograd import Variable\n'), ((16156, 16175), 'torch.autograd.Variable', 'Variable', (['action_in'], {}), '(action_in)\n', (16164, 16175), False, 'from torch.autograd import Variable\n'), ((25691, 25726), 'torch.nn.functional.softmax', 'F.softmax', (['controller_scores'], {'dim': '(1)'}), '(controller_scores, dim=1)\n', (25700, 25726), True, 'import torch.nn.functional as F\n'), ((26961, 26993), 'torch.nn.functional.softmax', 'F.softmax', (['planner_scores'], {'dim': '(1)'}), '(planner_scores, dim=1)\n', (26970, 26993), True, 'import torch.nn.functional as F\n'), ((28232, 28257), 'numpy.array', 'np.array', (['dists_to_target'], {}), '(dists_to_target)\n', (28240, 28257), True, 'import numpy as np\n'), ((12480, 12505), 'torch.FloatTensor', 'torch.FloatTensor', (['images'], {}), '(images)\n', (12497, 12505), False, 'import torch\n'), ((13884, 13906), 'torch.LongTensor', 'torch.LongTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (13900, 13906), False, 'import torch\n'), ((14284, 14306), 'torch.LongTensor', 'torch.LongTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (14300, 14306), False, 'import torch\n'), ((17050, 17072), 'torch.LongTensor', 'torch.LongTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (17066, 17072), False, 'import torch\n'), ((26884, 26903), 'torch.autograd.Variable', 'Variable', (['action_in'], {}), '(action_in)\n', (26892, 26903), False, 'from torch.autograd import Variable\n'), ((25404, 25426), 'torch.LongTensor', 'torch.LongTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (25420, 25426), False, 'import torch\n'), ((26598, 26620), 'torch.LongTensor', 'torch.LongTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (26614, 26620), False, 'import torch\n')]
|
from BinaryModel import *
from numpy.random import rand
class MajorityModel(BinaryModel):
def __init__(self, filename=None):
self.mdlPrm = {
'addNoise' : False,
}
self.wkrIds = {}
self.imgIds = {}
if filename:
self.load_data(filename)
else:
self._setup_prior()
def __del__(self):
pass
def load_data(self, filename, skipyaml=False):
"""
Data is assumed to be in the format:
imageId workerId label
"""
# load the text data
filein = open(filename)
info = filein.readline().rstrip().split(' ')
self.numLbls = int(info[2])
self.numWkrs = int(info[1])
self.numImgs = int(info[0])
self.imgPrm = []
for i in range(self.numImgs):
self.imgPrm.append([0, 0]) # (frac +ve votes, total n votes)
self.wkrLbls = dict((id, []) for id in range(self.numWkrs))
self.imgLbls = dict((id, []) for id in range(self.numImgs))
self.labels = []
for (lino, line) in enumerate(filein):
cols = [int(c) for c in line.rstrip().split(' ')]
iId = cols[0]; wId = cols[1]; lij = int(cols[2]==1)
self.wkrLbls[wId].append([iId, lij])
self.imgLbls[iId].append([wId, lij])
self.labels.append((iId, wId, lij))
self.imgPrm[iId][0] += lij
self.imgPrm[iId][1] += 1
# renormalize img prm
for i in range(len(self.imgPrm)):
self.imgPrm[i][0] = float(self.imgPrm[i][0])/self.imgPrm[i][1]
def get_num_wkrs(self):
return self.numWkrs
def get_num_imgs(self):
return self.numImgs
def get_num_lbls(self):
return self.numLbls
def set_model_param(self, raw=[], prm=None):
"""
Sets model parameters.
Arguments:
- `raw`: raw parameter vector
- `prm`: hash of model parameter values to be changed
"""
        if prm is not None:
            for (k, v) in prm.items():
self.mdlPrm[k] = v
def set_worker_param(self, raw):
pass
def set_image_param(self, raw):
self.imgPrm = [r for r in raw]
def get_model_param(self):
return {}
def get_worker_param_raw(self):
return {}
def get_image_param_raw(self):
return [p for p in self.imgPrm]
def get_worker_param(self, id=None):
return {}
def get_image_param(self, id=None):
return [p for p in self.imgPrm]
def get_labels(self):
if self.mdlPrm['addNoise']:
return [int((self.imgPrm[i][0]+(rand()-.5)/self.imgPrm[i][1])>.5)\
for i in range(len(self.imgPrm))]
else:
return [int(self.imgPrm[i][0]>.5) for i \
in range(len(self.imgPrm))]
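    # NOTE: get_labels implements a plain majority vote. For example (hypothetical
    # values), imgPrm[i] == [0.6, 5] (60% positive votes out of 5) yields label
    # int(0.6 > .5) == 1. When mdlPrm['addNoise'] is set, a random perturbation of
    # magnitude below 0.5 / (number of votes) is added before thresholding, which
    # breaks exact 50/50 ties at random.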
# TODO: load and save parameters
def optimize_worker_param(self):
pass
def optimize_image_param(self):
pass
def objective(self, prm=None):
pass
def image_objective(self, prm=None):
pass
def image_objective_range(self, imgId, prm):
pass
def worker_objective_range(self, wkrId, prm):
pass
def gradient(self, prm=None):
return []
def worker_gradient(self, prm=None):
return []
def image_gradient(self, prm=None):
pass
def get_num_wkr_lbls(self):
return [len(self.wkrLbls[id]) for id in range(self.numWkrs)]
def get_num_img_lbls(self):
return [len(self.imgLbls[id]) for id in range(self.numImgs)]
|
[
"numpy.random.rand"
] |
[((2670, 2676), 'numpy.random.rand', 'rand', ([], {}), '()\n', (2674, 2676), False, 'from numpy.random import rand\n')]
|
import matplotlib.pyplot as plt
import numpy
import errandpy
"""
Returns the fitting parameters a, b, c, d read from a log file.
When normalized_param is True, the normalized parameters are returned.
"""
def real_a(a, delta, min):
return (a + 1) * delta + min
def real_b(b, delta):
return b * delta
def get_z0FromLogFile(path, isLegacy=False):
with open(path, 'r') as f:
lines = f.readlines()
length = len(lines)
if isLegacy:
s = lines[length - 4].split(" ")
# print(path, lines[length - 4], int(s[13]))
else:
s = lines[length - 3].split(" ")
return int(s[13])
def legacy_get_logFileParamater(path, normalized_param=True, normMode=1) -> list:
with open(path, 'r') as f:
lines = f.readlines()
length = len(lines)
s = lines[length - 2].split(" ")
print(s)
if len(s) == 10:
result = [float(s[3]), float(s[5]), float(s[7]), float(s[9])]
else:
result = [0,0,0,0]
print(" Warning: Log File Error!!! " + path)
if normalized_param is False:
min = float(lines[0].split(" ")[1][normMode:-2])
delta = float(lines[1].split(" ")[1][normMode:-2])
result[0] = real_a(result[0], delta, min)
result[1] = real_b(result[1], delta)
return result
def get_logFileParamater(path, normalized_param=True, normMode=1) -> list:
with open(path, 'r') as f:
lines = f.readlines()
length = len(lines)
s = lines[length - 1].split(" ")
# print(s)
if len(s) == 12 or len(s) == 14:
result = [float(s[3]), float(s[5]), float(s[7]), float(s[9])]
else:
result = [0,0,0,0]
print(" Warning: Log File Error!!! " + path)
if normalized_param is False:
min = float(lines[0].split(" ")[1][normMode:-2])
delta = float(lines[1].split(" ")[1][normMode:-2])
result[0] = real_a(result[0], delta, min)
result[1] = real_b(result[1], delta)
print(result)
return result
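# NOTE (inferred from the parsing above): the fit parameters a, b, c, d are read
# from tokens 3, 5, 7 and 9 of the last log line (which is expected to contain 12
# or 14 space-separated tokens), while the first two lines carry the min/delta
# values used by real_a/real_b to undo the normalization when
# normalized_param is False.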
def _f_long(x, a, b, c, d):
if errandpy.useLegacyModel:
y = a - b / (1 + c * x) ** d
else:
y = a - b / (c + x) ** d
return y
def clamp(minValue, maxValue, value):
return max(min(value, maxValue), minValue)
def clamp01(value):
return clamp(0, 1, value)
def mean_r(x, y, a, b, c, d):
ss_res = numpy.dot((y - _f_long(x, a, b, c, d)), (y - _f_long(x, a, b, c, d)))
ymean = numpy.mean(y)
ss_tot = numpy.dot((y - ymean), (y - ymean))
return 1 - ss_res / ss_tot
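# NOTE: mean_r is the coefficient of determination
#   R^2 = 1 - SS_res / SS_tot
# of the _f_long fit on (x, y); 1.0 means a perfect fit. Example with hypothetical
# arrays and parameters:
#   r2 = mean_r(x[bound:], y[bound:], a, b, c, d)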
def normalized(array, max=1, bias=0):
minValue = array.min(keepdims=True)
maxValue = array.max(keepdims=True)
result = (array - minValue) / (maxValue - minValue) * max + bias
return result, minValue, maxValue - minValue
def draw_plt(x, y, a, b, c, d, bound, name, ze=None):
y_b = y[bound:]
plt.clf()
plt.scatter(x, y, color='red', label='Original data', alpha=0.5)
_x = x[bound:]
plt.title(name + " (Mean R: " + str(mean_r(_x, y_b, a, b, c, d)) + ")")
plt.axhline(0, color='green', linestyle='dashdot')
plt.axvline(x[bound], color='green', linestyle='dashdot')
if ze is not None:
plt.axvline(x[ze], color='blue', linestyle='dashdot')
plt.plot(x, _f_long(x, a, b, c, d), color='blue', label='Fitted line')
plt.plot(x, y - _f_long(x, a, b, c, d), color='black', label='force curve')
|
[
"numpy.mean",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.axhline",
"numpy.dot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axvline"
] |
[((2581, 2594), 'numpy.mean', 'numpy.mean', (['y'], {}), '(y)\n', (2591, 2594), False, 'import numpy\n'), ((2609, 2640), 'numpy.dot', 'numpy.dot', (['(y - ymean)', '(y - ymean)'], {}), '(y - ymean, y - ymean)\n', (2618, 2640), False, 'import numpy\n'), ((3009, 3018), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3016, 3018), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3088), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'color': '"""red"""', 'label': '"""Original data"""', 'alpha': '(0.5)'}), "(x, y, color='red', label='Original data', alpha=0.5)\n", (3035, 3088), True, 'import matplotlib.pyplot as plt\n'), ((3195, 3245), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""green"""', 'linestyle': '"""dashdot"""'}), "(0, color='green', linestyle='dashdot')\n", (3206, 3245), True, 'import matplotlib.pyplot as plt\n'), ((3251, 3308), 'matplotlib.pyplot.axvline', 'plt.axvline', (['x[bound]'], {'color': '"""green"""', 'linestyle': '"""dashdot"""'}), "(x[bound], color='green', linestyle='dashdot')\n", (3262, 3308), True, 'import matplotlib.pyplot as plt\n'), ((3342, 3395), 'matplotlib.pyplot.axvline', 'plt.axvline', (['x[ze]'], {'color': '"""blue"""', 'linestyle': '"""dashdot"""'}), "(x[ze], color='blue', linestyle='dashdot')\n", (3353, 3395), True, 'import matplotlib.pyplot as plt\n')]
|
import qiskit
import qtm.progress_bar
import qtm.constant
import qtm.qfim
import qtm.noise
import qtm.optimizer
import qtm.fubini_study
import numpy as np
import types, typing
def measure(qc: qiskit.QuantumCircuit, qubits, cbits=[]):
    """Measure the quantum circuit, which has full measurement gates
    Args:
        - qc (QuantumCircuit): Measured circuit
        - qubits (np.ndarray): list of measured qubits
    Returns:
        - float: frequency of the all-zero classical bitstring
"""
n = len(qubits)
if cbits == []:
cbits = qubits.copy()
for i in range(0, n):
qc.measure(qubits[i], cbits[i])
if qtm.constant.noise_prob > 0:
noise_model = qtm.noise.generate_noise_model(
n, qtm.constant.noise_prob)
results = qiskit.execute(qc, backend=qtm.constant.backend,
noise_model=noise_model,
shots=qtm.constant.num_shots).result()
# Raw counts
counts = results.get_counts()
# Mitigating noise based on https://qiskit.org/textbook/ch-quantum-hardware/measurement-error-mitigation.html
meas_filter = qtm.noise.generate_measurement_filter(
n, noise_model=noise_model)
# Mitigated counts
counts = meas_filter.apply(counts.copy())
else:
counts = qiskit.execute(
qc, backend=qtm.constant.backend,
shots=qtm.constant.num_shots).result().get_counts()
return counts.get("0" * len(qubits), 0) / qtm.constant.num_shots
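# Usage sketch (illustrative): `measure` expects a circuit that already has
# classical bits and returns the observed frequency of the all-zero bitstring.
#   qc = qiskit.QuantumCircuit(2, 2)
#   p0 = measure(qc, [0, 1])  # ~1.0 for the |00> state, up to shot noise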
def x_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=[]):
    """Measure the given qubits in the X basis
    Args:
        qc (qiskit.QuantumCircuit): measured circuit
        qubits (np.ndarray): list of measured qubits
cbits (list, optional): classical bits. Defaults to [].
Returns:
qiskit.QuantumCircuit: added measure gates circuit
"""
if cbits == []:
cbits = qubits.copy()
for i in range(0, len(qubits)):
qc.h(qubits[i])
qc.measure(qubits[i], cbits[i])
return qc
def y_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=[]):
    """Measure the given qubits in the Y basis
    Args:
        qc (qiskit.QuantumCircuit): measured circuit
        qubits (np.ndarray): list of measured qubits
cbits (list, optional): classical bits. Defaults to [].
Returns:
qiskit.QuantumCircuit: added measure gates circuit
"""
if cbits == []:
cbits = qubits.copy()
for i in range(0, len(qubits)):
qc.sdg(qubits[i])
qc.h(qubits[i])
qc.measure(qubits[i], cbits[i])
return qc
def z_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=[]):
    """Measure the given qubits in the computational (Z) basis
    Args:
        qc (qiskit.QuantumCircuit): measured circuit
        qubits (np.ndarray): list of measured qubits
cbits (list, optional): classical bits. Defaults to [].
Returns:
qiskit.QuantumCircuit: added measure gates circuit
"""
if cbits == []:
cbits = qubits.copy()
for i in range(0, len(qubits)):
qc.measure(qubits[i], cbits[i])
return qc
def get_u_hat(thetas: np.ndarray, create_circuit_func: types.FunctionType, num_qubits: int,
**kwargs):
"""Return inverse of reconstructed gate
Args:
- thetas (np.ndarray): Parameters
- num_qubits (Int): number of qubit
Returns:
        - Statevector: the state vector obtained when applying the inverse of the reconstructed gate
"""
qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
if not kwargs:
qc = create_circuit_func(qc, thetas).inverse()
else:
qc = create_circuit_func(qc, thetas, **kwargs).inverse()
return qiskit.quantum_info.Statevector.from_instruction(qc)
def get_cry_index(create_circuit_func: types.FunctionType, thetas: np.ndarray, num_qubits, **kwargs):
    """Return a list whose i-th entry is 1 if thetas[i] is a parameter of a CRY gate
Args:
- func (types.FunctionType): The creating circuit function
- thetas (np.ndarray): Parameters
Returns:
- np.ndarray: The index list has length equal with number of parameters
"""
qc = qiskit.QuantumCircuit(num_qubits)
qc = create_circuit_func(qc, thetas, **kwargs)
layers = qtm.fubini_study.split_into_layers(qc)
index_list = []
for layer in layers:
for gate in layer[1]:
if gate[0] == 'cry':
index_list.append(1)
else:
index_list.append(0)
if len(index_list) == len(thetas):
return index_list
return index_list
def grad_loss(qc: qiskit.QuantumCircuit, create_circuit_func: types.FunctionType,
thetas: np.ndarray, **kwargs):
"""Return the gradient of the loss function
L = 1 - |<psi~|psi>|^2 = 1 - P_0
=> nabla_L = - nabla_P_0 = - r (P_0(+s) - P_0(-s))
Args:
- qc (QuantumCircuit): The quantum circuit want to calculate the gradient
- create_circuit_func (Function): The creating circuit function
- thetas (np.ndarray): Parameters
- c_0 (float): cost value
- **kwargs: additional parameters for different create_circuit_func()
Returns:
- np.ndarray: the gradient vector
"""
index_list = get_cry_index(create_circuit_func, thetas,
num_qubits=qc.num_qubits, **kwargs)
grad_loss = np.zeros(len(thetas))
for i in range(0, len(thetas)):
if index_list[i] == 0:
# In equation (13)
thetas1, thetas2 = thetas.copy(), thetas.copy()
thetas1[i] += qtm.constant.two_term_psr['s']
thetas2[i] -= qtm.constant.two_term_psr['s']
qc1 = create_circuit_func(qc.copy(), thetas1, **kwargs)
qc2 = create_circuit_func(qc.copy(), thetas2, **kwargs)
grad_loss[i] = -qtm.constant.two_term_psr['r'] * (
qtm.base.measure(qc1, list(range(qc1.num_qubits))) -
qtm.base.measure(qc2, list(range(qc2.num_qubits))))
if index_list[i] == 1:
# In equation (14)
thetas1, thetas2 = thetas.copy(), thetas.copy()
thetas3, thetas4 = thetas.copy(), thetas.copy()
thetas1[i] += qtm.constant.four_term_psr['alpha']
thetas2[i] -= qtm.constant.four_term_psr['alpha']
thetas3[i] += qtm.constant.four_term_psr['beta']
thetas4[i] -= qtm.constant.four_term_psr['beta']
qc1 = create_circuit_func(qc.copy(), thetas1, **kwargs)
qc2 = create_circuit_func(qc.copy(), thetas2, **kwargs)
qc3 = create_circuit_func(qc.copy(), thetas3, **kwargs)
qc4 = create_circuit_func(qc.copy(), thetas4, **kwargs)
grad_loss[i] = - (qtm.constant.four_term_psr['d_plus'] * (
qtm.base.measure(qc1, list(range(qc1.num_qubits))) -
qtm.base.measure(qc2, list(range(qc2.num_qubits)))) - qtm.constant.four_term_psr['d_minus'] * (
qtm.base.measure(qc3, list(range(qc3.num_qubits))) -
qtm.base.measure(qc4, list(range(qc4.num_qubits)))))
return grad_loss
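# --- Hedged sanity check (illustration only, never called): the two-term parameter-shift
# rule relied on above.  For a single RY(theta) acting on |0>, P_0 = cos(theta/2)**2 and the
# exact derivative -sin(theta)/2 is reproduced with r = 1/2 and s = pi/2 (the standard
# two-term PSR constants; the actual values in qtm.constant are assumed to match).
def _demo_two_term_psr(theta=0.7):
    import numpy as np
    r, s = 0.5, np.pi / 2
    p0 = lambda t: np.cos(t / 2) ** 2
    psr_grad = r * (p0(theta + s) - p0(theta - s))
    exact_grad = -np.sin(theta) / 2
    assert abs(psr_grad - exact_grad) < 1e-12
    return psr_grad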
def grad_psi(qc: qiskit.QuantumCircuit, create_circuit_func: types.FunctionType,
thetas: np.ndarray, r: float, s: float, **kwargs):
"""Return the derivatite of the psi base on parameter shift rule
Args:
- qc (qiskit.QuantumCircuit): circuit
- create_circuit_func (types.FunctionType)
- thetas (np.ndarray): parameters
- r (float): in psr
- s (float): in psr
Returns:
- np.ndarray: N x N matrix
"""
gradient_psi = []
for i in range(0, len(thetas)):
thetas_copy = thetas.copy()
thetas_copy[i] += s
qc_copy = create_circuit_func(qc.copy(), thetas_copy, **kwargs)
psi_qc = qiskit.quantum_info.Statevector.from_instruction(qc_copy).data
psi_qc = np.expand_dims(psi_qc, 1)
gradient_psi.append(r * psi_qc)
gradient_psi = np.array(gradient_psi)
return gradient_psi
def fit_state_tomography(u: qiskit.QuantumCircuit,
create_vdagger_func: types.FunctionType,
thetas: np.ndarray,
num_steps: int,
loss_func: types.FunctionType,
optimizer: types.FunctionType,
verbose: int = 0,
is_return_all_thetas: bool = False,
**kwargs):
"""Return the new thetas that fit with the circuit from create_vdagger_func function
Args:
- u (QuantumCircuit): fitting circuit
- create_vdagger_func (types.FunctionType): added circuit function
- thetas (np.ndarray): parameters
- num_steps (Int): number of iterations
- loss_func (types.FunctionType): loss function
- optimizer (types.FunctionType): otimizer function
- verbose (Int): the seeing level of the fitting process (0: nothing, 1: progress bar, 2: one line per step)
- **kwargs: additional parameters for create_circuit_func()
Returns:
- thetas (np.ndarray): the optimized parameters
- loss_values (np.ndarray): the list of loss_value
"""
thetass = []
loss_values = []
if verbose == 1:
bar = qtm.progress_bar.ProgressBar(max_value=num_steps, disable=False)
for i in range(0, num_steps):
grad_loss = qtm.base.grad_loss(u, create_vdagger_func, thetas, **kwargs)
optimizer_name = optimizer.__name__
if optimizer_name == 'sgd':
thetas = qtm.optimizer.sgd(thetas, grad_loss)
elif optimizer_name == 'adam':
if i == 0:
m, v = list(np.zeros(thetas.shape[0])), list(
np.zeros(thetas.shape[0]))
thetas = qtm.optimizer.adam(thetas, m, v, i, grad_loss)
elif optimizer_name in ['qng_fubini_study', 'qng_qfim', 'qng_adam']:
grad_psi1 = grad_psi(u,
create_vdagger_func,
thetas,
r=qtm.constant.two_term_psr['s'],
s=np.pi,
**kwargs)
u_copy = create_vdagger_func(u.copy(), thetas, **kwargs)
psi = qiskit.quantum_info.Statevector.from_instruction(u_copy).data
psi = np.expand_dims(psi, 1)
if optimizer_name == 'qng_fubini_study':
G = qtm.fubini_study.qng(
u.copy(), thetas, create_vdagger_func, **kwargs)
thetas = qtm.optimizer.qng_fubini_study(thetas, G, grad_loss)
if optimizer_name == 'qng_qfim':
thetas = qtm.optimizer.qng_qfim(
thetas, psi, grad_psi1, grad_loss)
if optimizer_name == 'qng_adam':
if i == 0:
m, v = list(np.zeros(thetas.shape[0])), list(
np.zeros(thetas.shape[0]))
thetas = qtm.optimizer.qng_adam(
thetas, m, v, i, psi, grad_psi1, grad_loss)
else:
thetas = optimizer(thetas, grad_loss)
u_copy = create_vdagger_func(u.copy(), thetas, **kwargs)
loss = loss_func(
qtm.base.measure(u_copy, list(range(u_copy.num_qubits))))
loss_values.append(loss)
thetass.append(thetas.copy())
if verbose == 1:
bar.update(1)
if verbose == 2 and i % 10 == 0:
print("Step " + str(i) + ": " + str(loss))
if verbose == 1:
bar.close()
if is_return_all_thetas:
return thetass, loss_values
else:
return thetas, loss_values
def fit_state_preparation(create_u_func: types.FunctionType,
vdagger: qiskit.QuantumCircuit,
thetas: np.ndarray,
num_steps: int,
loss_func: types.FunctionType,
optimizer: types.FunctionType,
verbose: int = 0,
is_return_all_thetas: bool = False,
**kwargs):
"""Return the new thetas that fit with the circuit from create_u_func function
Args:
- create_u_func (types.FunctionType): added circuit function
- vdagger (QuantumCircuit): fitting circuit
- thetas (np.ndarray): parameters
- num_steps (Int): number of iterations
- loss_func (types.FunctionType): loss function
- optimizer (types.FunctionType): otimizer function
- verbose (Int): the seeing level of the fitting process (0: nothing, 1: progress bar, 2: one line per step)
- **kwargs: additional parameters for create_circuit_func()
Returns:
- thetas (np.ndarray): the optimized parameters
- loss_values (np.ndarray): the list of loss_value
"""
if verbose == 1:
bar = qtm.progress_bar.ProgressBar(max_value=num_steps, disable=False)
thetass = []
loss_values = []
def create_circuit_func(vdagger: qiskit.QuantumCircuit, thetas: np.ndarray, **kwargs):
return create_u_func(qiskit.QuantumCircuit(vdagger.num_qubits, vdagger.num_qubits), thetas, **kwargs).combine(vdagger)
for i in range(0, num_steps):
grad_loss = qtm.base.grad_loss(vdagger, create_circuit_func, thetas, **kwargs)
optimizer_name = optimizer.__name__
if optimizer_name == 'sgd':
thetas = qtm.optimizer.sgd(thetas, grad_loss)
elif optimizer_name == 'adam':
if i == 0:
m, v1 = list(np.zeros(thetas.shape[0])), list(
np.zeros(thetas.shape[0]))
thetas = qtm.optimizer.adam(thetas, m, v1, i, grad_loss)
elif optimizer_name in ['qng_fubini_study', 'qng_qfim', 'qng_adam']:
grad_psi1 = grad_psi(vdagger,
create_circuit_func,
thetas,
r=1 / 2,
s=np.pi,
**kwargs)
v_copy = create_circuit_func(vdagger.copy(), thetas, **kwargs)
psi = qiskit.quantum_info.Statevector.from_instruction(
v_copy).data
psi = np.expand_dims(psi, 1)
if optimizer_name == 'qng_fubini_study':
G = qtm.fubini_study.qng(
vdagger.copy(), thetas, create_circuit_func, **kwargs)
thetas = qtm.optimizer.qng_fubini_study(thetas, G, grad_loss)
if optimizer_name == 'qng_qfim':
thetas = qtm.optimizer.qng_qfim(
thetas, psi, grad_psi1, grad_loss)
if optimizer_name == 'qng_adam':
if i == 0:
m, v1 = list(np.zeros(thetas.shape[0])), list(
np.zeros(thetas.shape[0]))
thetas = qtm.optimizer.qng_adam(
thetas, m, v1, i, psi, grad_psi1, grad_loss)
else:
thetas = optimizer(thetas, grad_loss)
v_copy = create_circuit_func(vdagger.copy(), thetas, **kwargs)
loss = loss_func(
qtm.base.measure(v_copy, list(range(v_copy.num_qubits))))
loss_values.append(loss)
thetass.append(thetas.copy())
if verbose == 1:
bar.update(1)
if verbose == 2 and i % 10 == 0:
print("Step " + str(i) + ": " + str(loss))
if verbose == 1:
bar.close()
if is_return_all_thetas:
return thetass, loss_values
else:
return thetas, loss_values
def fit(u: typing.Union[qiskit.QuantumCircuit, types.FunctionType], v: typing.Union[qiskit.QuantumCircuit, types.FunctionType],
thetas: np.ndarray,
num_steps: int,
loss_func: types.FunctionType,
optimizer: types.FunctionType,
verbose: int = 0,
is_return_all_thetas: bool = False,
**kwargs):
if callable(u):
return fit_state_preparation(create_u_func=u,
vdagger=v,
thetas=thetas,
num_steps=num_steps,
loss_func=loss_func,
optimizer=optimizer,
verbose=verbose,
is_return_all_thetas=is_return_all_thetas,
**kwargs)
else:
return fit_state_tomography(u=u,
create_vdagger_func=v,
thetas=thetas,
num_steps=num_steps,
loss_func=loss_func,
optimizer=optimizer,
verbose=verbose,
is_return_all_thetas=is_return_all_thetas,
**kwargs)
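# --- Hedged usage sketch (illustration only, never called): how fit() might be driven for
# state preparation.  `ry_ansatz` and `simple_gd` are hypothetical names made up here; the
# loss 1 - P_0 and the (thetas, grad) optimizer signature follow the conventions used above,
# and the surrounding qtm package is assumed to be importable.
def _demo_fit_state_preparation():
    import numpy as np
    import qiskit

    def ry_ansatz(qc, thetas):
        # one RY rotation per qubit, matching the create_u_func(qc, thetas, **kwargs) contract
        for q in range(qc.num_qubits):
            qc.ry(thetas[q], q)
        return qc

    def simple_gd(thetas, grad):
        # any optimizer name not special-cased above falls through to optimizer(thetas, grad_loss)
        return thetas - 0.1 * grad

    vdagger = qiskit.QuantumCircuit(2, 2)
    vdagger.ry(0.4, 0)
    vdagger.ry(0.8, 1)
    vdagger = vdagger.inverse()
    return fit(ry_ansatz, vdagger, np.zeros(2),
               num_steps=30,
               loss_func=lambda p0: 1 - p0,
               optimizer=simple_gd)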
|
[
"qiskit.execute",
"numpy.array",
"numpy.zeros",
"qiskit.quantum_info.Statevector.from_instruction",
"numpy.expand_dims",
"qiskit.QuantumCircuit"
] |
[((3447, 3492), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['num_qubits', 'num_qubits'], {}), '(num_qubits, num_qubits)\n', (3468, 3492), False, 'import qiskit\n'), ((3653, 3705), 'qiskit.quantum_info.Statevector.from_instruction', 'qiskit.quantum_info.Statevector.from_instruction', (['qc'], {}), '(qc)\n', (3701, 3705), False, 'import qiskit\n'), ((4116, 4149), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['num_qubits'], {}), '(num_qubits)\n', (4137, 4149), False, 'import qiskit\n'), ((7952, 7974), 'numpy.array', 'np.array', (['gradient_psi'], {}), '(gradient_psi)\n', (7960, 7974), True, 'import numpy as np\n'), ((7867, 7892), 'numpy.expand_dims', 'np.expand_dims', (['psi_qc', '(1)'], {}), '(psi_qc, 1)\n', (7881, 7892), True, 'import numpy as np\n'), ((7787, 7844), 'qiskit.quantum_info.Statevector.from_instruction', 'qiskit.quantum_info.Statevector.from_instruction', (['qc_copy'], {}), '(qc_copy)\n', (7835, 7844), False, 'import qiskit\n'), ((761, 868), 'qiskit.execute', 'qiskit.execute', (['qc'], {'backend': 'qtm.constant.backend', 'noise_model': 'noise_model', 'shots': 'qtm.constant.num_shots'}), '(qc, backend=qtm.constant.backend, noise_model=noise_model,\n shots=qtm.constant.num_shots)\n', (775, 868), False, 'import qiskit\n'), ((10365, 10387), 'numpy.expand_dims', 'np.expand_dims', (['psi', '(1)'], {}), '(psi, 1)\n', (10379, 10387), True, 'import numpy as np\n'), ((13144, 13205), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['vdagger.num_qubits', 'vdagger.num_qubits'], {}), '(vdagger.num_qubits, vdagger.num_qubits)\n', (13165, 13205), False, 'import qiskit\n'), ((14276, 14298), 'numpy.expand_dims', 'np.expand_dims', (['psi', '(1)'], {}), '(psi, 1)\n', (14290, 14298), True, 'import numpy as np\n'), ((1322, 1400), 'qiskit.execute', 'qiskit.execute', (['qc'], {'backend': 'qtm.constant.backend', 'shots': 'qtm.constant.num_shots'}), '(qc, backend=qtm.constant.backend, shots=qtm.constant.num_shots)\n', (1336, 1400), False, 'import qiskit\n'), ((10285, 10341), 'qiskit.quantum_info.Statevector.from_instruction', 'qiskit.quantum_info.Statevector.from_instruction', (['u_copy'], {}), '(u_copy)\n', (10333, 10341), False, 'import qiskit\n'), ((14179, 14235), 'qiskit.quantum_info.Statevector.from_instruction', 'qiskit.quantum_info.Statevector.from_instruction', (['v_copy'], {}), '(v_copy)\n', (14227, 14235), False, 'import qiskit\n'), ((9688, 9713), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (9696, 9713), True, 'import numpy as np\n'), ((9742, 9767), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (9750, 9767), True, 'import numpy as np\n'), ((13594, 13619), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (13602, 13619), True, 'import numpy as np\n'), ((13648, 13673), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (13656, 13673), True, 'import numpy as np\n'), ((10883, 10908), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (10891, 10908), True, 'import numpy as np\n'), ((10941, 10966), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (10949, 10966), True, 'import numpy as np\n'), ((14803, 14828), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (14811, 14828), True, 'import numpy as np\n'), ((14861, 14886), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (14869, 14886), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import random
import sys
from collections import Counter
import json
from argparse import ArgumentParser
from rand_utils import rand_partition
def build_tree(num_leaves = 10, rootdate = 1000):
"""
Starting from a three-node tree, split a randomly chosen branch to insert a new child
TODO: replace this with a coalescent method
"""
def _get_target_node_by_total_time(node, r):
interval1 = (node["date"] - node["left"]["date"]) * node["left"]["stability"]
if interval1 > r:
return node, True, r
r -= interval1
if node["left"]["left"] is not None:
node2, is_left, r2 = _get_target_node_by_total_time(node["left"], r)
if node2 is not None:
return node2, is_left, r2
r = r2
interval2 = (node["date"] - node["right"]["date"]) * node["right"]["stability"]
if interval2 > r:
return node, False, r
if node["right"]["left"] is not None:
return _get_target_node_by_total_time(node["right"], r - interval2)
return None, False, r - interval2
# endef
gshape, gscale = 2.0, 0.5
tree = {
"date": rootdate,
"left": {
"date": 0,
"left": None,
"right": None,
"name": "L0",
"stability": np.random.gamma(gshape, gscale),
},
"right": {
"date": 0,
"left": None,
"right": None,
"name": "L1",
"stability": np.random.gamma(gshape, gscale),
},
"name": "I0",
"stability": 1.0,
}
cur_leafnum = 2
cur_inodenum = 1
# totaltime = rootdate * 2
totaltime = rootdate * (tree["left"]["stability"] + tree["right"]["stability"])
while cur_leafnum < num_leaves:
r = np.random.uniform(0, totaltime)
parent, is_left, r2 = _get_target_node_by_total_time(tree, r)
cnode = {
"date": 0,
"left": None,
"right": None,
"name": "L{}".format(cur_leafnum),
"stability": np.random.gamma(gshape, gscale),
}
inode = {
"left": None,
"right": None,
"name": "I{}".format(cur_inodenum),
}
if is_left:
inode["date"] = parent["date"] - r2 / parent["left"]["stability"]
assert(inode["date"] > 0)
inode["stability"] = parent["left"]["stability"]
inode["right"] = cnode
inode["left"] = parent["left"]
parent["left"] = inode
else:
inode["date"] = parent["date"] - r2 / parent["right"]["stability"]
inode["stability"] = parent["right"]["stability"]
inode["left"] = cnode
inode["right"] = parent["right"]
parent["right"] = inode
# totaltime += inode["date"]
totaltime += inode["date"] * cnode["stability"]
cur_leafnum += 1
cur_inodenum += 1
return tree
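# Hedged usage sketch (illustration only, never called): build a small time-tree and check
# the leaf count; get_leaves is defined further below in this file.
def _demo_build_tree():
    demo_tree = build_tree(num_leaves=5, rootdate=1000)
    assert len(get_leaves(demo_tree, [])) == 5
    return demo_tree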
def set_locations_by_random_walk(tree, variance=1.0):
"""
Perform simple random walks to assign coordinates
"""
def _set_locations_main(parent, node, variance):
interval = parent["date"] - node["date"]
_var = variance * interval
loc = np.random.multivariate_normal([parent["x"], parent["y"]], [[_var, 0.0], [0.0, _var]])
node["x"] = loc[0]
node["y"] = loc[1]
if node["left"] is not None:
assert(node["right"] is not None)
_set_locations_main(node, node["left"], variance)
_set_locations_main(node, node["right"], variance)
# endef
tree["x"] = tree["y"] = 0.0
_set_locations_main(tree, tree["left"], variance=variance)
_set_locations_main(tree, tree["right"], variance=variance)
def gen_traits(tree, _lambda=1.0, fnum=100):
"""
    Along each branch,
    - draw birth events from a Poisson process with rate _lambda (exponential waiting times)
    - for each birth event, pick the feature to update with "rich gets richer" weights
    - ensure at least one event happens on every branch
"""
def _gen_traits_main(parent, node, flist, vcount, _lambda):
interval = parent["date"] - node["date"]
node["catvect"] = np.copy(parent["catvect"])
# # replace features num times
# num = np.random.poisson(_lambda * interval)
# # the same feature can be updated multiple times along a branch
# target_features = np.unique(np.random.randint(0, len(flist), size=num))
target_features = {}
t = 0.0
while True:
r = np.random.exponential(scale=1.0 / _lambda)
t += r
if t >= interval:
break
# the rich gets richer
weights = list(map(lambda x: x["size"] + 1.0, flist))
fid = rand_partition(weights)
            if fid in target_features:
                # the same feature can be updated multiple times along a branch
                # just update the time (re-fetch the feature node here; otherwise fnode
                # would still point at the previously updated feature)
                fnode = flist[fid]
                fval = node["catvect"][fid]
                fnode["annotation"]["vid2date"][fval] = parent["date"] + t
else:
fnode = flist[fid]
fnode["size"] += 1
fnode["annotation"]["vid2date"][vcount] = parent["date"] + t
node["catvect"][fid] = vcount
vcount += 1
target_features[fid] = t
# ensure that at least one event happens
if len(target_features) <= 0:
t = np.random.uniform(0.0, interval)
fid = np.random.randint(0, len(flist))
fnode = flist[fid]
fnode["size"] += 1
fnode["annotation"]["vid2date"][vcount] = parent["date"] + t
node["catvect"][fid] = vcount
vcount += 1
if node["left"] is not None:
assert(node["right"] is not None)
vcount = _gen_traits_main(node, node["left"], flist, vcount, _lambda)
vcount = _gen_traits_main(node, node["right"], flist, vcount, _lambda)
return vcount
# endef
flist = []
for i in range(fnum):
flist.append({
"fid": i,
"size": 1,
"type": "cat",
"annotation": {
"vid2date": {
i: 0,
}
},
})
tree["catvect"] = np.arange(fnum)
vcount = fnum
vcount = _gen_traits_main(tree, tree["left"], flist, vcount, _lambda)
vcount = _gen_traits_main(tree, tree["right"], flist, vcount, _lambda)
return flist, vcount
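# Hedged illustration (never called): the "rich gets richer" draw above assumes that
# rand_partition(weights) returns an index with probability proportional to the weights;
# a numpy stand-in under that assumption:
def _demo_weighted_feature_choice(flist):
    weights = np.array([f["size"] + 1.0 for f in flist], dtype=float)
    return np.random.choice(len(flist), p=weights / weights.sum())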
def update_tree_by_borrowings(tree, flist, nu=0.05):
def _update_nodeval(node, fid, oldv, newv):
if node["catvect"][fid] != oldv:
return 0
node["catvect"][fid] = newv
change = 1
if node["left"] is not None:
change += _update_nodeval(node["left"], fid, oldv, newv)
change += _update_nodeval(node["right"], fid, oldv, newv)
return change
nodes = get_all_nodes(tree)
nodes_by_date = sorted(nodes, key=lambda x: x["date"], reverse=True)
for i in range(1, len(nodes_by_date)):
node = nodes_by_date[i]
# # # # #
# if node["date"] == 0.0:
# break
# collect branches
contemporary_nodes = []
for pnode in nodes_by_date[:i]:
if pnode["left"] is None:
break
if pnode["left"] is not node and pnode["left"]["date"] <= node["date"]:
contemporary_nodes.append((pnode, pnode["left"]))
if pnode["right"] is not node and pnode["right"]["date"] <= node["date"]:
contemporary_nodes.append((pnode, pnode["right"]))
assert(len(contemporary_nodes) > 0)
weights = []
for pnode, cnode in contemporary_nodes:
# TODO: weighted avg of the locations of pnode and cnode?
dist = np.sqrt((node["x"] - cnode["x"]) ** 2 + (node["y"] - cnode["y"]) ** 2)
weight = np.exp(20.0 * (max(dist / 3, 1.0) ** -0.5))
weights.append(weight)
weights = np.array(weights)
# print(weights / weights.sum())
for fid, is_borrowing in enumerate(np.random.rand(len(flist)) < nu):
if not is_borrowing:
continue
cid = rand_partition(weights)
pnode, cnode = contemporary_nodes[cid]
# too similar, no chance to be documented separately
if node["date"] == 0.0:
overlap = (cnode["catvect"] == pnode["catvect"]).sum() / float(len(pnode["catvect"]))
if overlap > 0.95:
sys.stderr.write("overlap {} ... skip\n".format(overlap))
continue
v = cnode["catvect"][fid]
if cnode["catvect"][fid] == pnode["catvect"][fid]:
newval = v
else:
date = flist[fid]["annotation"]["vid2date"][v]
if date > node["date"]:
newval = v
else:
newval = pnode["catvect"][fid]
# update only if the borrowed one is different from the original
if node["catvect"][fid] != v:
oldv = node["catvect"][fid]
change = _update_nodeval(node, fid, oldv, v)
sys.stderr.write("{} nodes updated\t{} -> {}\n".format(change, oldv, v))
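# Hedged illustration (never called): the borrowing weight used above is flat (its maximum,
# exp(20)) for distances up to 3, then decays as the geographic distance grows.
def _demo_borrowing_weight(dist):
    return np.exp(20.0 * (max(dist / 3, 1.0) ** -0.5))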
def merge_leaves(tree, thres=0.98):
stack = [tree]
while len(stack) > 0:
node = stack.pop(0)
if node["left"] is not None:
if node["left"]["left"] is None and node["right"]["left"] is None:
assert(node["left"]["date"] == 0.0 and node["right"]["date"] == 0.0)
overlap = (node["left"]["catvect"] == node["right"]["catvect"]).sum() / float(len(node["left"]["catvect"]))
if overlap >= thres:
sys.stderr.write("overlap {} ... remove!\n".format(overlap))
node["name"] = node["left"]["name"]
node["date"] = 0.0
node["left"] = None
node["right"] = None
# restart
# TODO: efficiency
stack = [tree]
else:
sys.stderr.write("test passed {}\n".format(overlap))
else:
stack.append(node["left"])
stack.append(node["right"])
def update_vids(tree, flist, keep_singletons=False):
nodes = get_all_nodes(tree)
fidcounts = [Counter() for i in range(len(flist))]
for node in nodes:
for fid, v in enumerate(node["catvect"]):
fidcounts[fid][v] += 1
do_keep = np.ones(len(flist), dtype=np.bool_)
if not keep_singletons:
for fid in range(len(flist)):
if len(fidcounts[fid]) <= 1:
do_keep[fid] = 0
num_removed = len(flist) - do_keep.sum()
sys.stderr.write("remove {} singleton features\n".format(num_removed))
for node in nodes:
node["catvect"] = node["catvect"][do_keep]
flist2, fidcounts2 = [], []
vcount = 0
for is_kept, fnode, fidcount in zip(do_keep, flist, fidcounts):
if is_kept:
fnode["fid"] = len(flist2)
flist2.append(fnode)
fidcounts2.append(fidcount)
flist = flist2
fidcounts = fidcounts2
vcount = 0
for fid, (fnode, fidcount) in enumerate(zip(flist, fidcounts)):
fnode["size"] = len(fidcount)
vcount += fnode["size"]
labels = sorted(fidcount.keys(), key=int)
fnode["annotation"]["label2vid"] = {}
fnode["annotation"]["vid2label"] = []
for vid, _label in enumerate(labels):
fnode["annotation"]["label2vid"][_label] = vid
fnode["annotation"]["vid2label"].append(_label)
for node in nodes:
node["catvect"][fid] = fnode["annotation"]["label2vid"][node["catvect"][fid]]
return flist, vcount
def get_all_nodes(tree):
stack = [tree]
nodes = []
while len(stack) > 0:
node = stack.pop(0)
nodes.append(node)
if node["left"] is not None:
stack.append(node["left"])
stack.append(node["right"])
return nodes
def get_leaves(node, leaves):
if node["left"] is not None:
get_leaves(node["left"], leaves)
get_leaves(node["right"], leaves)
else:
leaves.append(node)
return leaves
def to_nexus(tree, flist, vcount, dump_tree=False):
leaves = get_leaves(tree, [])
# nexus
rv = "#NEXUS\r\nBEGIN TAXA;\r\nDIMENSIONS NTAX={};\r\nEND;\r\n".format(
len(leaves),
)
rv += "\r\nBEGIN CHARACTERS;\r\nDIMENSIONS NCHAR={};\r\nFORMAT\r\n\tDATATYPE=STANDARD\r\n\tSYMBOLS=\"01\"\r\n\tMISSING=?\r\n\tGAP=-\r\n\tINTERLEAVE=NO\r\n;\r\nMATRIX\n\n".format(vcount)
for node in leaves:
name_normalized = node["name"].replace(" ", "_").replace("(", "").replace(")", "")
binrep = np.zeros(vcount, dtype=np.int32)
for fid, v in enumerate(node["catvect"]):
binrep[v] = 1
rv += "{}\t{}\r".format(name_normalized, "".join(map(str, binrep.tolist())))
rv += ";\r\nEND;\r\n"
if dump_tree:
def _dump_tree(parent, node):
if node["left"] is not None:
rv1 = _dump_tree(node, node["left"])
rv2 = _dump_tree(node, node["right"])
rv = "({},{})".format(rv1, rv2)
else:
rv = node["name"].replace(" ", "_").replace("(", "").replace(")", "")
if parent is not None:
rv += ":{}".format(parent["date"] - node["date"])
return rv
# endef
rv += "\r\nBEGIN Trees;\r\nTree tree1 = "
rv += _dump_tree(None, tree)
rv += ";\r\nEND;\r\n"
return rv
def main():
parser = ArgumentParser()
parser.add_argument("-s", "--seed", metavar="INT", type=int, default=None,
help="random seed")
parser.add_argument('--rootdate', type=float, default=1000.0)
parser.add_argument('--num_leaves', type=int, default=10)
parser.add_argument('--variance', type=float, default=5.0,
help="Brownian process parameter")
parser.add_argument('--fnum', type=int, default=100,
help="# of features")
parser.add_argument('--lambda', dest="_lambda", type=float, default=0.02,
help="parameter of a pure birth process")
parser.add_argument('--nu', type=float, default=0.05,
help="borrowing parameter")
parser.add_argument('--keep_singletons', action="store_true", default=False)
parser.add_argument('--merge_thres', type=float, default=0.90,
help="merge near-identical leaves")
parser.add_argument('--tree', type=str, default=None)
parser.add_argument('--langs', type=str, default=None)
parser.add_argument('--flist', type=str, default=None)
parser.add_argument('--nexus', type=str, default=None)
args = parser.parse_args()
sys.stderr.write("args\t{}\n".format(args))
if args.num_leaves <= 2:
sys.stderr.write("# of leaves must be larger than 2\n")
sys.exit(1)
if args.seed is not None:
np.random.seed(args.seed)
# random.seed(args.seed)
# build a time-tree
tree = build_tree(args.num_leaves, args.rootdate)
# assign an xy coordinate to each node
set_locations_by_random_walk(tree, variance=args.variance)
# generate features
flist, vcount = gen_traits(tree, _lambda=args._lambda, fnum=args.fnum)
sys.stderr.write("{}\n".format(tree))
sys.stderr.write("{}\n".format(vcount))
# sys.stderr.write("{}\n".format(flist))
if args.nu > 0.0:
update_tree_by_borrowings(tree, flist, nu=args.nu)
# merge near-identical leaves
# too similar, no chance to be documented separately
merge_leaves(tree, thres=args.merge_thres)
flist, vcount = update_vids(tree, flist, keep_singletons=args.keep_singletons)
sys.stderr.write("{}\n".format(vcount))
for node in get_all_nodes(tree):
node["catvect"] = node["catvect"].tolist()
if args.tree is not None:
with open(args.tree, 'w') as f:
f.write("{}\n".format(json.dumps(tree)))
if args.langs is not None:
with open(args.langs, 'w') as f:
langs = get_leaves(tree, [])
for lang in langs:
f.write("{}\n".format(json.dumps(lang)))
if args.flist is not None:
with open(args.flist, 'w') as f:
f.write("{}\n".format(json.dumps(flist, indent=4, sort_keys=True)))
if args.nexus is not None:
with open(args.nexus, 'w') as f:
f.write(to_nexus(tree, flist, vcount, dump_tree=True))
if __name__ == "__main__":
main()
|
[
"numpy.copy",
"rand_utils.rand_partition",
"sys.exit",
"numpy.sqrt",
"argparse.ArgumentParser",
"numpy.random.multivariate_normal",
"json.dumps",
"numpy.random.exponential",
"collections.Counter",
"numpy.array",
"numpy.zeros",
"sys.stderr.write",
"numpy.random.gamma",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.arange"
] |
[((6320, 6335), 'numpy.arange', 'np.arange', (['fnum'], {}), '(fnum)\n', (6329, 6335), True, 'import numpy as np\n'), ((13855, 13871), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (13869, 13871), False, 'from argparse import ArgumentParser\n'), ((1867, 1898), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'totaltime'], {}), '(0, totaltime)\n', (1884, 1898), True, 'import numpy as np\n'), ((3324, 3414), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (["[parent['x'], parent['y']]", '[[_var, 0.0], [0.0, _var]]'], {}), "([parent['x'], parent['y']], [[_var, 0.0], [\n 0.0, _var]])\n", (3353, 3414), True, 'import numpy as np\n'), ((4187, 4213), 'numpy.copy', 'np.copy', (["parent['catvect']"], {}), "(parent['catvect'])\n", (4194, 4213), True, 'import numpy as np\n'), ((8054, 8071), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (8062, 8071), True, 'import numpy as np\n'), ((10497, 10506), 'collections.Counter', 'Counter', ([], {}), '()\n', (10504, 10506), False, 'from collections import Counter\n'), ((12978, 13010), 'numpy.zeros', 'np.zeros', (['vcount'], {'dtype': 'np.int32'}), '(vcount, dtype=np.int32)\n', (12986, 13010), True, 'import numpy as np\n'), ((15162, 15217), 'sys.stderr.write', 'sys.stderr.write', (['"""# of leaves must be larger than 2\n"""'], {}), "('# of leaves must be larger than 2\\n')\n", (15178, 15217), False, 'import sys\n'), ((15226, 15237), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (15234, 15237), False, 'import sys\n'), ((15276, 15301), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (15290, 15301), True, 'import numpy as np\n'), ((1374, 1405), 'numpy.random.gamma', 'np.random.gamma', (['gshape', 'gscale'], {}), '(gshape, gscale)\n', (1389, 1405), True, 'import numpy as np\n'), ((1564, 1595), 'numpy.random.gamma', 'np.random.gamma', (['gshape', 'gscale'], {}), '(gshape, gscale)\n', (1579, 1595), True, 'import numpy as np\n'), ((2135, 2166), 'numpy.random.gamma', 'np.random.gamma', (['gshape', 'gscale'], {}), '(gshape, gscale)\n', (2150, 2166), True, 'import numpy as np\n'), ((4545, 4587), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': '(1.0 / _lambda)'}), '(scale=1.0 / _lambda)\n', (4566, 4587), True, 'import numpy as np\n'), ((4783, 4806), 'rand_utils.rand_partition', 'rand_partition', (['weights'], {}), '(weights)\n', (4797, 4806), False, 'from rand_utils import rand_partition\n'), ((5467, 5499), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'interval'], {}), '(0.0, interval)\n', (5484, 5499), True, 'import numpy as np\n'), ((7865, 7935), 'numpy.sqrt', 'np.sqrt', (["((node['x'] - cnode['x']) ** 2 + (node['y'] - cnode['y']) ** 2)"], {}), "((node['x'] - cnode['x']) ** 2 + (node['y'] - cnode['y']) ** 2)\n", (7872, 7935), True, 'import numpy as np\n'), ((8266, 8289), 'rand_utils.rand_partition', 'rand_partition', (['weights'], {}), '(weights)\n', (8280, 8289), False, 'from rand_utils import rand_partition\n'), ((16296, 16312), 'json.dumps', 'json.dumps', (['tree'], {}), '(tree)\n', (16306, 16312), False, 'import json\n'), ((16622, 16665), 'json.dumps', 'json.dumps', (['flist'], {'indent': '(4)', 'sort_keys': '(True)'}), '(flist, indent=4, sort_keys=True)\n', (16632, 16665), False, 'import json\n'), ((16497, 16513), 'json.dumps', 'json.dumps', (['lang'], {}), '(lang)\n', (16507, 16513), False, 'import json\n')]
|
import pandas as pd
import numpy as np
import os
import sys
def load_data(assets, start_date, end_date):
df_open = load_data_from_file('etf_data_open.csv', assets, start_date, end_date)
df_close = load_data_from_file('etf_data_close.csv', assets, start_date, end_date)
df_high = load_data_from_file('etf_data_high.csv', assets, start_date, end_date)
df_low = load_data_from_file('etf_data_low.csv', assets, start_date, end_date)
df_adj_close = load_data_from_file('etf_data_adj_close.csv', assets, start_date, end_date)
return df_open, df_close, df_high, df_low, df_adj_close
def load_data_from_file(file, assets, start_date, end_date):
if not os.path.isfile(file):
file = '../etf_data/' + file
if not os.path.isfile(file):
file = '../../etf_data/' + file
if not os.path.isfile(file):
file = '../../../etf_data/' + file
print('Loading file ',file)
df = pd.read_csv(file)
df = df.loc[df.Date > start_date]
df = df.loc[df.Date < end_date]
df = df[assets]
indexes = []
for key in df.keys():
for i in df[key].index:
val = df[key][i]
try:
if np.isnan(val) and not indexes.__contains__(i):
indexes.append(i)
except TypeError:
if not indexes.__contains__(i):
indexes.append(i)
df.drop(indexes, inplace=True)
return df
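# Hedged note (illustration only, never called): for purely numeric asset columns the
# row-dropping loop above is roughly equivalent to coercing to numeric and dropping NaN rows.
def _drop_bad_rows(df):
    return df.apply(pd.to_numeric, errors='coerce').dropna()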
def load_data_from_file2(file, assets, start_date, end_date):
if not os.path.isfile(file):
file = '../etf_data/' + file
if not os.path.isfile(file):
file = '../../etf_data/' + file
if not os.path.isfile(file):
file = '../../../etf_data/' + file
print('Loading file ',file)
df = pd.read_csv(file)
df = df.loc[df.date > start_date]
df = df.loc[df.date < end_date]
df = df[assets]
indexes = []
for key in df.keys():
for i in df[key].index:
val = df[key][i]
try:
if np.isnan(val) and not indexes.__contains__(i):
indexes.append(i)
except TypeError:
if not indexes.__contains__(i):
indexes.append(i)
df.drop(indexes, inplace=True)
return df
def load_all_data_from_file(file, start_date, end_date):
if not os.path.isfile(file):
file = '../etf_data/' + file
if not os.path.isfile(file):
file = '../' + file
if not os.path.isfile(file):
file = '../' + file
print('Loading file ',file)
df = pd.read_csv(file)
df = df.loc[df.Date > start_date]
df = df.loc[df.Date < end_date]
# indexes = []
#
# for key in df.keys():
# for i in df[key].index:
# val = df[key][i]
# try:
# if np.isnan(val) and not indexes.__contains__(i):
# indexes.append(i)
# except TypeError:
# if not indexes.__contains__(i):
# indexes.append(i)
# df.drop(indexes, inplace=True)
return df
def load_all_data_from_file2(file, start_date, end_date):
if not os.path.isfile(file):
file = '../etf_data/' + file
if not os.path.isfile(file):
file = '../' + file
if not os.path.isfile(file):
file = '../' + file
print('Loading file ',file)
df = pd.read_csv(file)
df = df.loc[df.date > start_date]
df = df.loc[df.date < end_date]
return df
def load_all_data(start_date, end_date):
df_open = load_all_data_from_file('etf_data_open.csv', start_date, end_date)
df_close = load_all_data_from_file('etf_data_close.csv', start_date, end_date)
df_high = load_all_data_from_file('etf_data_high.csv', start_date, end_date)
df_low = load_all_data_from_file('etf_data_low.csv', start_date, end_date)
df_adj_close = load_all_data_from_file('etf_data_adj_close.csv', start_date, end_date)
return df_open, df_close, df_high, df_low, df_adj_close
|
[
"os.path.isfile",
"numpy.isnan",
"pandas.read_csv"
] |
[((927, 944), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (938, 944), True, 'import pandas as pd\n'), ((1757, 1774), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (1768, 1774), True, 'import pandas as pd\n'), ((2559, 2576), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (2570, 2576), True, 'import pandas as pd\n'), ((3366, 3383), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (3377, 3383), True, 'import pandas as pd\n'), ((676, 696), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (690, 696), False, 'import os\n'), ((746, 766), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (760, 766), False, 'import os\n'), ((819, 839), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (833, 839), False, 'import os\n'), ((1506, 1526), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (1520, 1526), False, 'import os\n'), ((1576, 1596), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (1590, 1596), False, 'import os\n'), ((1649, 1669), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (1663, 1669), False, 'import os\n'), ((2334, 2354), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2348, 2354), False, 'import os\n'), ((2404, 2424), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2418, 2424), False, 'import os\n'), ((2465, 2485), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2479, 2485), False, 'import os\n'), ((3141, 3161), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3155, 3161), False, 'import os\n'), ((3211, 3231), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3225, 3231), False, 'import os\n'), ((3272, 3292), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3286, 3292), False, 'import os\n'), ((1181, 1194), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (1189, 1194), True, 'import numpy as np\n'), ((2011, 2024), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (2019, 2024), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import os
from datetime import datetime, date, timedelta
from sklearn.linear_model import LinearRegression
import scipy.stats
import math
import sys
import locator
file_path = os.path.dirname(os.path.realpath(__file__))
proj_path = os.path.abspath(os.path.join(file_path,".."))
datagouv_path = os.path.join(proj_path,"datagouv")
gen_path = os.path.join(proj_path,"../../gatsby/trends/generated")
datagen_path = os.path.join(gen_path,"data")
def downloadIfNeeded(fileName):
need_download = True
if os.path.exists(fileName):
today = date.today()
last_modified_ts = os.path.getmtime(fileName)
mtime = date.fromtimestamp(last_modified_ts)
if (today-mtime).days <= 1:
need_download = False
if need_download:
print("%s Needs a download"%fileName)
if "department" in fileName:
command = "/usr/bin/wget https://www.data.gouv.fr/fr/datasets/r/eceb9fb4-3ebc-4da3-828d-f5939712600a -O %s"%fileName
elif "hospitalieres" in fileName:
command = "/usr/bin/wget https://www.data.gouv.fr/fr/datasets/r/6fadff46-9efd-4c53-942a-54aca783c30c -O %s"%fileName
os.system(command)
else:
print("%s est à jour"%fileName)
urgence_data = os.path.join(datagouv_path,"department_latest.csv")
downloadIfNeeded(urgence_data)
urgence_df = pd.read_csv(urgence_data, sep=";", dtype= {'dep':'object'})
hosp_data = os.path.join(datagouv_path,"donnees_hospitalieres_latest.csv")
downloadIfNeeded(hosp_data)
hosp_df = pd.read_csv(hosp_data, sep=';')
# Timestamp of the data (wget keeps the website's mtime)
last_modified_ts = os.path.getmtime(urgence_data)
data_date = datetime.fromtimestamp(last_modified_ts)
# extract the all-age-class rows
urgence_df = urgence_df[urgence_df["sursaud_cl_age_corona"] == 0].copy()
# Read the department codes file
depts = pd.read_csv(os.path.join(datagouv_path,"departement2020.csv"))
depts.set_index(depts.dep, inplace=True)
depts.drop("dep",axis=1, inplace=True)
# Read the regions file
regs = pd.read_csv(os.path.join(datagouv_path,"region2020.csv"))
#regs["reg"] = regs["reg"].apply(lambda x: str(x) if len(str(x)) > 1 else '0' + str(x))
regs.set_index(regs.reg, inplace=True)
regs.drop("reg", axis=1, inplace=True)
# Add department name, region code and region name to the emergency-room data
urgence_df["dep_name"] = urgence_df["dep"].apply(lambda x: depts.loc[str(x)].libelle if pd.notnull(x) else None)
urgence_df["reg"] = urgence_df["dep"].apply(lambda x: depts.loc[x].reg if pd.notnull(x) else None)
urgence_df["reg_name"] = urgence_df["reg"].apply(lambda x: regs.loc[x].libelle if pd.notnull(x) else None)
# Add department name, region code and region name to the hospital data
hosp_df["dep"] = hosp_df["dep"].apply(lambda x: x if len(x) > 1 else '0'+x)
# Remove <NAME> (department code 978)
hosp_df=hosp_df[hosp_df.dep != "978"]
hosp_df["dep_name"] = hosp_df["dep"].apply(lambda x: depts.loc[str(x)].libelle if pd.notnull(x) else None)
hosp_df["reg"] = hosp_df["dep"].apply(lambda x: depts.loc[x].reg if pd.notnull(x) else None)
hosp_df["reg_name"] = hosp_df["reg"].apply(lambda x: regs.loc[x].libelle if pd.notnull(x) else None)
# Format dates as dd/mm/yy and use them as the index
def convertDate(isodate):
l = isodate.split('-')
return l[2]+"/"+l[1]+"/"+l[0][2:]
def addDays(df, duration):
    # Extend the dataframe by the given number of days
d = df.index[-1]
a = d.split("/")
dd = int(a[0])
mm = int(a[1])
yy = 2000 + int(a[2])
first = date(yy,mm,dd)+ timedelta(days=1)
last = date(yy,mm,dd)+ timedelta(days=duration)
current = first
indexExtension = []
while current <= last:
ds = str(current.day)
if len(ds) == 1:
ds = '0'+ds
ms = str(current.month)
if len(ms) == 1:
ms = '0'+ms
ys = str(current.year)[2:]
di = ds + '/' + ms + '/' + ys
indexExtension.append(di)
current += timedelta(days = 1)
return df.reindex(index = df.index.append(pd.Index(indexExtension)))
# Compute the confidence interval of the prediction
# See http://pageperso.lif.univ-mrs.fr/~alexis.nasr/Ens/IAAAM2/SlidesModStat_C1_print.pdf
def estimateSigma(reg, X, Y):
Y_pred = reg.predict(X)
err = (Y - Y_pred)**2
return math.sqrt(err.sum() / (len(err) - 2))
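# Hedged worked sketch (illustration only, never called; the toy data and names are made up):
# how estimateSigma feeds the t-based confidence band that make_trend() builds around the fit.
def _demo_confidence_band():
    rng = np.random.default_rng(0)
    x = np.arange(15).reshape(-1, 1)
    y = 0.1 * x.ravel() + rng.normal(scale=0.2, size=15)
    reg = LinearRegression().fit(x, y)
    sigma = estimateSigma(reg, x, y)
    x_mean = x.ravel().mean()
    half_width = sigma * scipy.stats.t.ppf(0.95, len(x) - 2) * np.sqrt(
        1.0 / len(x) + (x.ravel() - x_mean) ** 2 / ((x.ravel() - x_mean) ** 2).sum())
    return reg.predict(x) - half_width, reg.predict(x) + half_width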
def plot_non_zero(ax, logScale, df, col, label):
col_draw = col
if logScale:
col_draw = "nnz_%s"%col
df[col_draw] = df[col]
df.loc[df[col] == 0 ,col_draw] = np.nan
ax.plot(df[col_draw], label=label)
def make_hosp_bars(has_reg, df_source, hosp_col, reg_index, source_label, ax):
if has_reg:
        # Show the last day's data differently, since not everything has been reported yet.
        # That day is not used when computing the trend.
df_source["valid_hosp"] = np.nan
df_source["uncertain_hosp"] = np.nan
df_source.loc[df_source.index[:df_source.index.get_loc(reg_index[-1])+1], "valid_hosp"] = df_source[hosp_col]
last_day = df_source.index[df_source.index.get_loc(reg_index[-1]) + 1]
df_source.loc[last_day,"uncertain_hosp"] = df_source.loc[last_day,hosp_col]
ax.bar(df_source.index,
df_source["valid_hosp"],
label = "Nouvelles hospitalisations quotidiennes - données %s"%source_label,
alpha=0.3,
color="blue")
ax.bar(df_source.index,
df_source["uncertain_hosp"],
alpha=0.2,
edgecolor="black",
linestyle="--",
color="blue")
else:
        # The last day is not easy to get here, so it is not shown. In any case, no trend is computed.
ax.bar(df_source.index,
df_source[hosp_col],
label = "Nouvelles hospitalisations quotidiennes - données %s"%source_label,
alpha=0.3,
color="blue")
def make_curve(urgence, urg_index, hosp, hosp_index, src_urgence, roll_urg, roll_hosp, file_radical, df_row, label, logScale):
# Plot
fig = plt.figure(figsize=(10,6))
ax = plt.axes()
has_reg = df_row["reg_start"] is not None
    # Add a scale on the right-hand side for easier reading on phones
ax.yaxis.set_ticks_position('both')
ax.tick_params(labeltop=False, labelright=True)
if src_urgence:
make_hosp_bars(has_reg, urgence, "nbre_hospit_corona", urg_index, "urgences", ax)
ax.plot(urgence[roll_urg], label="Nouvelles hospitalisations quotidiennes lissées - données urgences", color="orange")
if has_reg:
ax.plot(urgence["pred_hosp"], "--", label="Tendance hospitalisations quotidiennes -- données urgences", color="orange")
ax.fill_between(urgence.index, urgence["pred_max"], urgence["pred_min"],color="orange",alpha=0.3, label="Intervalle de confiance")
            # Darker shade over the prediction range
reg_end = urg_index[-1]
pred_index = urgence.index[urgence.index.get_loc(reg_end) + 1 :]
ax.fill_between(pred_index, urgence.loc[pred_index, "pred_max"], urgence.loc[pred_index, "pred_min"],color="orange",alpha=0.2)
        # Other data (not used for the trend)
ax.plot(hosp[roll_hosp], label="Nouvelles hospitalisations quotidiennes lissées - données hôpitaux", color="red")
else:
make_hosp_bars(has_reg, hosp, "incid_hosp", hosp_index, "hôpitaux", ax)
ax.plot(hosp[roll_hosp], label="Nouvelles hospitalisations quotidiennes lissées - données hôpitaux", color="orange")
if has_reg:
ax.plot(hosp["pred_hosp"], "--", label="Tendance hospitalisations quotidiennes - données hôpitaux", color="orange")
ax.fill_between(hosp.index, hosp["pred_max"], hosp["pred_min"],color="orange",alpha=0.3, label="Intervalle de confiance")
            # Darker shade over the prediction range
reg_end = hosp_index[-1]
pred_index = hosp.index[hosp.index.get_loc(reg_end) + 1 :]
ax.fill_between(pred_index, hosp.loc[pred_index, "pred_max"], hosp.loc[pred_index,"pred_min"],color="orange",alpha=0.2)
#ax.xaxis.set_major_locator(plt.MaxNLocator(10))
ax.xaxis.set_major_locator(locator.FirstOfMonthLocator())
#ax.xaxis.set_minor_locator(plt.MultipleLocator(1))
ax.legend()
if src_urgence:
        # To use this x-limit with the hospital data, the index would have to be extended back to 24 February.
ax.set_xlim(left = "24/02/20", right=urgence.index[-1])
if logScale:
plt.yscale("log")
# Same scale for log curves
# Limit high enough to let room for the legend
ax.set_ylim(0.1,50000)
else:
if has_reg:
            # Guard against predictions that diverge too much
df_source = urgence if src_urgence else hosp
hosp_col = "nbre_hospit_corona" if src_urgence else "incid_hosp"
if df_source.loc[df_source.index[-1], "pred_max"] > df_source[hosp_col].max()*4:
ax.set_ylim(0, df_source[hosp_col].max()*4)
ax.set_title("Hospitalisations COVID-19 quotidiennes en %s - échelle %s"%(label,"logarithmique" if logScale else "linéaire"))
file_name = file_radical + ("_log" if logScale else "_lin") + ".png"
plt.savefig(os.path.join(datagen_path,file_name))
df_row["log_curve" if logScale else "lin_curve"] = file_name
plt.close()
def aggregate(df_source, date_col):
df_source = df_source.groupby([date_col]).agg('sum')
    # Convert the dates now that sorting is done
df_source["date"] = df_source.index
df_source["date"] = df_source["date"].apply(convertDate)
df_source = df_source.set_index(["date"])
return df_source
def make_rolling(df_source, col):
roll_col = "rolling_%s"%col
nnz_col = "nnz_%s"%col
df_source[nnz_col] = df_source[col]
df_source.loc[df_source[nnz_col]==0,nnz_col] = 0.1
    # Compute the smoothed (geometric) rolling mean
df_source[roll_col] = df_source[nnz_col].rolling(7,center=True).aggregate(lambda x: x.prod()**(1./7))
    # Replace values that equal 0.1 with 0
df_source.loc[df_source[roll_col]<=0.101, roll_col] = 0
return roll_col
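# Hedged illustration (never called): the centred 7-day geometric mean computed above is the
# exponential of the rolling arithmetic mean of the logs; both forms are shown side by side.
def _demo_geometric_rolling(values):
    s = pd.Series(values, dtype=float)
    s[s == 0] = 0.1
    prod_form = s.rolling(7, center=True).aggregate(lambda x: x.prod() ** (1. / 7))
    log_form = np.exp(np.log(s).rolling(7, center=True).mean())
    return prod_form, log_form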
def extract_recent(source, history, use_latest):
if use_latest:
return source.iloc[-history:]
else:
return source.iloc[-history-1:-1]
def make_trend(df_source, hosp_col, roll_col, recent_hist):
recent = extract_recent(df_source, recent_hist, False)
nullVals = len(recent[recent[hosp_col] == 0])
if nullVals == 0:
reg_col = hosp_col
else:
        # Replace zero values with 0.1 (or 0 if the rolling mean is 0)
reg_col = "%s_patch"%hosp_col
df_source[reg_col] = df_source[hosp_col]
df_source.loc[df_source[reg_col] == 0, reg_col] = 0.1
df_source.loc[df_source[roll_col] == 0, reg_col] = 0
        # If there are more than 2 zero values, also double the estimation window
if nullVals > 2:
recent_hist *= 2
else:
recent_hist = int(recent_hist*1.5)
    # Add a day-number column
df_source["num_jour"] = np.arange(len(df_source))
for_regression = extract_recent(df_source, recent_hist,False)
    # If there is not enough data, do not generate a trend
if len(for_regression[for_regression[reg_col] > 0]) < recent_hist*0.5:
return None, None, df_source
    # Drop zero or undefined values
for_regression = for_regression[for_regression[reg_col] > 0]
reg = LinearRegression()
X_train = for_regression.drop(columns = [c for c in for_regression.columns if c != "num_jour"])
Y_train = np.log(for_regression[reg_col])
reg.fit(X_train,Y_train)
    # Extract the slope of the regression
slope = reg.coef_[0]
timeToDouble = math.log(2)/slope
    # Add two weeks of data and update the num_jour column
df_source = addDays(df_source, 15)
df_source["num_jour"] = np.arange(len(df_source))
    # Add the prediction to the data
df_source["pred_hosp"]=np.nan
    # Prediction range: in the descending phase - up to last_day
predIndex = df_source[(df_source["num_jour"] >= X_train.iloc[0]["num_jour"])].index
X = df_source.loc[predIndex].drop(columns = [c for c in df_source.columns if c != "num_jour"])
df_source.loc[predIndex,"pred_hosp"]=np.exp(reg.predict(X))
    # Confidence interval
sigma = estimateSigma(reg,X_train,Y_train)
X_train_mean = X_train["num_jour"].mean()
    # Add the confidence interval in log space (alpha = 10% -- 1 - alpha/2 = 0.95)
df_source["conf_log_mean"] = np.nan
    # Range for the confidence interval on the mean: starting from the data used for the linear regression
df_source.loc[predIndex,"conf_log_mean"] = np.sqrt(1./len(X_train) + \
(df_source["num_jour"]-X_train_mean)**2 / ((X_train["num_jour"]-X_train_mean)**2).sum()) * \
sigma*scipy.stats.t.ppf(0.95,len(X_train)-2)
df_source["pred_max"] = df_source["pred_hosp"]*np.exp(df_source["conf_log_mean"])
df_source["pred_min"] = df_source["pred_hosp"]/np.exp(df_source["conf_log_mean"])
return for_regression.index, timeToDouble, df_source
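# Hedged sketch (illustration only, never called): the doubling time reported by make_trend
# is log(2) / slope of the log-linear fit; with exactly exponential toy data it recovers the
# true doubling time.
def _demo_doubling_time():
    days = np.arange(15).reshape(-1, 1)
    true_double = 7.0
    counts = 10.0 * 2 ** (days.ravel() / true_double)
    reg = LinearRegression().fit(days, np.log(counts))
    return math.log(2) / reg.coef_[0]   # ~7.0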
def make_trend_metadata(df_row, reg_index, df_source, timeToDouble, hosp_rate_row_col):
df_row["reg_start"] = reg_index[0] if reg_index is not None else None
df_row["reg_end"]=reg_index[-1] if reg_index is not None else None
cont_end_loc = df_source.index.get_loc(reg_index[-1]) - 11 if reg_index is not None else None
cont_start_loc = df_source.index.get_loc(reg_index[0]) - 11 if reg_index is not None else None
df_row["cont_end"]=df_source.index[cont_end_loc] if reg_index is not None else None
df_row["cont_start"]=df_source.index[cont_start_loc] if reg_index is not None else None
df_row["timeToDouble"] = timeToDouble
if df_row["reg_start"] is not None:
if df_source["pred_max"][-1] > df_row[hosp_rate_row_col]*2 and df_source["pred_min"][-1] < df_row[hosp_rate_row_col]/2.:
df_row["trend_confidence"] = 0
else:
df_row["trend_confidence"] = 1
else:
        # No trend if there was not enough data to compute it
df_row["trend_confidence"] = 0
def make_data(urgence, hosp, file_radical, df_row, label):
urgence = aggregate(urgence, "date_de_passage")
hosp = aggregate(hosp, "jour")
recent_hist = 15
recent = urgence.loc[urgence.index[-recent_hist:]]
recent = extract_recent(urgence, recent_hist, False)
    # Use the emergency-room data if at least one case is reported in the "recent period"
src_urgence = len(recent[recent["nbre_hospit_corona"] > 0]) >= 1
roll_urg = make_rolling(urgence, "nbre_hospit_corona")
roll_hosp = make_rolling(hosp, "incid_hosp")
    # Use the last day of the smoothed mean as the reported number of hospitalizations per day
if src_urgence:
df_row["hosp_rate_urgence"] = urgence[urgence[roll_urg] > 0 ][roll_urg][-1]
df_row["hosp_rate_all"] = hosp[hosp[roll_hosp] > 0 ][roll_hosp][-1]
df_row["rate_date"] = urgence[urgence[roll_urg] > 0 ].index[-1]
else:
df_row["hosp_rate_all"] = hosp[hosp[roll_hosp] > 0 ][roll_hosp][-1]
df_row["rate_date"] = hosp[hosp[roll_hosp] > 0 ].index[-1]
# make_trend modifies the dataframe (it extends the index) so we need to update the df variables
if src_urgence:
urg_index, urg_timeToDouble, urgence = make_trend(urgence, "nbre_hospit_corona", roll_urg, recent_hist)
else:
# Python interpreter complains if the value is not assigned
urg_index = None
    # Compute the trend on the hospital data in all cases, even though it is not
    # used for now when the emergency-room data is usable
hosp_index, hosp_timeToDouble, hosp = make_trend(hosp, "incid_hosp", roll_hosp, recent_hist)
if src_urgence:
make_trend_metadata(df_row, urg_index, urgence,urg_timeToDouble, "hosp_rate_urgence")
else:
make_trend_metadata(df_row, hosp_index,hosp, hosp_timeToDouble, "hosp_rate_all")
make_curve(urgence, urg_index, hosp, hosp_index, src_urgence, roll_urg, roll_hosp, file_radical, df_row, label, True)
make_curve(urgence, urg_index, hosp, hosp_index, src_urgence, roll_urg, roll_hosp, file_radical, df_row, label, False)
common_fields = ["log_curve", "lin_curve","timeToDouble", "reg_start", "reg_end", "cont_start", "cont_end", "rate_date", "hosp_rate_urgence", "hosp_rate_all", "trend_confidence"]
fr_summary = pd.DataFrame(index=["France"],columns=["data_date"] + common_fields)
fr_summary.loc["France","data_date"] = data_date.strftime("%d/%m/%Y %H:%M")
make_data(urgence_df, hosp_df, "france", fr_summary.loc["France"], "France")
fr_summary.to_csv(os.path.join(datagen_path, "france.csv"), index_label='id')
metropole = [r for r in regs.index if r > 10]
drom = [r for r in regs.index if r < 10]
reg_summary = pd.DataFrame(index = metropole+drom, columns=["reg_name"] + common_fields)
dep_summary = pd.DataFrame(index = depts.index, columns=["dep_name", "reg"] + common_fields)
for reg in metropole + drom:
reg_name = regs.loc[reg]["libelle"]
file_radical = code = "r_" + str(reg)
print(reg, reg_name)
reg_summary.loc[reg]["reg_name"] = reg_name
make_data(urgence_df[urgence_df["reg"] == reg], hosp_df[hosp_df["reg"] == reg], file_radical, reg_summary.loc[reg], reg_name)
reg_depts = depts[depts["reg"]==reg]
for dept in reg_depts.index:
dep_name = reg_depts.loc[dept,"libelle"]
dep_summary.loc[dept,"reg"] = reg
dep_summary.loc[dept,"dep_name"] = dep_name
file_radical = code = "d_" + str(dept)
print("\t%s %s"%(dept, dep_name))
make_data(urgence_df[urgence_df["dep"] == dept], hosp_df[hosp_df["dep"] == dept], file_radical, dep_summary.loc[dept], dep_name)
reg_summary.to_csv(os.path.join(datagen_path, "regions.csv"), index_label="reg")
dep_summary.to_csv(os.path.join(datagen_path, "departements.csv"), index_label="dep")
|
[
"pandas.read_csv",
"numpy.log",
"math.log",
"pandas.Index",
"datetime.date.fromtimestamp",
"locator.FirstOfMonthLocator",
"datetime.timedelta",
"pandas.notnull",
"os.path.exists",
"matplotlib.pyplot.close",
"numpy.exp",
"datetime.date",
"pandas.DataFrame",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.axes",
"os.path.getmtime",
"datetime.date.today",
"sklearn.linear_model.LinearRegression",
"datetime.datetime.fromtimestamp",
"os.path.join",
"os.path.realpath",
"matplotlib.pyplot.figure",
"os.system"
] |
[((366, 401), 'os.path.join', 'os.path.join', (['proj_path', '"""datagouv"""'], {}), "(proj_path, 'datagouv')\n", (378, 401), False, 'import os\n'), ((412, 468), 'os.path.join', 'os.path.join', (['proj_path', '"""../../gatsby/trends/generated"""'], {}), "(proj_path, '../../gatsby/trends/generated')\n", (424, 468), False, 'import os\n'), ((483, 513), 'os.path.join', 'os.path.join', (['gen_path', '"""data"""'], {}), "(gen_path, 'data')\n", (495, 513), False, 'import os\n'), ((1322, 1374), 'os.path.join', 'os.path.join', (['datagouv_path', '"""department_latest.csv"""'], {}), "(datagouv_path, 'department_latest.csv')\n", (1334, 1374), False, 'import os\n'), ((1422, 1481), 'pandas.read_csv', 'pd.read_csv', (['urgence_data'], {'sep': '""";"""', 'dtype': "{'dep': 'object'}"}), "(urgence_data, sep=';', dtype={'dep': 'object'})\n", (1433, 1481), True, 'import pandas as pd\n'), ((1495, 1558), 'os.path.join', 'os.path.join', (['datagouv_path', '"""donnees_hospitalieres_latest.csv"""'], {}), "(datagouv_path, 'donnees_hospitalieres_latest.csv')\n", (1507, 1558), False, 'import os\n'), ((1600, 1631), 'pandas.read_csv', 'pd.read_csv', (['hosp_data'], {'sep': '""";"""'}), "(hosp_data, sep=';')\n", (1611, 1631), True, 'import pandas as pd\n'), ((1706, 1736), 'os.path.getmtime', 'os.path.getmtime', (['urgence_data'], {}), '(urgence_data)\n', (1722, 1736), False, 'import os\n'), ((1749, 1789), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['last_modified_ts'], {}), '(last_modified_ts)\n', (1771, 1789), False, 'from datetime import datetime, date, timedelta\n'), ((17105, 17174), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "['France']", 'columns': "(['data_date'] + common_fields)"}), "(index=['France'], columns=['data_date'] + common_fields)\n", (17117, 17174), True, 'import pandas as pd\n'), ((17510, 17584), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '(metropole + drom)', 'columns': "(['reg_name'] + common_fields)"}), "(index=metropole + drom, columns=['reg_name'] + common_fields)\n", (17522, 17584), True, 'import pandas as pd\n'), ((17599, 17675), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'depts.index', 'columns': "(['dep_name', 'reg'] + common_fields)"}), "(index=depts.index, columns=['dep_name', 'reg'] + common_fields)\n", (17611, 17675), True, 'import pandas as pd\n'), ((264, 290), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (280, 290), False, 'import os\n'), ((320, 349), 'os.path.join', 'os.path.join', (['file_path', '""".."""'], {}), "(file_path, '..')\n", (332, 349), False, 'import os\n'), ((578, 602), 'os.path.exists', 'os.path.exists', (['fileName'], {}), '(fileName)\n', (592, 602), False, 'import os\n'), ((1967, 2017), 'os.path.join', 'os.path.join', (['datagouv_path', '"""departement2020.csv"""'], {}), "(datagouv_path, 'departement2020.csv')\n", (1979, 2017), False, 'import os\n'), ((2148, 2193), 'os.path.join', 'os.path.join', (['datagouv_path', '"""region2020.csv"""'], {}), "(datagouv_path, 'region2020.csv')\n", (2160, 2193), False, 'import os\n'), ((6272, 6299), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (6282, 6299), True, 'from matplotlib import pyplot as plt\n'), ((6308, 6318), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (6316, 6318), True, 'from matplotlib import pyplot as plt\n'), ((9690, 9701), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9699, 9701), True, 'from matplotlib import pyplot as plt\n'), ((11870, 11888), 
'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (11886, 11888), False, 'from sklearn.linear_model import LinearRegression\n'), ((12003, 12034), 'numpy.log', 'np.log', (['for_regression[reg_col]'], {}), '(for_regression[reg_col])\n', (12009, 12034), True, 'import numpy as np\n'), ((17345, 17385), 'os.path.join', 'os.path.join', (['datagen_path', '"""france.csv"""'], {}), "(datagen_path, 'france.csv')\n", (17357, 17385), False, 'import os\n'), ((18457, 18498), 'os.path.join', 'os.path.join', (['datagen_path', '"""regions.csv"""'], {}), "(datagen_path, 'regions.csv')\n", (18469, 18498), False, 'import os\n'), ((18538, 18584), 'os.path.join', 'os.path.join', (['datagen_path', '"""departements.csv"""'], {}), "(datagen_path, 'departements.csv')\n", (18550, 18584), False, 'import os\n'), ((620, 632), 'datetime.date.today', 'date.today', ([], {}), '()\n', (630, 632), False, 'from datetime import datetime, date, timedelta\n'), ((660, 686), 'os.path.getmtime', 'os.path.getmtime', (['fileName'], {}), '(fileName)\n', (676, 686), False, 'import os\n'), ((703, 739), 'datetime.date.fromtimestamp', 'date.fromtimestamp', (['last_modified_ts'], {}), '(last_modified_ts)\n', (721, 739), False, 'from datetime import datetime, date, timedelta\n'), ((1236, 1254), 'os.system', 'os.system', (['command'], {}), '(command)\n', (1245, 1254), False, 'import os\n'), ((3656, 3672), 'datetime.date', 'date', (['yy', 'mm', 'dd'], {}), '(yy, mm, dd)\n', (3660, 3672), False, 'from datetime import datetime, date, timedelta\n'), ((3672, 3689), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3681, 3689), False, 'from datetime import datetime, date, timedelta\n'), ((3701, 3717), 'datetime.date', 'date', (['yy', 'mm', 'dd'], {}), '(yy, mm, dd)\n', (3705, 3717), False, 'from datetime import datetime, date, timedelta\n'), ((3717, 3741), 'datetime.timedelta', 'timedelta', ([], {'days': 'duration'}), '(days=duration)\n', (3726, 3741), False, 'from datetime import datetime, date, timedelta\n'), ((4102, 4119), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4111, 4119), False, 'from datetime import datetime, date, timedelta\n'), ((8484, 8513), 'locator.FirstOfMonthLocator', 'locator.FirstOfMonthLocator', ([], {}), '()\n', (8511, 8513), False, 'import locator\n'), ((8815, 8832), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (8825, 8832), True, 'from matplotlib import pyplot as plt\n'), ((9583, 9620), 'os.path.join', 'os.path.join', (['datagen_path', 'file_name'], {}), '(datagen_path, file_name)\n', (9595, 9620), False, 'import os\n'), ((12150, 12161), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (12158, 12161), False, 'import math\n'), ((13497, 13531), 'numpy.exp', 'np.exp', (["df_source['conf_log_mean']"], {}), "(df_source['conf_log_mean'])\n", (13503, 13531), True, 'import numpy as np\n'), ((13583, 13617), 'numpy.exp', 'np.exp', (["df_source['conf_log_mean']"], {}), "(df_source['conf_log_mean'])\n", (13589, 13617), True, 'import numpy as np\n'), ((2535, 2548), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (2545, 2548), True, 'import pandas as pd\n'), ((2634, 2647), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (2644, 2647), True, 'import pandas as pd\n'), ((2741, 2754), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (2751, 2754), True, 'import pandas as pd\n'), ((3073, 3086), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (3083, 3086), True, 'import pandas as pd\n'), ((3166, 3179), 
'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (3176, 3179), True, 'import pandas as pd\n'), ((3267, 3280), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (3277, 3280), True, 'import pandas as pd\n'), ((4169, 4193), 'pandas.Index', 'pd.Index', (['indexExtension'], {}), '(indexExtension)\n', (4177, 4193), True, 'import pandas as pd\n')]
|
import unittest
import numpy as np
from dolo.numeric.ncpsolve import ncpsolve, smooth
def josephy(x):
# Computes the function value F(x) of the NCP-example by Josephy.
n=len(x)
Fx=np.zeros(n)
Fx[0]=3*x[0]**2+2*x[0]*x[1]+2*x[1]**2+x[2]+3*x[3]-6
Fx[1]=2*x[0]**2+x[0]+x[1]**2+3*x[2]+2*x[3]-2
Fx[2]=3*x[0]**2+x[0]*x[1]+2*x[1]**2+2*x[2]+3*x[3]-1
    Fx[3]=x[0]**2+3*x[1]**2+2*x[2]+3*x[3]-3
return Fx
def Djosephy(x):
# Local Variables: x, DFx, n
# Function calls: Djosephy, zeros, length
#%
#% Computes the Jacobian DF(x) of the NCP-example by Josephy
#%
n = len(x)
DFx = np.zeros( (n, n) )
DFx[0,0] = 6.*x[0]+2.*x[1]
DFx[0,1] = 2.*x[0]+4.*x[1]
DFx[0,2] = 1.
DFx[0,3] = 3.
DFx[1,0] = 4.*x[0]+1.
DFx[1,1] = 2.*x[1]
DFx[1,2] = 3.
DFx[1,3] = 2.
DFx[2,0] = 6.*x[0]+x[1]
DFx[2,1] = x[0]+4.*x[1]
DFx[2,2] = 2.
DFx[2,3] = 3.
DFx[3,0] = 2.*x[0]
DFx[3,1] = 6.*x[1]
DFx[3,2] = 2.
DFx[3,3] = 3.
return DFx
class SerialSolve(unittest.TestCase):
def test_simple_solve(self):
x0 = np.array([0.5,0.5,0.5,0.5])
lb = np.array([0.0,0.6,0.0,0.0])
ub = np.array([1.0,1.0,1.0,0.4])
fval = np.array([ 0.5, 0.5, 0.1,0.5 ])
jac = np.array([
[1.0,0.2,0.1,0.0],
[1.0,0.2,0.1,0.0],
[0.0,1.0,0.2,0.0],
[0.1,1.0,0.2,0.1]
])
N = 10
d = len(fval)
from dolo.numeric.solver import solver
sol_fsolve = solver(josephy, x0, method='fsolve')
sol_lmmcp = solver(josephy, x0, method='lmmcp')
from numpy.testing import assert_almost_equal
assert_almost_equal(sol_fsolve, sol_lmmcp)
def test_serial_problems(self):
from numpy import inf
import numpy
fun = lambda x: [-josephy(x), -Djosephy(x)]
x0=np.array( [1.25, 0.01, 0.01, 0.50] )
lb=np.array( [0.00, 0.00, 0.00, 0.00] )
ub=np.array( [inf, inf, inf, inf] )
resp = ncpsolve(fun, lb, ub, x0, tol=1e-15)
sol = np.array( [ 1.22474487e+00, 0.00000000e+00, 3.60543164e-17, 5.00000000e-01])
from numpy.testing import assert_almost_equal, assert_equal
assert_almost_equal(sol, resp)
N = 10
d = len(x0)
serial_sol_check = np.zeros((d,N))
for n in range(N):
serial_sol_check[:,n] = resp[0]
s_x0 = np.column_stack([x0]*N)
s_lb = np.column_stack([lb]*N)
s_ub = np.column_stack([ub]*N)
def serial_fun(xvec, deriv=None):
resp = np.zeros( (d,N) )
if deriv=='serial':
dresp = np.zeros( (d,d,N) )
elif deriv=='full':
dresp = np.zeros( (d,N,d,N) )
for n in range(N):
[v, dv] = fun(xvec[:,n])
resp[:,n] = v
if deriv=='serial':
dresp[:,:,n] = dv
elif deriv=='full':
dresp[:,n,:,n] = dv
# if deriv=='full':
# dresp = dresp.swapaxes(0,2).swapaxes(1,3)
if deriv is None:
return resp
else:
return [resp, dresp]
serial_fun_val = lambda x: serial_fun(x)
serial_fun_serial_jac = lambda x: serial_fun(x,deriv='serial')[1]
serial_fun_full_jac = lambda x: serial_fun(x,deriv='full')[1]
from dolo.numeric.solver import solver
print("Serial Bounded solution : ncpsolve")
serial_sol_with_bounds_without_jac = solver( serial_fun_val, s_x0, lb=s_lb, ub=s_ub, method='ncpsolve', serial_problem=True)
print("Serial Bounded solution (with jacobian) : ncpsolve")
serial_sol_with_bounds_with_jac = solver( serial_fun_val, s_x0, s_lb, s_ub, jac=serial_fun_serial_jac, method='ncpsolve', serial_problem=True)
print("Bounded solution : ncpsolve")
sol_with_bounds_without_jac = solver( serial_fun_val, s_x0, s_lb, s_ub, method='ncpsolve', serial_problem=False)
print("Bounded solution (with jacobian) : ncpsolve")
sol_with_bounds_with_jac = solver( serial_fun_val, s_x0, s_lb, s_ub, jac=serial_fun_full_jac, method='ncpsolve', serial_problem=False)
print("Serial Unbounded solution : ncpsolve")
serial_sol_without_bounds_without_jac = solver( serial_fun_val, s_x0, method='newton', serial_problem=True)
print("Unbounded solution : fsolve")
sol_without_bounds_without_jac = solver( serial_fun_val, s_x0, method='fsolve', serial_problem=False)
print("Unbounded solution (with jacobian) : fsolve")
sol_without_bounds = solver( serial_fun_val, s_x0, jac=serial_fun_full_jac, method='fsolve', serial_problem=False)
print("Unbounded solution : lmmcp")
sol_without_bounds = solver( serial_fun_val, s_x0, jac=serial_fun_full_jac, method='lmmcp', serial_problem=False)
# TODO : check that results are equal to the benchmark
if __name__ == '__main__':
unittest.main()
|
[
"numpy.column_stack",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_almost_equal",
"dolo.numeric.ncpsolve.ncpsolve",
"unittest.main",
"dolo.numeric.solver.solver"
] |
[((198, 209), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (206, 209), True, 'import numpy as np\n'), ((633, 649), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (641, 649), True, 'import numpy as np\n'), ((5046, 5061), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5059, 5061), False, 'import unittest\n'), ((1111, 1141), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5, 0.5])\n', (1119, 1141), True, 'import numpy as np\n'), ((1154, 1184), 'numpy.array', 'np.array', (['[0.0, 0.6, 0.0, 0.0]'], {}), '([0.0, 0.6, 0.0, 0.0])\n', (1162, 1184), True, 'import numpy as np\n'), ((1195, 1225), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 0.4]'], {}), '([1.0, 1.0, 1.0, 0.4])\n', (1203, 1225), True, 'import numpy as np\n'), ((1239, 1269), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.1, 0.5]'], {}), '([0.5, 0.5, 0.1, 0.5])\n', (1247, 1269), True, 'import numpy as np\n'), ((1286, 1388), 'numpy.array', 'np.array', (['[[1.0, 0.2, 0.1, 0.0], [1.0, 0.2, 0.1, 0.0], [0.0, 1.0, 0.2, 0.0], [0.1, \n 1.0, 0.2, 0.1]]'], {}), '([[1.0, 0.2, 0.1, 0.0], [1.0, 0.2, 0.1, 0.0], [0.0, 1.0, 0.2, 0.0],\n [0.1, 1.0, 0.2, 0.1]])\n', (1294, 1388), True, 'import numpy as np\n'), ((1539, 1575), 'dolo.numeric.solver.solver', 'solver', (['josephy', 'x0'], {'method': '"""fsolve"""'}), "(josephy, x0, method='fsolve')\n", (1545, 1575), False, 'from dolo.numeric.solver import solver\n'), ((1597, 1632), 'dolo.numeric.solver.solver', 'solver', (['josephy', 'x0'], {'method': '"""lmmcp"""'}), "(josephy, x0, method='lmmcp')\n", (1603, 1632), False, 'from dolo.numeric.solver import solver\n'), ((1697, 1739), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sol_fsolve', 'sol_lmmcp'], {}), '(sol_fsolve, sol_lmmcp)\n', (1716, 1739), False, 'from numpy.testing import assert_almost_equal, assert_equal\n'), ((1895, 1928), 'numpy.array', 'np.array', (['[1.25, 0.01, 0.01, 0.5]'], {}), '([1.25, 0.01, 0.01, 0.5])\n', (1903, 1928), True, 'import numpy as np\n'), ((1943, 1973), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (1951, 1973), True, 'import numpy as np\n'), ((1991, 2021), 'numpy.array', 'np.array', (['[inf, inf, inf, inf]'], {}), '([inf, inf, inf, inf])\n', (1999, 2021), True, 'import numpy as np\n'), ((2040, 2076), 'dolo.numeric.ncpsolve.ncpsolve', 'ncpsolve', (['fun', 'lb', 'ub', 'x0'], {'tol': '(1e-15)'}), '(fun, lb, ub, x0, tol=1e-15)\n', (2048, 2076), False, 'from dolo.numeric.ncpsolve import ncpsolve, smooth\n'), ((2093, 2141), 'numpy.array', 'np.array', (['[1.22474487, 0.0, 3.60543164e-17, 0.5]'], {}), '([1.22474487, 0.0, 3.60543164e-17, 0.5])\n', (2101, 2141), True, 'import numpy as np\n'), ((2248, 2278), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sol', 'resp'], {}), '(sol, resp)\n', (2267, 2278), False, 'from numpy.testing import assert_almost_equal, assert_equal\n'), ((2346, 2362), 'numpy.zeros', 'np.zeros', (['(d, N)'], {}), '((d, N))\n', (2354, 2362), True, 'import numpy as np\n'), ((2449, 2474), 'numpy.column_stack', 'np.column_stack', (['([x0] * N)'], {}), '([x0] * N)\n', (2464, 2474), True, 'import numpy as np\n'), ((2488, 2513), 'numpy.column_stack', 'np.column_stack', (['([lb] * N)'], {}), '([lb] * N)\n', (2503, 2513), True, 'import numpy as np\n'), ((2527, 2552), 'numpy.column_stack', 'np.column_stack', (['([ub] * N)'], {}), '([ub] * N)\n', (2542, 2552), True, 'import numpy as np\n'), ((3583, 3673), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0'], {'lb': 's_lb', 'ub': 's_ub', 
'method': '"""ncpsolve"""', 'serial_problem': '(True)'}), "(serial_fun_val, s_x0, lb=s_lb, ub=s_ub, method='ncpsolve',\n serial_problem=True)\n", (3589, 3673), False, 'from dolo.numeric.solver import solver\n'), ((3782, 3894), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0', 's_lb', 's_ub'], {'jac': 'serial_fun_serial_jac', 'method': '"""ncpsolve"""', 'serial_problem': '(True)'}), "(serial_fun_val, s_x0, s_lb, s_ub, jac=serial_fun_serial_jac, method=\n 'ncpsolve', serial_problem=True)\n", (3788, 3894), False, 'from dolo.numeric.solver import solver\n'), ((3976, 4062), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0', 's_lb', 's_ub'], {'method': '"""ncpsolve"""', 'serial_problem': '(False)'}), "(serial_fun_val, s_x0, s_lb, s_ub, method='ncpsolve', serial_problem=\n False)\n", (3982, 4062), False, 'from dolo.numeric.solver import solver\n'), ((4156, 4267), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0', 's_lb', 's_ub'], {'jac': 'serial_fun_full_jac', 'method': '"""ncpsolve"""', 'serial_problem': '(False)'}), "(serial_fun_val, s_x0, s_lb, s_ub, jac=serial_fun_full_jac, method=\n 'ncpsolve', serial_problem=False)\n", (4162, 4267), False, 'from dolo.numeric.solver import solver\n'), ((4369, 4435), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0'], {'method': '"""newton"""', 'serial_problem': '(True)'}), "(serial_fun_val, s_x0, method='newton', serial_problem=True)\n", (4375, 4435), False, 'from dolo.numeric.solver import solver\n'), ((4524, 4591), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0'], {'method': '"""fsolve"""', 'serial_problem': '(False)'}), "(serial_fun_val, s_x0, method='fsolve', serial_problem=False)\n", (4530, 4591), False, 'from dolo.numeric.solver import solver\n'), ((4686, 4782), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0'], {'jac': 'serial_fun_full_jac', 'method': '"""fsolve"""', 'serial_problem': '(False)'}), "(serial_fun_val, s_x0, jac=serial_fun_full_jac, method='fsolve',\n serial_problem=False)\n", (4692, 4782), False, 'from dolo.numeric.solver import solver\n'), ((4855, 4950), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0'], {'jac': 'serial_fun_full_jac', 'method': '"""lmmcp"""', 'serial_problem': '(False)'}), "(serial_fun_val, s_x0, jac=serial_fun_full_jac, method='lmmcp',\n serial_problem=False)\n", (4861, 4950), False, 'from dolo.numeric.solver import solver\n'), ((2614, 2630), 'numpy.zeros', 'np.zeros', (['(d, N)'], {}), '((d, N))\n', (2622, 2630), True, 'import numpy as np\n'), ((2688, 2707), 'numpy.zeros', 'np.zeros', (['(d, d, N)'], {}), '((d, d, N))\n', (2696, 2707), True, 'import numpy as np\n'), ((2764, 2786), 'numpy.zeros', 'np.zeros', (['(d, N, d, N)'], {}), '((d, N, d, N))\n', (2772, 2786), True, 'import numpy as np\n')]
|
from unittest.mock import MagicMock
import google.protobuf.text_format as text_format
import numpy as np
from banditpylib.bandits import CvarReward
from banditpylib.data_pb2 import Actions, Context
from .ts import ThompsonSampling
class TestThompsonSampling:
"""Test thompson sampling policy"""
def test_simple_run(self):
revenues = np.array([0, 0.7, 0.8, 0.9, 1.0])
horizon = 100
reward = CvarReward(0.7)
learner = ThompsonSampling(revenues=revenues,
horizon=horizon,
reward=reward)
# Test warm start
learner.reset()
assert learner.actions(Context()).SerializeToString() == text_format.Parse(
"""
arm_pulls {
arm {
set {
id: 1
}
}
times: 1
}
""", Actions()).SerializeToString()
learner.reset()
# pylint: disable=protected-access
learner._ThompsonSampling__within_warm_start = MagicMock(
return_value=False)
mock_preference_params = np.array([1, 1, 1, 1, 1])
learner._ThompsonSampling__correlated_sampling = MagicMock(
return_value=mock_preference_params)
assert learner.actions(Context()).SerializeToString() == text_format.Parse(
"""
arm_pulls {
arm {
set {
id: 1
id: 2
id: 3
id: 4
}
}
times: 1
}
""", Actions()).SerializeToString()
|
[
"banditpylib.bandits.CvarReward",
"unittest.mock.MagicMock",
"numpy.array",
"banditpylib.data_pb2.Context",
"banditpylib.data_pb2.Actions"
] |
[((346, 379), 'numpy.array', 'np.array', (['[0, 0.7, 0.8, 0.9, 1.0]'], {}), '([0, 0.7, 0.8, 0.9, 1.0])\n', (354, 379), True, 'import numpy as np\n'), ((411, 426), 'banditpylib.bandits.CvarReward', 'CvarReward', (['(0.7)'], {}), '(0.7)\n', (421, 426), False, 'from banditpylib.bandits import CvarReward\n'), ((972, 1001), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (981, 1001), False, 'from unittest.mock import MagicMock\n'), ((1040, 1065), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1])\n', (1048, 1065), True, 'import numpy as np\n'), ((1119, 1165), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'mock_preference_params'}), '(return_value=mock_preference_params)\n', (1128, 1165), False, 'from unittest.mock import MagicMock\n'), ((641, 650), 'banditpylib.data_pb2.Context', 'Context', ([], {}), '()\n', (648, 650), False, 'from banditpylib.data_pb2 import Actions, Context\n'), ((830, 839), 'banditpylib.data_pb2.Actions', 'Actions', ([], {}), '()\n', (837, 839), False, 'from banditpylib.data_pb2 import Actions, Context\n'), ((1202, 1211), 'banditpylib.data_pb2.Context', 'Context', ([], {}), '()\n', (1209, 1211), False, 'from banditpylib.data_pb2 import Actions, Context\n'), ((1445, 1454), 'banditpylib.data_pb2.Actions', 'Actions', ([], {}), '()\n', (1452, 1454), False, 'from banditpylib.data_pb2 import Actions, Context\n')]
|
import numpy
from xoppy_dabax_util import bragg_calc2
from run_diff_pat import run_diff_pat
from srxraylib.plot.gol import plot
if __name__ == "__main__":
descriptor = 'YB66'
    SCANFROM = 0 # in microradians
    SCANTO = 100 # in microradians
MILLER_INDEX_H = 4
MILLER_INDEX_K = 0
MILLER_INDEX_L = 0
TEMPER = 1.0
ENERGY = 8040.0
SCANPOINTS = 200
print("Using crystal descriptor: ",descriptor)
bragg_dictionary = bragg_calc2(descriptor=descriptor,
hh=MILLER_INDEX_H,kk=MILLER_INDEX_K,ll=MILLER_INDEX_L,
temper=TEMPER,
emin=ENERGY-100.0,emax=ENERGY+100.0,
estep=(SCANTO-SCANFROM)/SCANPOINTS,fileout="xcrystal.bra")
run_diff_pat(
MOSAIC = 0,
GEOMETRY = 0,
SCAN = 2,
UNIT = 1,
SCANFROM = SCANFROM,
SCANTO = SCANTO,
SCANPOINTS = SCANPOINTS,
ENERGY = ENERGY,
ASYMMETRY_ANGLE = 0.0,
THICKNESS = 0.7,
MOSAIC_FWHM = 0.1,
RSAG = 125.0,
RMER = 1290.0,
ANISOTROPY = 0,
POISSON = 0.22,
CUT = "2 -1 -1 ; 1 1 1 ; 0 0 0",
FILECOMPLIANCE = "mycompliance.dat")
a = numpy.loadtxt("diff_pat.dat",skiprows=5)
#
# plot
#
plot(a[:, 0], a[:, -1])
|
[
"xoppy_dabax_util.bragg_calc2",
"numpy.loadtxt",
"run_diff_pat.run_diff_pat",
"srxraylib.plot.gol.plot"
] |
[((457, 679), 'xoppy_dabax_util.bragg_calc2', 'bragg_calc2', ([], {'descriptor': 'descriptor', 'hh': 'MILLER_INDEX_H', 'kk': 'MILLER_INDEX_K', 'll': 'MILLER_INDEX_L', 'temper': 'TEMPER', 'emin': '(ENERGY - 100.0)', 'emax': '(ENERGY + 100.0)', 'estep': '((SCANTO - SCANFROM) / SCANPOINTS)', 'fileout': '"""xcrystal.bra"""'}), "(descriptor=descriptor, hh=MILLER_INDEX_H, kk=MILLER_INDEX_K, ll\n =MILLER_INDEX_L, temper=TEMPER, emin=ENERGY - 100.0, emax=ENERGY + \n 100.0, estep=(SCANTO - SCANFROM) / SCANPOINTS, fileout='xcrystal.bra')\n", (468, 679), False, 'from xoppy_dabax_util import bragg_calc2\n'), ((840, 1151), 'run_diff_pat.run_diff_pat', 'run_diff_pat', ([], {'MOSAIC': '(0)', 'GEOMETRY': '(0)', 'SCAN': '(2)', 'UNIT': '(1)', 'SCANFROM': 'SCANFROM', 'SCANTO': 'SCANTO', 'SCANPOINTS': 'SCANPOINTS', 'ENERGY': 'ENERGY', 'ASYMMETRY_ANGLE': '(0.0)', 'THICKNESS': '(0.7)', 'MOSAIC_FWHM': '(0.1)', 'RSAG': '(125.0)', 'RMER': '(1290.0)', 'ANISOTROPY': '(0)', 'POISSON': '(0.22)', 'CUT': '"""2 -1 -1 ; 1 1 1 ; 0 0 0"""', 'FILECOMPLIANCE': '"""mycompliance.dat"""'}), "(MOSAIC=0, GEOMETRY=0, SCAN=2, UNIT=1, SCANFROM=SCANFROM,\n SCANTO=SCANTO, SCANPOINTS=SCANPOINTS, ENERGY=ENERGY, ASYMMETRY_ANGLE=\n 0.0, THICKNESS=0.7, MOSAIC_FWHM=0.1, RSAG=125.0, RMER=1290.0,\n ANISOTROPY=0, POISSON=0.22, CUT='2 -1 -1 ; 1 1 1 ; 0 0 0',\n FILECOMPLIANCE='mycompliance.dat')\n", (852, 1151), False, 'from run_diff_pat import run_diff_pat\n'), ((1315, 1356), 'numpy.loadtxt', 'numpy.loadtxt', (['"""diff_pat.dat"""'], {'skiprows': '(5)'}), "('diff_pat.dat', skiprows=5)\n", (1328, 1356), False, 'import numpy\n'), ((1384, 1407), 'srxraylib.plot.gol.plot', 'plot', (['a[:, 0]', 'a[:, -1]'], {}), '(a[:, 0], a[:, -1])\n', (1388, 1407), False, 'from srxraylib.plot.gol import plot\n')]
|
import os
import datetime
import math
import traceback
from typing import List
import requests
from loguru import logger
from lxml import etree
from siphon.catalog import TDSCatalog
from dask.utils import memory_repr
import numpy as np
from dateutil import parser
from ooi_harvester.settings import harvest_settings
def estimate_size_and_time(raw):
m = ""
if "requestUUID" in raw:
est_size = raw["sizeCalculation"] / 1024 ** 2
size_txt = "MB"
if (est_size / 1024) >= 1.0:
est_size = est_size / 1024
size_txt = "GB"
est_time = raw["timeCalculation"]
time_txt = "Seconds"
if (est_time / 60) >= 1.0 and (est_time / 60) < 60.0:
est_time = math.floor(est_time / 60)
time_txt = "Minutes"
if est_time == 1:
time_txt = "Minute"
elif (est_time / 60) >= 60.0:
est_time = math.floor(est_time / 60 ** 2)
time_txt = "Hours"
if est_time == 1:
time_txt = "Hour"
m = f"""
Estimated File size: {est_size:.4} {size_txt}
Estimated Time: {est_time} {time_txt}
"""
elif "message" in raw:
m = f"""
No estimate calculated.
{raw['message']['status']}
"""
return m
def parse_uframe_response(resp):
if "allURLs" in resp:
return {
"request_id": resp["requestUUID"],
"thredds_catalog": resp["allURLs"][0],
"download_catalog": resp["allURLs"][1],
"status_url": resp["allURLs"][1] + "/status.txt",
"data_size": resp["sizeCalculation"],
"estimated_time": resp["timeCalculation"],
"units": {
"data_size": "bytes",
"estimated_time": "seconds",
"request_dt": "UTC",
},
"request_dt": datetime.datetime.utcnow().isoformat(),
}
logger.warning(resp)
return None
def param_change(name):
"""
    Method to accommodate the parameter renaming.
https://oceanobservatories.org/renaming-data-stream-parameters/
"""
if name == 'pressure_depth':
return 'pressure'
else:
return name
def parse_param_dict(param_dict):
unit = None
if "unit" in param_dict:
if isinstance(param_dict["unit"], dict):
if "value" in param_dict["unit"]:
unit = param_dict["unit"]["value"]
product_type = None
if "data_product_type" in param_dict:
if isinstance(param_dict["data_product_type"], dict):
if "value" in param_dict["data_product_type"]:
product_type = param_dict["data_product_type"]["value"]
return {
"pid": param_dict["id"],
"reference_designator": param_change(param_dict["name"]),
"parameter_name": param_dict["display_name"],
"netcdf_name": param_dict["netcdf_name"],
"standard_name": param_dict["standard_name"],
"description": param_dict["description"],
"unit": unit,
"data_level": param_dict['data_level'],
"data_product_type": product_type,
"data_product_identifier": param_dict["data_product_identifier"],
"last_updated": datetime.datetime.utcnow().isoformat(),
}
def parse_global_range_dataframe(global_ranges):
"""Cleans up the global ranges dataframe"""
global_df = global_ranges[global_ranges.columns[:-3]]
global_df.columns = [
"reference_designator",
"parameter_id_r",
"parameter_id_t",
"global_range_min",
"global_range_max",
"data_level",
"units",
]
return global_df
def get_bytes(value, unit):
bytes_map = {
'bytes': 1,
'Kbytes': 1024 ** 1,
'Mbytes': 1024 ** 2,
'Gbytes': 1024 ** 3,
}
return value * bytes_map[unit]
def parse_dataset_element(d, namespace):
dataset_dict = {}
    for i in d.iter():
clean_tag = i.tag.replace('{' + namespace + '}', '')
if clean_tag == 'dataset':
dataset_dict = dict(**i.attrib)
if clean_tag == 'dataSize':
dataset_dict = dict(
data_size=float(i.text), **i.attrib, **dataset_dict
)
dataset_dict = dict(
size_bytes=get_bytes(
dataset_dict['data_size'], dataset_dict['units']
),
**dataset_dict,
)
if clean_tag == 'date':
dataset_dict = dict(date_modified=i.text, **dataset_dict)
return dataset_dict
def parse_response_thredds(response):
stream_name = response['stream']['table_name']
catalog = TDSCatalog(
response['result']['thredds_catalog'].replace('.html', '.xml')
)
catalog_dict = {
'stream_name': stream_name,
'catalog_url': catalog.catalog_url,
'base_tds_url': catalog.base_tds_url,
'async_url': response['result']['download_catalog'],
}
req = requests.get(catalog.catalog_url)
catalog_root = etree.fromstring(req.content)
namespaces = {}
for k, v in catalog_root.nsmap.items():
if k is None:
namespaces['cat'] = v
else:
namespaces[k] = v
dataset_elements = catalog_root.xpath(
'/cat:catalog/cat:dataset/cat:dataset', namespaces=namespaces
)
datasets = [
parse_dataset_element(i, namespaces['cat']) for i in dataset_elements
]
catalog_dict['datasets'] = datasets
return catalog_dict
def filter_and_parse_datasets(cat):
import re
stream_cat = cat.copy()
name = stream_cat['stream_name']
provenance_files = []
filtered_datasets = []
for d in stream_cat['datasets']:
m = re.search(
r'(deployment(\d{4})_(%s)_(\d{4}\d{2}\d{2}T\d+.\d+)-(\d{4}\d{2}\d{2}T\d+.\d+).nc)' # noqa
% (name),
str(d['name']),
)
prov = re.search(
r'(deployment(\d{4})_(%s)_aggregate_provenance.json)' % (name),
str(d['name']),
)
if m:
_, dep_num, _, start, end = m.groups()
dataset = dict(
deployment=int(dep_num), start_ts=start, end_ts=end, **d
)
filtered_datasets.append(dataset)
elif prov:
_, dep_num, _ = prov.groups()
provenance = dict(deployment=int(dep_num), **d)
provenance_files.append(provenance)
total_bytes = np.sum([d['size_bytes'] for d in filtered_datasets])
stream_cat['datasets'] = filtered_datasets
stream_cat['provenance'] = provenance_files
stream_cat['total_data_size'] = memory_repr(total_bytes)
stream_cat['total_data_bytes'] = total_bytes
return stream_cat
def filter_datasets_by_time(
datasets: List[dict], start_dt: np.datetime64, end_dt: np.datetime64
) -> List[dict]:
"""
Filters datasets collection based on the given start and end datetime.
Each dataset dictionary in the collection MUST have
`start_ts` and `end_ts`key in it.
Parameters
----------
datasets : list
The datasets collection to be filtered.
start_dt : np.datetime64
The start datetime desired.
end_dt : np.datetime64
The end datetime desired.
Returns
-------
list
The filtered datasets collection
"""
filtered_datasets = []
for d in datasets:
start_d = np.datetime64(parser.parse(d['start_ts']))
end_d = np.datetime64(parser.parse(d['end_ts']))
if start_d >= start_dt.astype(
start_d.dtype
) and end_d <= end_dt.astype(start_d.dtype):
filtered_datasets.append(d)
return filtered_datasets
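# A minimal usage sketch for `filter_datasets_by_time` (kept as a comment so it does
# not run on import; the file names, timestamps, and date range below are made up and
# not real OOI data):
#     demo_datasets = [
#         {'name': 'a.nc', 'start_ts': '20200101T000000.000', 'end_ts': '20200102T000000.000'},
#         {'name': 'b.nc', 'start_ts': '20200110T000000.000', 'end_ts': '20200111T000000.000'},
#     ]
#     filter_datasets_by_time(
#         demo_datasets, np.datetime64('2020-01-01'), np.datetime64('2020-01-03')
#     )  # keeps only the 'a.nc' entry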
def setup_etl(stream, source='ooinet', target_bucket='s3://ooi-data'):
name = stream['stream_name']
harvest_location = os.path.expanduser('~/.ooi-harvester')
# Setup Local temp folder for netcdf
temp_fold = os.path.join(harvest_location, name)
if not os.path.exists(os.path.dirname(temp_fold)):
os.mkdir(os.path.dirname(temp_fold))
if not os.path.exists(temp_fold):
os.mkdir(temp_fold)
# Setup S3 Bucket
temp_s3_fold = f"s3://temp-ooi-data/{name}.zarr"
final_s3_fold = f"{target_bucket}/{name}"
if source == 'ooinet':
retrieved_dt = stream['result']['request_dt']
else:
retrieved_dt = stream['retrieved_dt']
del stream['retrieved_dt']
return dict(
temp_fold=temp_fold,
temp_bucket=temp_s3_fold,
final_bucket=final_s3_fold,
retrieved_dt=retrieved_dt,
**stream,
)
def seconds_to_date(num):
start_dt = datetime.datetime(1900, 1, 1)
return start_dt + datetime.timedelta(seconds=num)
def get_storage_options(path):
if path.startswith("s3://"):
return harvest_settings.storage_options.aws.dict()
def get_items(keys, orig_dict):
new_dict = {}
for k, v in orig_dict.items():
if k in keys:
new_dict[k] = v
return new_dict
def rename_item(old_key, new_key, orig_dict):
new_dict = orig_dict.copy()
if old_key in new_dict:
new_dict.update({new_key: new_dict[old_key]})
del new_dict[old_key]
return new_dict
def parse_exception(exception):
exc_dict = {
'type': type(exception).__name__,
'value': str(exception),
'traceback': "".join(
traceback.format_exception(
type(exception), exception, exception.__traceback__
)
),
}
return exc_dict
|
[
"datetime.datetime",
"os.path.exists",
"dask.utils.memory_repr",
"dateutil.parser.parse",
"math.floor",
"datetime.datetime.utcnow",
"loguru.logger.warning",
"os.path.join",
"requests.get",
"numpy.sum",
"os.path.dirname",
"os.mkdir",
"lxml.etree.fromstring",
"ooi_harvester.settings.harvest_settings.storage_options.aws.dict",
"datetime.timedelta",
"os.path.expanduser"
] |
[((1942, 1962), 'loguru.logger.warning', 'logger.warning', (['resp'], {}), '(resp)\n', (1956, 1962), False, 'from loguru import logger\n'), ((5007, 5040), 'requests.get', 'requests.get', (['catalog.catalog_url'], {}), '(catalog.catalog_url)\n', (5019, 5040), False, 'import requests\n'), ((5060, 5089), 'lxml.etree.fromstring', 'etree.fromstring', (['req.content'], {}), '(req.content)\n', (5076, 5089), False, 'from lxml import etree\n'), ((6488, 6540), 'numpy.sum', 'np.sum', (["[d['size_bytes'] for d in filtered_datasets]"], {}), "([d['size_bytes'] for d in filtered_datasets])\n", (6494, 6540), True, 'import numpy as np\n'), ((6672, 6696), 'dask.utils.memory_repr', 'memory_repr', (['total_bytes'], {}), '(total_bytes)\n', (6683, 6696), False, 'from dask.utils import memory_repr\n'), ((7861, 7899), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.ooi-harvester"""'], {}), "('~/.ooi-harvester')\n", (7879, 7899), False, 'import os\n'), ((7958, 7994), 'os.path.join', 'os.path.join', (['harvest_location', 'name'], {}), '(harvest_location, name)\n', (7970, 7994), False, 'import os\n'), ((8675, 8704), 'datetime.datetime', 'datetime.datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (8692, 8704), False, 'import datetime\n'), ((8107, 8132), 'os.path.exists', 'os.path.exists', (['temp_fold'], {}), '(temp_fold)\n', (8121, 8132), False, 'import os\n'), ((8142, 8161), 'os.mkdir', 'os.mkdir', (['temp_fold'], {}), '(temp_fold)\n', (8150, 8161), False, 'import os\n'), ((8727, 8758), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'num'}), '(seconds=num)\n', (8745, 8758), False, 'import datetime\n'), ((8840, 8883), 'ooi_harvester.settings.harvest_settings.storage_options.aws.dict', 'harvest_settings.storage_options.aws.dict', ([], {}), '()\n', (8881, 8883), False, 'from ooi_harvester.settings import harvest_settings\n'), ((732, 757), 'math.floor', 'math.floor', (['(est_time / 60)'], {}), '(est_time / 60)\n', (742, 757), False, 'import math\n'), ((7458, 7485), 'dateutil.parser.parse', 'parser.parse', (["d['start_ts']"], {}), "(d['start_ts'])\n", (7470, 7485), False, 'from dateutil import parser\n'), ((7517, 7542), 'dateutil.parser.parse', 'parser.parse', (["d['end_ts']"], {}), "(d['end_ts'])\n", (7529, 7542), False, 'from dateutil import parser\n'), ((8021, 8047), 'os.path.dirname', 'os.path.dirname', (['temp_fold'], {}), '(temp_fold)\n', (8036, 8047), False, 'import os\n'), ((8067, 8093), 'os.path.dirname', 'os.path.dirname', (['temp_fold'], {}), '(temp_fold)\n', (8082, 8093), False, 'import os\n'), ((918, 948), 'math.floor', 'math.floor', (['(est_time / 60 ** 2)'], {}), '(est_time / 60 ** 2)\n', (928, 948), False, 'import math\n'), ((3239, 3265), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3263, 3265), False, 'import datetime\n'), ((1888, 1914), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1912, 1914), False, 'import datetime\n')]
|
import numpy as np
import pandas as pd
import torch
from physics.protein_os import Protein
import options
from utils import write_pdb, write_pdb_sample, transform_profile, load_protein
from physics.anneal import AnnealCoords, AnnealFrag
# from physics.move import SampleICNext
from physics.grad_minimizer import *
from physics.dynamics import *
import os
import mdtraj as md
from utils import test_setup
import h5py
#################################################
parser = options.get_fold_parser()
args = options.parse_args_and_arch(parser)
device, model, energy_fn, ProteinBase = test_setup(args)
# position_weights = torch.zeros((1, args.seq_len + 1), device=device)
# position_weights[:, 0:5] = 1
# energy_fn.energy_fn.position_weights = position_weights
#################################################
data_path = 'data/fold/cullpdb_val_deep'
protein_sample = pd.read_csv(f'{data_path}/sample.csv')
pdb_selected = protein_sample['pdb'].values
np.random.shuffle(pdb_selected)
fold_engine = args.fold_engine
mode = args.mode
# sample_ic = SampleICNext(mode)
exp_id = args.load_exp[-5:]
save_dir = args.save_dir
# if not os.path.exists(f'data/fold/{exp_id}'):
# os.mkdir(f'data/fold/{exp_id}')
if not os.path.exists(f'data/fold/{save_dir}'):
os.mkdir(f'data/fold/{save_dir}')
for pdb_id in pdb_selected:
seq, coords_native, profile = load_protein(data_path, pdb_id, mode, device, args)
protein_native = Protein(seq, coords_native, profile)
energy_native = protein_native.get_energy(energy_fn).item()
print('energy_native:', energy_native)
rg2, collision = protein_native.get_rad_gyration(coords_native)
print('native radius of gyration square:', rg2.item())
# residue_energy = protein_native.get_residue_energy(energy_fn)
# print(residue_energy)
# write_pdb(seq, coords_native, pdb_id, 'native', exp_id)
protein = Protein(seq, coords_native.clone(), profile.clone())
if args.random_init:
# random_coords_int = sample_ic.random_coords_int(len(seq)-3).to(device)
# protein.update_coords_internal(random_coords_int)
# extend_coords_int = torch.tensor([[5.367, 1.6, 0.0]], device=device).repeat((len(seq)-3, 1))
extend_coords_int = torch.tensor([[5.367, 0.1, 0.0]], device=device).repeat((len(seq)-3, 1))
protein.update_coords_internal(extend_coords_int)
protein.update_cartesian_from_internal()
coords_init = protein.coords
energy_init = protein.get_energy(energy_fn).item()
print('energy_init:', energy_init)
# write_pdb(seq, coords_init, pdb_id, f'init_{mode}', exp_id)
if fold_engine == 'anneal':
# simulated annealing
torch.set_grad_enabled(False)
if args.anneal_type == 'int_one':
annealer = AnnealCoords(energy_fn, protein, mode=mode, ic_move_std=args.ic_move_std,
T_max=args.T_max, T_min=args.T_min, L=args.L)
elif args.anneal_type == 'frag':
frag_file = h5py.File(f'data/fragment/{pdb_id}/{pdb_id}_int.h5', 'r')
query_pos = torch.tensor(frag_file['query_pos'][()], device=device)
frag_int = torch.tensor(frag_file['coords_int'][()], device=device)
annealer = AnnealFrag(energy_fn, protein, frag=(query_pos, frag_int), use_rg=args.use_rg,
T_max=args.T_max, T_min=args.T_min, L=args.L)
else:
raise ValueError('anneal_type should be int_one / frag.')
annealer.run()
coords_best = annealer.x_best
energy_best = annealer.energy_best
sample = annealer.sample
sample_energy = annealer.sample_energy
elif fold_engine == 'grad':
if args.x_type == 'cart':
minimizer = GradMinimizerCartesian(energy_fn, protein, lr=args.lr, num_steps=args.L)
elif args.x_type == 'internal':
minimizer = GradMinimizerInternal(energy_fn, protein, lr=args.lr, num_steps=args.L, momentum=0.0)
elif args.x_type == 'int_fast':
minimizer = GradMinimizerIntFast(energy_fn, protein, lr=args.lr, num_steps=args.L)
elif args.x_type == 'mixed':
minimizer = GradMinimizerMixed(energy_fn, protein, lr=args.lr, num_steps=args.L)
elif args.x_type == 'mix_fast':
minimizer = GradMinimizerMixFast(energy_fn, protein, lr=args.lr, num_steps=args.L)
else:
raise ValueError('x_type should be cart / internal / mixed / int_fast / mix_fast.')
minimizer.run()
coords_best = minimizer.x_best
energy_best = minimizer.energy_best
sample = minimizer.sample
sample_energy = minimizer.sample_energy
elif fold_engine == 'dynamics':
if args.x_type == 'cart':
minimizer = Dynamics(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
elif args.x_type == 'internal':
minimizer = DynamicsInternal(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
elif args.x_type == 'int_fast':
minimizer = DynamicsIntFast(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
elif args.x_type == 'mixed':
minimizer = DynamicsMixed(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
elif args.x_type == 'mix_fast':
minimizer = DynamicsMixFast(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
else:
raise ValueError('x_type should be cart / internal / mixed / int_fast / mix_fast.')
minimizer.run()
coords_best = minimizer.x_best
energy_best = minimizer.energy_best
sample = minimizer.sample
sample_energy = minimizer.sample_energy
else:
raise ValueError('fold_engine should be anneal / grad / dynamics')
# protein.update_coords(coords_best)
# residue_energy = protein.get_residue_energy(energy_fn)
# print(residue_energy)
# write_pdb(seq, coords_best, pdb_id, f'best_{mode}', exp_id)
# save sampled structures
sample = [coords_native.cpu(), coords_best.cpu(), coords_init.cpu()] + sample
sample_energy = [energy_native, energy_best, energy_init] + sample_energy
# write_pdb_sample(seq, sample, pdb_id, 'sample', exp_id)
# pd.DataFrame({'sample_energy': sample_energy}).to_csv(f'data/fold/{exp_id}/{pdb_id}_energy.csv', index=False)
write_pdb_sample(seq, sample, pdb_id, 'sample', save_dir)
# compute RMSD,
sample_xyz = torch.stack(sample, 0).cpu().detach().numpy()
print(sample_xyz.shape)
t = md.Trajectory(xyz=sample_xyz, topology=None)
t = t.superpose(t, frame=0)
write_pdb_sample(seq, t.xyz, pdb_id, 'sample2', save_dir)
sample_rmsd = md.rmsd(t, t, frame=0) # computation will change sample_xyz;
print(f'best RMSD: {sample_rmsd[1]}')
df = pd.DataFrame({'sample_energy': sample_energy,
'sample_rmsd': sample_rmsd})
df.to_csv(f'data/fold/{save_dir}/{pdb_id}_energy.csv', index=False)
|
[
"pandas.read_csv",
"mdtraj.Trajectory",
"os.path.exists",
"physics.anneal.AnnealCoords",
"os.mkdir",
"pandas.DataFrame",
"mdtraj.rmsd",
"utils.write_pdb_sample",
"physics.protein_os.Protein",
"utils.load_protein",
"options.parse_args_and_arch",
"h5py.File",
"utils.test_setup",
"torch.stack",
"physics.anneal.AnnealFrag",
"torch.tensor",
"options.get_fold_parser",
"torch.set_grad_enabled",
"numpy.random.shuffle"
] |
[((477, 502), 'options.get_fold_parser', 'options.get_fold_parser', ([], {}), '()\n', (500, 502), False, 'import options\n'), ((510, 545), 'options.parse_args_and_arch', 'options.parse_args_and_arch', (['parser'], {}), '(parser)\n', (537, 545), False, 'import options\n'), ((587, 603), 'utils.test_setup', 'test_setup', (['args'], {}), '(args)\n', (597, 603), False, 'from utils import test_setup\n'), ((875, 913), 'pandas.read_csv', 'pd.read_csv', (['f"""{data_path}/sample.csv"""'], {}), "(f'{data_path}/sample.csv')\n", (886, 913), True, 'import pandas as pd\n'), ((958, 989), 'numpy.random.shuffle', 'np.random.shuffle', (['pdb_selected'], {}), '(pdb_selected)\n', (975, 989), True, 'import numpy as np\n'), ((1218, 1257), 'os.path.exists', 'os.path.exists', (['f"""data/fold/{save_dir}"""'], {}), "(f'data/fold/{save_dir}')\n", (1232, 1257), False, 'import os\n'), ((1263, 1296), 'os.mkdir', 'os.mkdir', (['f"""data/fold/{save_dir}"""'], {}), "(f'data/fold/{save_dir}')\n", (1271, 1296), False, 'import os\n'), ((1362, 1413), 'utils.load_protein', 'load_protein', (['data_path', 'pdb_id', 'mode', 'device', 'args'], {}), '(data_path, pdb_id, mode, device, args)\n', (1374, 1413), False, 'from utils import write_pdb, write_pdb_sample, transform_profile, load_protein\n'), ((1436, 1472), 'physics.protein_os.Protein', 'Protein', (['seq', 'coords_native', 'profile'], {}), '(seq, coords_native, profile)\n', (1443, 1472), False, 'from physics.protein_os import Protein\n'), ((6396, 6453), 'utils.write_pdb_sample', 'write_pdb_sample', (['seq', 'sample', 'pdb_id', '"""sample"""', 'save_dir'], {}), "(seq, sample, pdb_id, 'sample', save_dir)\n", (6412, 6453), False, 'from utils import write_pdb, write_pdb_sample, transform_profile, load_protein\n'), ((6574, 6618), 'mdtraj.Trajectory', 'md.Trajectory', ([], {'xyz': 'sample_xyz', 'topology': 'None'}), '(xyz=sample_xyz, topology=None)\n', (6587, 6618), True, 'import mdtraj as md\n'), ((6655, 6712), 'utils.write_pdb_sample', 'write_pdb_sample', (['seq', 't.xyz', 'pdb_id', '"""sample2"""', 'save_dir'], {}), "(seq, t.xyz, pdb_id, 'sample2', save_dir)\n", (6671, 6712), False, 'from utils import write_pdb, write_pdb_sample, transform_profile, load_protein\n'), ((6731, 6753), 'mdtraj.rmsd', 'md.rmsd', (['t', 't'], {'frame': '(0)'}), '(t, t, frame=0)\n', (6738, 6753), True, 'import mdtraj as md\n'), ((6845, 6919), 'pandas.DataFrame', 'pd.DataFrame', (["{'sample_energy': sample_energy, 'sample_rmsd': sample_rmsd}"], {}), "({'sample_energy': sample_energy, 'sample_rmsd': sample_rmsd})\n", (6857, 6919), True, 'import pandas as pd\n'), ((2675, 2704), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (2697, 2704), False, 'import torch\n'), ((2770, 2893), 'physics.anneal.AnnealCoords', 'AnnealCoords', (['energy_fn', 'protein'], {'mode': 'mode', 'ic_move_std': 'args.ic_move_std', 'T_max': 'args.T_max', 'T_min': 'args.T_min', 'L': 'args.L'}), '(energy_fn, protein, mode=mode, ic_move_std=args.ic_move_std,\n T_max=args.T_max, T_min=args.T_min, L=args.L)\n', (2782, 2893), False, 'from physics.anneal import AnnealCoords, AnnealFrag\n'), ((2230, 2278), 'torch.tensor', 'torch.tensor', (['[[5.367, 0.1, 0.0]]'], {'device': 'device'}), '([[5.367, 0.1, 0.0]], device=device)\n', (2242, 2278), False, 'import torch\n'), ((2991, 3048), 'h5py.File', 'h5py.File', (['f"""data/fragment/{pdb_id}/{pdb_id}_int.h5"""', '"""r"""'], {}), "(f'data/fragment/{pdb_id}/{pdb_id}_int.h5', 'r')\n", (3000, 3048), False, 'import h5py\n'), ((3073, 3128), 'torch.tensor', 
'torch.tensor', (["frag_file['query_pos'][()]"], {'device': 'device'}), "(frag_file['query_pos'][()], device=device)\n", (3085, 3128), False, 'import torch\n'), ((3152, 3208), 'torch.tensor', 'torch.tensor', (["frag_file['coords_int'][()]"], {'device': 'device'}), "(frag_file['coords_int'][()], device=device)\n", (3164, 3208), False, 'import torch\n'), ((3232, 3361), 'physics.anneal.AnnealFrag', 'AnnealFrag', (['energy_fn', 'protein'], {'frag': '(query_pos, frag_int)', 'use_rg': 'args.use_rg', 'T_max': 'args.T_max', 'T_min': 'args.T_min', 'L': 'args.L'}), '(energy_fn, protein, frag=(query_pos, frag_int), use_rg=args.\n use_rg, T_max=args.T_max, T_min=args.T_min, L=args.L)\n', (3242, 3361), False, 'from physics.anneal import AnnealCoords, AnnealFrag\n'), ((6492, 6514), 'torch.stack', 'torch.stack', (['sample', '(0)'], {}), '(sample, 0)\n', (6503, 6514), False, 'import torch\n')]
|
import numpy as np
from .common import *
from . import rotation
def to_homogeneous(x):
x = np.asarray(x)
o = np.ones_like(x[..., :1])
return np.concatenate([x, o], axis=-1)
def from_homogeneous(x):
return x[..., :-1] / x[..., -1:]
def compose(r, t, rtype, out=None):
if out is None:
shape = tuple(np.shape(t)[:-1]) + (4, 4)
out = np.zeros(shape, dtype=t.dtype)
rtype.to_matrix(r, out=out[..., :3, :3])
out[..., :3, 3:] = t.reshape(out[...,:3,3:].shape)
return out
def translation_from_matrix(T):
return T[..., :3, 3]
def rotation_from_matrix(T):
return T[..., :3, :3]
def rotation_2d(x, R=None, c=None, s=None):
if R is None:
shape = tuple(np.shape(x)[:-1]) + (2, 2)
R = np.zeros(shape, dtype=x.dtype)
if c is None:
c = np.cos(x)
if s is None:
s = np.sin(x)
R[..., 0, 0] = c
R[..., 0, 1] = -s
R[..., 1, 0] = s
R[..., 1, 1] = c
return R
def Rz(x, T=None, c=None, s=None):
if T is None:
shape = tuple(np.shape(x)[:-1]) + (4, 4)
T = np.zeros(shape, dtype=np.float32)
if c is None:
c = np.cos(x)
if s is None:
s = np.sin(x)
T[..., 0, 0] = c
T[..., 0, 1] = -s
T[..., 1, 0] = s
T[..., 1, 1] = c
T[..., 2, 2] = 1
return T
def invert(T, out=None):
R = T[..., :3, :3]
t = T[..., :3, 3:]
if out is None:
out = np.zeros_like(T)
out[..., :3, :3] = R.swapaxes(-1, -2)
out[..., :3, 3:] = -np.einsum('...ba,...bc->...ac', R, t)
out[..., 3, 3] = 1
return out
def Rti(R, t):
Ri = R.swapaxes(-1, -2)
if np.ndim(t) < np.ndim(Ri):
# case (...,D)
ti = -np.einsum('...ab,...b->...a', Ri, t)
else:
# case (...,D,1)
ti = -np.einsum('...ab,...bc->...ac', Ri, t)
return Ri, ti
def lerp(a, b, w):
return (a * (1.0-w)) + (b*w)
def flerp(a, b, w, f, fi):
return fi(lerp(f(a), f(b), w))
def rlerp(ra, rb, w):
Ra = np.eye(4, dtype=np.float32)
Rb = np.eye(4, dtype=np.float32)
Ra[:3, :3] = ra
Rb[:3, :3] = rb
qa = tx.quaternion_from_matrix(Ra)
qb = tx.quaternion_from_matrix(Rb)
    q = tx.quaternion_slerp(qa, qb, w)
R = tx.quaternion_matrix(q)[:3, :3]
return R
def rx3(R, x):
rx = np.einsum('...ab,...b->...a', R[..., :3, :3], x)
return rx
def tx3(T, x):
rx = np.einsum('...ab,...b->...a', T[..., :3, :3], x)
return rx + T[..., :3, 3:].swapaxes(-2, -1)
def rtx3(r, t, x):
return x.dot(r.swapaxes(-2, -1)) + t
def tx4(T, x):
return np.einsum('...ab,...b->...a', T, x)
|
[
"numpy.ones_like",
"numpy.eye",
"numpy.asarray",
"numpy.ndim",
"numpy.zeros",
"numpy.einsum",
"numpy.cos",
"numpy.concatenate",
"numpy.sin",
"numpy.shape",
"numpy.zeros_like"
] |
[((96, 109), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (106, 109), True, 'import numpy as np\n'), ((118, 142), 'numpy.ones_like', 'np.ones_like', (['x[..., :1]'], {}), '(x[..., :1])\n', (130, 142), True, 'import numpy as np\n'), ((154, 185), 'numpy.concatenate', 'np.concatenate', (['[x, o]'], {'axis': '(-1)'}), '([x, o], axis=-1)\n', (168, 185), True, 'import numpy as np\n'), ((1987, 2014), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (1993, 2014), True, 'import numpy as np\n'), ((2024, 2051), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (2030, 2051), True, 'import numpy as np\n'), ((2288, 2336), 'numpy.einsum', 'np.einsum', (['"""...ab,...b->...a"""', 'R[..., :3, :3]', 'x'], {}), "('...ab,...b->...a', R[..., :3, :3], x)\n", (2297, 2336), True, 'import numpy as np\n'), ((2376, 2424), 'numpy.einsum', 'np.einsum', (['"""...ab,...b->...a"""', 'T[..., :3, :3]', 'x'], {}), "('...ab,...b->...a', T[..., :3, :3], x)\n", (2385, 2424), True, 'import numpy as np\n'), ((2563, 2598), 'numpy.einsum', 'np.einsum', (['"""...ab,...b->...a"""', 'T', 'x'], {}), "('...ab,...b->...a', T, x)\n", (2572, 2598), True, 'import numpy as np\n'), ((369, 399), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 't.dtype'}), '(shape, dtype=t.dtype)\n', (377, 399), True, 'import numpy as np\n'), ((753, 783), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'x.dtype'}), '(shape, dtype=x.dtype)\n', (761, 783), True, 'import numpy as np\n'), ((814, 823), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (820, 823), True, 'import numpy as np\n'), ((854, 863), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (860, 863), True, 'import numpy as np\n'), ((1077, 1110), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (1085, 1110), True, 'import numpy as np\n'), ((1141, 1150), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (1147, 1150), True, 'import numpy as np\n'), ((1181, 1190), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1187, 1190), True, 'import numpy as np\n'), ((1419, 1435), 'numpy.zeros_like', 'np.zeros_like', (['T'], {}), '(T)\n', (1432, 1435), True, 'import numpy as np\n'), ((1502, 1539), 'numpy.einsum', 'np.einsum', (['"""...ba,...bc->...ac"""', 'R', 't'], {}), "('...ba,...bc->...ac', R, t)\n", (1511, 1539), True, 'import numpy as np\n'), ((1630, 1640), 'numpy.ndim', 'np.ndim', (['t'], {}), '(t)\n', (1637, 1640), True, 'import numpy as np\n'), ((1643, 1654), 'numpy.ndim', 'np.ndim', (['Ri'], {}), '(Ri)\n', (1650, 1654), True, 'import numpy as np\n'), ((1693, 1729), 'numpy.einsum', 'np.einsum', (['"""...ab,...b->...a"""', 'Ri', 't'], {}), "('...ab,...b->...a', Ri, t)\n", (1702, 1729), True, 'import numpy as np\n'), ((1779, 1817), 'numpy.einsum', 'np.einsum', (['"""...ab,...bc->...ac"""', 'Ri', 't'], {}), "('...ab,...bc->...ac', Ri, t)\n", (1788, 1817), True, 'import numpy as np\n'), ((328, 339), 'numpy.shape', 'np.shape', (['t'], {}), '(t)\n', (336, 339), True, 'import numpy as np\n'), ((714, 725), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (722, 725), True, 'import numpy as np\n'), ((1038, 1049), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1046, 1049), True, 'import numpy as np\n')]
|
# coding: utf-8
# In[1]:
get_ipython().run_cell_magic('javascript', '', '<!-- Ignore this block -->\nIPython.OutputArea.prototype._should_scroll = function(lines) {\n return false;\n}')
# # Data preprocessing
# 1. Convert any non-numeric values to numeric values.
# 2. If required, drop the rows with missing values or NA. In later lectures we will handle sparse data, which will allow us to use records with missing values.
# 3. Split the data into a train (80%) and test (20%) set.
# In[2]:
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
from __future__ import division
import pandas as pd
import numpy as np
from math import sqrt, isnan
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
"""Set global rcParams for pyplotlib"""
plt.rcParams["figure.figsize"] = "18,25"
# ### TextEncoder
#
# Here the data is a mix of numbers and text. Text values cannot be used directly and should be converted to numeric data.<br>
# For this I have created a function textEncoder which accepts one or more pandas Series. It returns a lookup dictionary mapping each text value to a numeric value; a short usage example follows the function definition below.
# In[3]:
def textEncoder(*textVectors):
lookUpDictionary = {}
lookupValue = 1
for textVector in textVectors:
for key in textVector.unique():
if key not in lookUpDictionary:
lookUpDictionary[key] = lookupValue
lookupValue +=1
return lookUpDictionary
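# A short usage sketch of textEncoder (illustrative only; the airline codes below are
# made up and not part of the airfare dataset).
# In[ ]:
demoAirlines = pd.Series(["AA", "UA", "AA", "DL"])
print(textEncoder(demoAirlines))  # expected: {'AA': 1, 'UA': 2, 'DL': 3}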
# ### SplitDataSet Procedure
# This method splits the dataset into a training set and a test set based upon the trainSetSize value. For the training set I am using pandas' sample method; the test set is its complement, obtained by dropping the indices that appear in the training set.
# In[4]:
"""Splits the provided pandas dataframe into training and test dataset"""
def splitDataSet(inputDataframe, trainSetSize):
trainSet = inputDataframe.sample(frac=trainSetSize)
testSet = inputDataframe.drop(trainSet.index)
return trainSet,testSet
# ### generatePearsonCoefficient Procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/f76ccfa7c2ed7f5b085115086107bbe25d329cec">
# For sample:-
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/bd1ccc2979b0fd1c1aec96e386f686ae874f9ec0">
# To decide which features to keep and which to drop I am using Pearson's coefficient. Its value lies in [-1, 1] and indicates how strongly two features are related<br>
# <table>
# <tr><td>Strength of Association</td><td>Positive</td><td>Negative</td></tr><tr><td>Small</td><td>.1 to .3 </td><td>-0.1 to -0.3 </td></tr><tr><td>Medium</td><td>.3 to .5 </td><td>-0.3 to -0.5 </td></tr><tr><td>Large</td><td>.5 to 1.0 </td><td>-0.5 to -1.0 </td></tr></table>
#
# In[5]:
"""Generate pearson's coefficient"""
def generatePearsonCoefficient(A, B):
A = A - A.mean()
B = B - B.mean()
return ((A * B).sum())/(sqrt((A * A).sum()) * sqrt((B * B).sum()))
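# A quick sanity check of generatePearsonCoefficient (illustrative only; the two series
# below are made up): two perfectly linearly related features should score 1.0.
# In[ ]:
demoA = pd.Series([1.0, 2.0, 3.0, 4.0])
demoB = pd.Series([2.0, 4.0, 6.0, 8.0])
print(generatePearsonCoefficient(demoA, demoB))  # expected: 1.0 (up to rounding)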
# ### predictLinearRegression Procedure
# This method predicts the value of Y given X and the model parameters. It adds a bias column to X.<br>
# The prediction is given by BX<sup>T</sup>
# In[6]:
"""Method to make prediction for yTest"""
def predictionLinearRegression(X, modelParameters):
X = np.insert(X, 0, 1, axis=1)
yPrediction = np.dot(modelParameters, X.T)
return yPrediction
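# A tiny worked example of predictionLinearRegression (illustrative only; the numbers
# are made up): with modelParameters = [b0, b1] each prediction is b0 + b1 * x, because
# a bias column of ones is prepended to X.
# In[ ]:
demoX = np.array([[1.0], [2.0]])
print(predictionLinearRegression(demoX, np.array([0.5, 2.0])))  # expected: [2.5 4.5]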
# ### RMSE procedure
# Calculates the root mean squared error for the given yTrue and yPrediction values.
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/fc187c3557d633423444d4c80a4a50cd6ecc3dd4">
#
# In[7]:
"""Model accuracy estimator RMSE"""
def RMSE(yTrue, yPrediction):
n = yTrue.shape[0]
    return sqrt(np.sum(np.square(yTrue - yPrediction)) / n)
# ### armijoStepLengthController procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/ed6d74a5c23f9034a072125eeb316eee5faeed43">
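# In words (matching the check inside the loop below): starting from a large alpha, keep
# halving it until the sufficient-decrease condition
# f(beta - alpha * gradient) < f(beta) - delta * alpha * ||gradient||^2
# holds, where f is the residual sum of squares.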
# In[8]:
"""Uses armijo principle to detect next value of alpha.
Alpha values are rewritten. Passed to function just to maintain uniformity
"""
def armijoStepLengthController(fx, alpha, x, y, beta, gradient, delta, maxIterations = 1000):
alpha = 1.0
gradientSquare = np.dot(gradient, gradient)
for i in range(0, maxIterations):
alpha = alpha/2
residual_alpha_gradient = y - np.dot((beta - (alpha * gradient)), x .T)
fx_alpha_gradient = np.dot(residual_alpha_gradient.T, residual_alpha_gradient)
"""Convergence condition for armijo principle"""
if fx_alpha_gradient < fx - (alpha * delta * gradientSquare):
break;
return alpha
# ### boldDriverStepLengthController procedure
# An extension of the Armijo step length controller that retains the alpha value between calls.
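# In words (matching the implementation below): the retained alpha is first grown by a
# factor alphaPlus and then repeatedly shrunk by a factor alphaMinus until the objective
# actually decreases, i.e. until f(beta) - f(beta - alpha * gradient) > 0.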
# In[9]:
def boldDriverStepLengthController(fx, alpha, x, y, beta, gradient, maxIterations = 1000,
alphaMinus = 0.5, alphaPlus = 1.1):
alpha = alpha * alphaPlus
for i in range(0, maxIterations):
alpha = alpha * alphaMinus
residual_alpha_gradient = y - np.dot((beta - (alpha * gradient)), x .T)
fx_alpha_gradient = np.dot(residual_alpha_gradient.T, residual_alpha_gradient)
"""Convergence condition for bold driver method"""
if fx - fx_alpha_gradient > 0:
break;
return alpha
# ### linearRegressionGradientDescent procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/26a319f33db70a80f8c5373f4348a198a202056c">
# Calculate the slope (gradient) at the current point and move in the negative gradient direction with the provided step length.<br/>
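# Concretely (as implemented below), for the least-squares objective
# f(beta) = ||X beta - y||^2 the gradient is 2 * X^T (X beta - y), and each iteration
# applies the update beta <- beta - alpha * gradient.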
# In[10]:
"""If no step length controller is provided then values of alpha will be taken as step length.
Else the step length controller will be used. Additional parameters to the controller are
provided by stepLengthControllerParameters"""
def linearRegressionGradientDescent(x, y, xTest, yTest, alpha, beta,
maxIterations=1000, epsilon=1.1e-20,
stepLengthController = None, stepLengthControllerParameters = None):
x = np.insert(x, 0, 1, axis=1)
x = x * 1.0
y = y * 1.0
if stepLengthController != None:
print("Warning using stepLengthController alpha values will be rewritten")
plotX = []
plotY_diff = []
plotY_RMSE = []
y_prediction = np.dot(beta, x.T)
residual = y_prediction - y
f_x = np.dot(residual.T, residual)
rmse = RMSE(yTest, predictionLinearRegression(xTest, beta))
"""For plotting graph"""
plotY_RMSE.append(rmse)
plotY_diff.append(f_x)
plotX.append(0)
for i in range(1, maxIterations):
gradient = np.dot(x.T, residual) * 2
"""Use step length controller if required"""
if stepLengthController != None:
alpha = stepLengthController(fx = f_x, alpha = alpha, x = x, y = y,
beta = beta, gradient = gradient, **stepLengthControllerParameters)
beta = beta - (alpha * gradient)
y_prediction = np.dot(beta, x.T)
residual = y_prediction - y
f_x_new = np.dot(residual.T, residual)
rmse = RMSE(yTest, predictionLinearRegression(xTest, beta))
"""For plotting graph"""
plotY_RMSE.append(rmse)
plotY_diff.append(abs(f_x_new - f_x))
plotX.append(i)
if abs(f_x - f_x_new) < epsilon:
print("Converged in " + str(i) + " iterations")
return beta, plotX, plotY_diff, plotY_RMSE, f_x, rmse
f_x = f_x_new
print("Warning algorithm failed to converge in " + str(maxIterations) + " interations")
return beta, plotX, plotY_diff, plotY_RMSE, f_x, rmse
# # Gradient descent for airlines fare data
# ### Load the airlines dataset
# In[11]:
""" File path change accordingly"""
directoryPath = "data"
airFareData = pd.read_csv(directoryPath+"/airq402.dat", sep=r'\s+', header=None)
airFareData.head(10)
"""Adding header"""
airFareData.columns = ["city1", "city2", "avgFare", "distance", "avgWeeklyPassengers",
"marketLeadingAirline", "marketShareLA", "averageFare", "lowPriceAirline",
"marketShareLPA", "price"]
airFareData.head()
# ### Using textEncoder to convert text data to numeric data
# In[12]:
"""Using lambda functions to replace text values based upon lockup dictionary"""
cityLookupDictionary = textEncoder(airFareData.city1, airFareData.city2)
airFareData['city1'] = airFareData.city1.apply(lambda cityName:
cityLookupDictionary[cityName])
airFareData['city2'] = airFareData.city2.apply(lambda cityName:
cityLookupDictionary[cityName])
airLineLookupDictionary = textEncoder(airFareData.lowPriceAirline, airFareData.marketLeadingAirline)
airFareData['lowPriceAirline'] = airFareData.lowPriceAirline.apply(lambda cityName:
airLineLookupDictionary[cityName])
airFareData['marketLeadingAirline'] = airFareData.marketLeadingAirline.apply(lambda cityName:
airLineLookupDictionary[cityName])
# ### Check and remove missing data
# In[13]:
airFareData.dropna(inplace = True)
airFareData.head()
# ### Check for corelation between different X and Y
# In[14]:
for column in airFareData:
if column != "price":
print("The corelation between " + column +" vs price is " +
str(generatePearsonCoefficient(airFareData[column], airFareData['price'])))
# ### Visualizing the data
# In[15]:
plt.close()
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6), (ax7, ax8), (ax9, ax10)) = plt.subplots(5,2,sharey='none')
ax1.plot(airFareData.city1, airFareData.price, "ro")
ax1.grid()
ax1.set_title("city1 vs price")
ax1.set_xlabel("city1")
ax1.set_ylabel("price")
ax2.plot(airFareData.city2, airFareData.price, "ro")
ax2.grid()
ax2.set_title("city2 vs price")
ax2.set_xlabel("city2")
ax2.set_ylabel("price")
ax3.plot(airFareData.avgFare, airFareData.price, "ro")
ax3.grid()
ax3.set_title("avgFare vs price")
ax3.set_xlabel("avgFare")
ax3.set_ylabel("price")
ax4.plot(airFareData.distance, airFareData.price, "ro")
ax4.grid()
ax4.set_title("distance vs price")
ax4.set_xlabel("distance")
ax4.set_ylabel("price")
ax5.plot(airFareData.avgWeeklyPassengers, airFareData.price, "ro")
ax5.grid()
ax5.set_title("avgWeeklyPassengers vs price")
ax5.set_xlabel("avgWeeklyPassengers")
ax5.set_ylabel("price")
ax6.plot(airFareData.marketLeadingAirline, airFareData.price, "ro")
ax6.grid()
ax6.set_title("marketLeadingAirline vs price")
ax6.set_xlabel("marketLeadingAirline")
ax6.set_ylabel("price")
ax7.plot(airFareData.marketShareLA, airFareData.price, "ro")
ax7.grid()
ax7.set_title("marketShareLA vs price")
ax7.set_xlabel("marketShareLA")
ax7.set_ylabel("price")
ax8.plot(airFareData.averageFare, airFareData.price, "ro")
ax8.grid()
ax8.set_title("averageFare vs price")
ax8.set_xlabel("averageFare")
ax8.set_ylabel("price")
ax9.plot(airFareData.lowPriceAirline, airFareData.price, "ro")
ax9.grid()
ax9.set_title("lowPriceAirline vs price")
ax9.set_xlabel("lowPriceAirline")
ax9.set_ylabel("price")
ax10.plot(airFareData.marketShareLPA, airFareData.price, "ro")
ax10.grid()
ax10.set_title("marketShareLPA vs price")
ax10.set_xlabel("marketShareLPA")
ax10.set_ylabel("price")
plt.show()
# By looking at Pearson's coefficient we can drop city1, city2, marketLeadingAirline, and lowPriceAirline as they do not have any correlation with price.
# ### Selecting the required features and splitting the dataset using splitDataSetProcedure
# In[16]:
airFareData = airFareData[['avgFare', 'distance', 'avgWeeklyPassengers', 'marketShareLA',
'averageFare', 'marketShareLPA', 'price']]
airFareData.head()
# In[17]:
trainSet, testSet = splitDataSet(airFareData, 0.8)
print(trainSet.shape)
print(testSet.shape)
# In[18]:
trainSet.head()
# ### Running gradient descent with alpha parameter grid search
# In[19]:
"""Setting beta constant as future comparasion will be easy"""
np.random.seed(8)
inputBeta = np.random.random_sample(7)
alpha_parameterGrid = [0.1, 1.7e-9, 1.17e-11]
X_train = trainSet.as_matrix(columns = ['avgFare', 'distance', 'avgWeeklyPassengers', 'marketShareLA',
'averageFare', 'marketShareLPA'])
X_test = testSet.as_matrix(columns = ['avgFare', 'distance', 'avgWeeklyPassengers', 'marketShareLA',
'averageFare', 'marketShareLPA'])
Y_train = trainSet['price']
Y_test = testSet['price']
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3,2,sharey='none')
axis = ((ax1, ax2), (ax3, ax4), (ax5, ax6))
index = 0
bestModelParameters = None
bestModelX = None
bestModelY = None
leastRMSE = None
leastRSS = None
for alpha in alpha_parameterGrid:
"""No step length controller provided so normal gradient descent will be executed"""
modelParameters, X, Ydiff, Yrmse, rss, rmse = linearRegressionGradientDescent(X_train, Y_train, X_test, Y_test,
alpha, inputBeta,
maxIterations = 1000)
"""Selecting the best model with least RMSE"""
if not(isnan(rmse)):
if leastRMSE is None or leastRMSE > rmse:
leastRMSE = rmse
bestModelParameters = modelParameters
leastRSS = rss
bestModelX = X
bestModelY = Yrmse
print("RMSE "+ str(rmse))
axis[index][0].plot(X, Ydiff)
axis[index][0].grid()
axis[index][0].set_title("Iteration vs abs(fx+1 - fx), alpha = " + str(alpha))
axis[index][0].set_xlabel("Iterations")
axis[index][0].set_ylabel("abs(fx+1 - fx)")
axis[index][1].plot(X, Yrmse)
axis[index][1].grid()
axis[index][1].set_title("Iteration vs RMSE, alpha = " + str(alpha))
axis[index][1].set_xlabel("Iterations")
axis[index][1].set_ylabel("RMSE")
index = index + 1
plt.show()
plt.close()
# ### Graph description
# <ul><li><b>Alpha = 0.1</b>
# <br>This alpha is far too large, so instead of converging the iterates diverge: both abs(fx+1 - fx) and RMSE keep growing.
# </li><li><b>Alpha = 1.7e-9</b><br>This alpha is still too large, and the same divergence is observed.</li><li><b>Alpha = 1.17e-11</b><br>This alpha is small enough for the algorithm to converge, and the RMSE settles down as well.</li></ul>
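# The step-size effect described above can be reproduced on a toy 1-D quadratic f(x) = x**2:
# gradient descent updates x <- x - alpha * 2x, i.e. x is multiplied by (1 - 2*alpha) every step,
# so |1 - 2*alpha| > 1 diverges and |1 - 2*alpha| < 1 converges. This toy example is purely
# illustrative and independent of the airfare model above.
for toyAlpha in [1.1, 0.4]:
    toyX = 1.0
    for _ in range(10):
        toyX = toyX - toyAlpha * 2 * toyX
    print("toy alpha = " + str(toyAlpha) + ", x after 10 steps = " + str(toyX))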
# ### Best model
# In[20]:
print("Best rmse for alpha grid is "+ str(leastRMSE))
print("Best rss for alpha grid is "+ str(leastRSS))
# ### Some sample predictions
# In[21]:
yPrediction = predictionLinearRegression(X_test,bestModelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Armijo Step Length Controller
# In[22]:
plt.close()
figure, ((ax1, ax2)) = plt.subplots(1, 2, sharey='none')
"""Set steplengthController to armijoStepLengthController and
stepLengthControllerParameters as any additional model parameters"""
modelParameters, xArmijo, ydiffArmijo, yRMSEArmijo, rss, rmse = linearRegressionGradientDescent(X_train, Y_train,
X_test, Y_test,
None, inputBeta, maxIterations = 1000,
stepLengthController = armijoStepLengthController,
stepLengthControllerParameters = {"delta":0.2})
figure.set_figheight(8)
print("RMSE "+ str(rmse))
ax1.plot(xArmijo, ydiffArmijo)
ax1.grid()
ax1.set_title("Iteration vs abs(fx+1 - fx)")
ax1.set_xlabel("Iterations")
ax1.set_ylabel("(fx+1 - fx)")
ax2.plot(xArmijo, yRMSEArmijo)
ax2.grid()
ax2.set_title("Iteration vs RMSE")
ax2.set_xlabel("Iterations")
ax2.set_ylabel("RMSE")
plt.show()
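# The Armijo rule backtracks the step length until the sufficient-decrease condition
# f(x - a*g) <= f(x) - delta * a * ||g||^2 holds. A minimal standalone version is shown below;
# the function name, halving factor and defaults are assumptions for illustration and not
# necessarily how armijoStepLengthController is implemented.
def armijoBacktrack(f, grad, x, delta=0.2, alphaStart=1.0, maxHalvings=50):
    a = alphaStart
    g = grad(x)
    # halve the candidate step until sufficient decrease is achieved
    while f(x - a * g) > f(x) - delta * a * np.dot(g, g) and maxHalvings > 0:
        a = a / 2.0
        maxHalvings -= 1
    return a
# Toy usage on f(x) = ||x||^2 with gradient 2x:
print("Armijo toy step length: " + str(armijoBacktrack(lambda v: np.dot(v, v), lambda v: 2 * v, np.array([3.0, -2.0]))))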
# ### Some sample predictions
# In[23]:
yPrediction = predictionLinearRegression(X_test,modelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Bold Driver Step Length Controller
# In[24]:
figure, ((ax1, ax2)) = plt.subplots(1, 2, sharey='none')
"""Set steplengthController to boldDriverStepLengthController and
stepLengthControllerParameters as any additional model parameters"""
modelParameters, xBold, yDiffBold, yRMSEBold, rss, rmse = linearRegressionGradientDescent(X_train, Y_train,
X_test, Y_test,
1.0, inputBeta, maxIterations = 1000,
stepLengthController = boldDriverStepLengthController,
stepLengthControllerParameters =
{"alphaMinus" : 0.9, "alphaPlus" : 1.5})
figure.set_figheight(8)
print("RMSE "+ str(rmse))
ax1.plot(xBold, yDiffBold)
ax1.grid()
ax1.set_title("Iteration vs abs(fx+1 - fx)")
ax1.set_xlabel("Iterations")
ax1.set_ylabel("(fx+1 - fx)")
ax2.plot(xBold, yRMSEBold)
ax2.grid()
ax2.set_title("Iteration vs RMSE")
ax2.set_xlabel("Iterations")
ax2.set_ylabel("RMSE")
plt.show()
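# The bold-driver heuristic adapts alpha between iterations: grow it by alphaPlus after an
# improving step, shrink it by alphaMinus after a worsening one (where the step is typically
# also rejected). Parameter names match the call above, but the update rule below is an assumed,
# simplified version rather than boldDriverStepLengthController itself.
def boldDriverUpdate(alpha, previousLoss, currentLoss, alphaMinus=0.9, alphaPlus=1.5):
    if currentLoss < previousLoss:
        return alpha * alphaPlus   # improvement: take bolder steps
    return alpha * alphaMinus      # loss increased: back off
print("alpha after an improving step: " + str(boldDriverUpdate(0.01, 10.0, 9.5)))
print("alpha after a worsening step: " + str(boldDriverUpdate(0.01, 10.0, 10.5)))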
# ### Sample predictions
# In[25]:
yPrediction = predictionLinearRegression(X_test,modelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Comparison
# In[26]:
plt.close()
plt.figure(figsize=(9,7))
plt.plot(bestModelX, bestModelY, label = "Gradient Descent")
plt.plot(xArmijo, yRMSEArmijo, label = "Gradient Descent with Armijo step length controller")
plt.plot(xBold, yRMSEBold, label = "Gradient Descent with Bold driver length controller")
plt.grid()
plt.xlabel("Iteration")
plt.ylabel("RMSE")
plt.title("Comparasion of constant steplength and variable steplength with controller")
plt.legend()
plt.show()
# # Gradient descent for wine data
# ## Load data
# I am combining both red wine and white wine data in a single dataframe
# In[27]:
"""Load redwine data and add a new feature type
type = 0 => RedWine
type = 1 => WhiteWine """
tmpFrame = pd.read_csv(directoryPath+"/winequality-red.csv", sep=";")
tmpFrame['type'] = 0
wineData = tmpFrame
tmpFrame = pd.read_csv(directoryPath+"/winequality-white.csv", sep=";")
tmpFrame['type'] = 1
wineData = pd.concat([wineData, tmpFrame])
wineData.head()
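# Note: pd.concat keeps each frame's original row index, so the combined wineData has duplicate
# index labels. Resetting to a clean RangeIndex is an optional extra step, taken here only to
# avoid surprises with label-based lookups; the original analysis did not rely on it.
wineData = wineData.reset_index(drop=True)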
# ## All data is numeric. Checking for NA data
# In[28]:
wineData.dropna(inplace = True)
wineData.head()
# ### Check the correlation between each feature and quality
# #### For red wine
# In[29]:
redWine = wineData.loc[wineData['type'] == 0]
for column in redWine:
if column != "quality":
print("The corelation between " + column +" vs quality is " +
str(generatePearsonCoefficient(redWine[column], redWine['quality'])))
# #### For white wine
# In[30]:
whiteWine = wineData.loc[wineData['type'] == 1]
for column in whiteWine:
if column != "quality":
print("The corelation between " + column +" vs quality is " +
str(generatePearsonCoefficient(whiteWine[column], whiteWine['quality'])))
# #### Combined
# In[31]:
for column in wineData:
if column != "quality":
print("The corelation between " + column +" vs quality is " +
str(generatePearsonCoefficient(wineData[column], wineData['quality'])))
# ### Visualizing the data
# In[32]:
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6), (ax7, ax8), (ax9, ax10), (ax11, ax12)) = plt.subplots(6,2,
sharey='none')
figure.tight_layout()
figure.set_figheight(40)
ax1.plot(wineData['fixed acidity'], wineData.quality, "ro")
ax1.grid()
ax1.set_title("fixed acidity vs quality")
ax1.set_xlabel("fixed acidity")
ax1.set_ylabel("quality")
ax2.plot(wineData['volatile acidity'], wineData.quality, "ro")
ax2.grid()
ax2.set_title("volatile acidity vs quality")
ax2.set_xlabel("volatile acidity")
ax2.set_ylabel("quality")
ax3.plot(wineData['citric acid'], wineData.quality, "ro")
ax3.grid()
ax3.set_title("citric acid vs quality")
ax3.set_xlabel("citric acid")
ax3.set_ylabel("quality")
ax4.plot(wineData['residual sugar'], wineData.quality, "ro")
ax4.grid()
ax4.set_title("residual sugar vs quality")
ax4.set_xlabel("residual sugar")
ax4.set_ylabel("quality")
ax5.plot(wineData['chlorides'], wineData.quality, "ro")
ax5.grid()
ax5.set_title("chlorides vs quality")
ax5.set_xlabel("chlorides")
ax5.set_ylabel("quality")
ax6.plot(wineData['free sulfur dioxide'], wineData.quality, "ro")
ax6.grid()
ax6.set_title("free sulfur dioxide vs quality")
ax6.set_xlabel("free sulfur dioxide")
ax6.set_ylabel("quality")
ax7.plot(wineData['total sulfur dioxide'], wineData.quality, "ro")
ax7.grid()
ax7.set_title("total sulfur dioxide vs quality")
ax7.set_xlabel("total sulfur dioxide")
ax7.set_ylabel("quality")
ax8.plot(wineData['density'], wineData.quality, "ro")
ax8.grid()
ax8.set_title("density vs quality")
ax8.set_xlabel("density")
ax8.set_ylabel("quality")
ax9.plot(wineData['pH'], wineData.quality, "ro")
ax9.grid()
ax9.set_title("pH vs quality")
ax9.set_xlabel("pH")
ax9.set_ylabel("quality")
ax10.plot(wineData['sulphates'], wineData.quality, "ro")
ax10.grid()
ax10.set_title("sulphates vs quality")
ax10.set_xlabel("sulphates")
ax10.set_ylabel("quality")
ax11.plot(wineData['alcohol'], wineData.quality, "ro")
ax11.grid()
ax11.set_title("alcohol vs quality")
ax11.set_xlabel("alcohol")
ax11.set_ylabel("quality")
ax12.plot(wineData['type'], wineData.quality, "ro")
ax12.grid()
ax12.set_title("type vs quality")
ax12.set_xlabel("type")
ax12.set_ylabel("quality")
plt.show()
# Selected features are volatile acidity, chlorides, density, alcohol and type
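# The manual selection above amounts to keeping columns whose absolute Pearson correlation with
# quality clears some small threshold; the 0.1 cutoff below is an assumed illustrative value,
# not the rule actually applied above.
candidateColumns = [c for c in wineData.columns if c != 'quality']
strongColumns = [c for c in candidateColumns
                 if abs(generatePearsonCoefficient(wineData[c], wineData['quality'])) > 0.1]
print("Columns passing the 0.1 |r| threshold: " + str(strongColumns))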
# ### Split data into trainSet and testSet
# In[33]:
trainSet, testSet = splitDataSet(wineData, 0.8)
print(trainSet.shape)
print(testSet.shape)
# ### Gradient descent with no step length controller
# In[34]:
np.random.seed(8)
inputBeta = np.random.random_sample(6)
alpha_parameterGrid = [0.1, 0.007, 1.34e-7]
X_train = trainSet[['volatile acidity', 'chlorides', 'density', 'alcohol', 'type']].values
X_test = testSet[['volatile acidity', 'chlorides', 'density', 'alcohol', 'type']].values
Y_train = trainSet['quality']
Y_test = testSet['quality']
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3,2,sharey='none')
axis = ((ax1, ax2), (ax3, ax4), (ax5, ax6))
index = 0
bestModelParameters = None
bestModelX = None
bestModelY = None
leastRMSE = None
leastRSS = None
for alpha in alpha_parameterGrid:
modelParameters, X, Ydiff, Yrmse, rss, rmse = linearRegressionGradientDescent(X_train, Y_train, X_test, Y_test,
alpha, inputBeta,
maxIterations = 1000)
if not(isnan(rmse)):
if leastRMSE is None or leastRMSE > rmse:
leastRMSE = rmse
bestModelParameters = modelParameters
leastRSS = rss
bestModelX = X
bestModelY = Yrmse
print("RMSE "+ str(rmse))
axis[index][0].plot(X, Ydiff)
axis[index][0].grid()
axis[index][0].set_title("Iteration vs abs(fx+1 - fx), alpha = " + str(alpha))
axis[index][0].set_xlabel("Iterations")
axis[index][0].set_ylabel("abs(fx+1 - fx)")
axis[index][1].plot(X, Yrmse)
axis[index][1].grid()
axis[index][1].set_title("Iteration vs RMSE, alpha = " + str(alpha))
    axis[index][1].set_xlabel("Iterations")
    axis[index][1].set_ylabel("RMSE")
index = index + 1
plt.show()
plt.close()
# ### Graph description
# <ul><li><b>Alpha = 0.1</b>
# <br>This alpha is far too large, so instead of converging the iterates diverge: both abs(fx+1 - fx) and RMSE keep growing.
# </li><li><b>Alpha = 0.007</b><br>This alpha is still too large, and the same divergence is observed.</li><li><b>Alpha = 1.34e-7</b><br>This alpha is small enough for the algorithm to converge, and the RMSE settles down as well.</li></ul>
# ### Best model
# In[35]:
print("Best rmse for alpha grid is "+ str(leastRMSE))
print("Best rss for alpha grid is "+ str(leastRSS))
# ### Sample Predictions
# In[36]:
yPrediction = predictionLinearRegression(X_test,bestModelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Armijo Step Length Controller
# In[37]:
figure, ((ax1, ax2)) = plt.subplots(1, 2, sharey='none')
modelParameters, xArmijo, ydiffArmijo, yRMSEArmijo, rss, rmse = linearRegressionGradientDescent(X_train, Y_train,
X_test, Y_test,
                                                 None, inputBeta,
maxIterations = 1000,
stepLengthController = armijoStepLengthController,
stepLengthControllerParameters = {"delta" : 0.2})
figure.set_figheight(8)
print("RMSE "+ str(rmse))
ax1.plot(xArmijo, ydiffArmijo)
ax1.grid()
ax1.set_title("Iteration vs abs(fx+1 - fx)")
ax1.set_xlabel("Iterations")
ax1.set_ylabel("(fx+1 - fx)")
ax2.plot(xArmijo, yRMSEArmijo)
ax2.grid()
ax2.set_title("Iteration vs RMSE")
ax2.set_xlabel("Iterations")
ax2.set_ylabel("RMSE")
plt.show()
# ### Sample predictions
# In[38]:
yPrediction = predictionLinearRegression(X_test,modelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Bold Driver Step Length Controller
# In[39]:
figure, ((ax1, ax2)) = plt.subplots(1, 2, sharey='none')
modelParameters, xBold, yDiffBold, yRMSEBold, rss, rmse = linearRegressionGradientDescent(X_train, Y_train,
X_test, Y_test,
1.0, inputBeta, maxIterations = 1000,
stepLengthController = boldDriverStepLengthController,
stepLengthControllerParameters =
{"alphaMinus" : 0.9, "alphaPlus" : 1.5})
figure.set_figheight(8)
print("RMSE "+ str(rmse))
ax1.plot(xBold, yDiffBold)
ax1.grid()
ax1.set_title("Iteration vs abs(fx+1 - fx)")
ax1.set_xlabel("Iterations")
ax1.set_ylabel("(fx+1 - fx)")
ax2.plot(xBold, yRMSEBold)
ax2.grid()
ax2.set_title("Iteration vs RMSE")
ax2.set_xlabel("Iterations")
ax2.set_ylabel("RMSE")
plt.show()
# ### Sample predictions
# In[40]:
yPrediction = predictionLinearRegression(X_test,modelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Comparison
# In[41]:
plt.close()
plt.figure(figsize=(9,7))
plt.plot(bestModelX, bestModelY, label = "Gradient Descent")
plt.plot(xArmijo, yRMSEArmijo, label = "Gradient Descent with Armijo step length controller")
plt.plot(xBold, yRMSEBold, label = "Gradient Descent with Bold driver length controller")
plt.grid()
plt.xlabel("Iteration")
plt.ylabel("RMSE")
plt.title("Comparasion of constant steplength and variable steplength with controller")
plt.legend()
plt.show()