code (string, 31 to 1.05M chars) | apis (list) | extract_api (string, 97 to 1.91M chars) |
---|---|---|
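Each row below pairs a source file (code) with the list of fully qualified APIs it calls (apis) and a per-call record (extract_api). Judging from the rows themselves, each extract_api entry appears to be a Python-literal tuple holding: the character span of the call, the qualified API name, the name it was called under, the parsed (args, kwargs), the reconstructed call text, an inner span, what looks like an aliased-import flag, and the import statement. A minimal parsing sketch under that assumed layout (it presumes the cell is an intact literal; a few long cells below are soft-wrapped):

import ast

def parse_extract_api(cell: str):
    """Parse one extract_api cell into a list of call records (field meanings are inferred)."""
    records = []
    for entry in ast.literal_eval(cell):
        span, api, called_as, args_kwargs, call_src, inner_span, aliased, import_stmt = entry
        records.append({
            'span': span,                    # (start, end) character offsets of the call
            'api': api,                      # e.g. 'numpy.zeros'
            'called_as': called_as,          # e.g. 'np.zeros'
            'args': args_kwargs,             # (positional args, keyword args) as literals
            'source': call_src,              # reconstructed call text
            'aliased_import': aliased,       # True when the import binds an alias
            'import': import_stmt.strip(),   # e.g. 'import numpy as np'
        })
    return records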
import numpy as np
from math import log
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error, classification_report
from math import sqrt
import json
from pprint import pprint
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--problems', type=str, help='file path to problems.json')
parser.add_argument('-s', '--submissions', type=str, help='file path to user_submissions.json')
parser.add_argument('-k', type=int, default=1, help='k-fold cross validation')
args = parser.parse_args()
with open(args.problems, 'r') as file:
problems = json.load(file)
problem_id_2_tag_ids = {problem['id']: problem['tags'] for problem in problems}
with open(args.submissions, 'r') as file:
user_submissions = json.load(file)
max_skill = max([max(problem['tags']) for problem in problems if len(problem['tags']) > 0]) + 1
print('max_skill:', max_skill)
def read_data(training, group, expand_tags=False):
x = []
y = []
for user_data in user_submissions:
user_group = user_data['group']
if training and user_group == group \
or not training and user_group != group:
continue
submissions = user_data['submissions']
user_success = {}
user_fail = {}
for sub in submissions:
tags = problem_id_2_tag_ids[sub['problem']]
if not expand_tags:
y.append(sub['verdict'])
x.append([0] * 3 * max_skill)
for tag in tags:
s = user_success.get(tag, 1)
f = user_fail.get(tag, 1)
x[-1][tag * 3 + 0] = 1
x[-1][tag * 3 + 1] = log(s)
x[-1][tag * 3 + 2] = log(f)
if sub['verdict'] == 1:
user_success[tag] = s + 1
else:
user_fail[tag] = f + 1
else:
for tag in tags:
s = user_success.get(tag, 1)
f = user_fail.get(tag, 1)
x.append([0] * 3 * max_skill)
x[-1][tag * 3 + 0] = 1
x[-1][tag * 3 + 1] = log(s)
x[-1][tag * 3 + 2] = log(f)
if sub['verdict'] == 1:
y.append(1)
user_success[tag] = s + 1
else:
y.append(0)
user_fail[tag] = f + 1
return x, y
def train(group):
model = LogisticRegression()
x, y = read_data(training=True, group=group, expand_tags=False)
print('Fitting')
model.fit(x, y)
x, y = read_data(training=False, group=group, expand_tags=False)
print('Predicting')
pred = model.predict_proba(x)[:, 1]
auc = roc_auc_score(y, pred)
rmse = sqrt(mean_squared_error(y, pred))
mae = mean_absolute_error(y, pred)
print('ROC AUC: {}'.format(auc))
print('RMSE: {}'.format(rmse))
print('MAE: {}'.format(mae))
# res = np.zeros(pred.shape[0])
# res[pred >= 0.5] = 1
# print(classification_report(y, res))
return auc, rmse, mae
def main():
k = args.k
auc = np.zeros(k)
rmse = np.zeros(k)
mae = np.zeros(k)
for i in range(k):
print('group: %d' % i)
auc[i], rmse[i], mae[i] = train(i)
print('-' * 20)
print('ROC AUC: {} (+/- {})'.format(auc.mean(), auc.std()))
print('RMSE: {} (+/- {})'.format(rmse.mean(), rmse.std()))
print('MAE: {} (+/- {})'.format(mae.mean(), mae.std()))
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"sklearn.metrics.roc_auc_score",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.mean_squared_error",
"numpy.zeros",
"math.log",
"json.load",
"sklearn.metrics.mean_absolute_error"
] |
[((295, 374), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (318, 374), False, 'import argparse\n'), ((724, 739), 'json.load', 'json.load', (['file'], {}), '(file)\n', (733, 739), False, 'import json\n'), ((888, 903), 'json.load', 'json.load', (['file'], {}), '(file)\n', (897, 903), False, 'import json\n'), ((2719, 2739), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2737, 2739), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2999, 3021), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y', 'pred'], {}), '(y, pred)\n', (3012, 3021), False, 'from sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error, classification_report\n'), ((3079, 3107), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y', 'pred'], {}), '(y, pred)\n', (3098, 3107), False, 'from sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error, classification_report\n'), ((3396, 3407), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (3404, 3407), True, 'import numpy as np\n'), ((3420, 3431), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (3428, 3431), True, 'import numpy as np\n'), ((3443, 3454), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (3451, 3454), True, 'import numpy as np\n'), ((3039, 3066), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y', 'pred'], {}), '(y, pred)\n', (3057, 3066), False, 'from sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error, classification_report\n'), ((1851, 1857), 'math.log', 'log', (['s'], {}), '(s)\n', (1854, 1857), False, 'from math import log\n'), ((1900, 1906), 'math.log', 'log', (['f'], {}), '(f)\n', (1903, 1906), False, 'from math import log\n'), ((2365, 2371), 'math.log', 'log', (['s'], {}), '(s)\n', (2368, 2371), False, 'from math import log\n'), ((2414, 2420), 'math.log', 'log', (['f'], {}), '(f)\n', (2417, 2420), False, 'from math import log\n')]
|
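An illustration (not part of the dataset sample) of the feature layout built by read_data in the sample above: each tag owns three slots, a presence flag plus the logs of the running success and failure counts, and both counts start at 1 so the logs start at 0.

from math import log

max_skill = 4                   # pretend the problem set uses 4 tags
user_success = {2: 3}           # tag 2: two earlier accepted submissions (counter starts at 1)
user_fail = {2: 2}              # tag 2: one earlier rejected submission
tags = [2]                      # the next problem is tagged only with tag 2

x = [0.0] * 3 * max_skill
for tag in tags:
    s = user_success.get(tag, 1)
    f = user_fail.get(tag, 1)
    x[tag * 3 + 0] = 1          # the tag is present on this problem
    x[tag * 3 + 1] = log(s)     # log(3) ~= 1.099
    x[tag * 3 + 2] = log(f)     # log(2) ~= 0.693
# slots 6..8 now hold tag 2's features; every other slot stays 0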
from __future__ import unicode_literals
import json
import numpy as np
from builtins import str
from abc import ABCMeta, abstractmethod
from pychemia import HAS_PYMONGO
from pychemia.utils.computing import deep_unicode
if HAS_PYMONGO:
from pychemia.db import PyChemiaDB
class Population:
    """
    General class for all optimization algorithms that use fixed and blocked
    generations
    """
    __metaclass__ = ABCMeta
def __init__(self, name, tag, use_mongo=True):
name = deep_unicode(name)
self.tag = tag
self.pcdb = None
if isinstance(name, str):
self.name = name
if use_mongo:
self.pcdb = PyChemiaDB(name)
else:
self.name = name.name
if use_mongo:
self.pcdb = name
def __iter__(self):
return self.pcdb.entries.find()
def __len__(self):
return len(self.members)
def __str__(self):
ret = ' Population Name: %s\n' % self.name
ret += ' Tag: %s\n' % self.tag
ret += ' Members: %s\n' % len(self)
return ret
def disable(self, entry_id):
self.pcdb.entries.update({'_id': entry_id}, {'$set': {'status.' + self.tag: False}})
def enable(self, entry_id):
self.pcdb.entries.update({'_id': entry_id}, {'$set': {'status.' + self.tag: True}})
def get_values(self, selection):
ret = {}
for i in selection:
ret[i] = self.value(i)
return ret
def update_properties(self, entry_id, new_properties):
self.pcdb.update(entry_id, properties=new_properties)
def set_in_properties(self, entry_id, field, value):
return self.pcdb.entries.update_one({'_id': entry_id}, {'$set': {'properties.'+field: value}})
def get_population_info(self):
return self.pcdb.db.population_info.find_one({'tag': self.tag})
def insert_entry(self, entry):
if 'structure' not in entry:
entry['structure']={}
if 'properties' not in entry:
entry['properties']={}
if 'status' not in entry:
entry['status']={}
self.pcdb.entries.insert(entry)
def get_structure(self, entry_id):
return self.pcdb.get_structure(entry_id)
def set_structure(self, entry_id, structure):
return self.pcdb.update(entry_id, structure=structure)
def get_entry(self, entry_id, projection=None, with_id=True):
"""
Return an entry identified by 'entry_id'
:param with_id:
:param projection: Insert that projection into the query
:param entry_id: A database identifier
:return:
"""
if projection is None:
projection = {}
if not with_id:
projection['_id']=0
entry = self.pcdb.entries.find_one({'_id': entry_id}, projection)
return entry
def ids_sorted(self, selection):
values = np.array([self.value(i) for i in selection])
sorted_indices = np.argsort(values)
return np.array(selection)[sorted_indices]
def load_json(self, filename):
filep = open(filename, 'r')
data = json.load(filep)
for entry in data:
self.pcdb.entries.insert(entry)
def random_population(self, n):
"""
        Create n new random structures in the population
:param n: (int) The number of new structures
:return: (list) The identifiers for the new structures
"""
return [self.add_random() for i in range(n)]
def replace_failed(self):
pass
def save_info(self):
data = self.pcdb.db.population_info.find_one({'_id': self.tag})
if data is None:
data = self.to_dict
data['_id'] = self.tag
self.pcdb.db.population_info.insert(data)
else:
self.pcdb.db.population_info.update({'_id': self.tag}, self.to_dict)
def save_json(self, filename):
ret = []
for entry_id in self.members:
ret.append(self.get_entry(entry_id, with_id=False))
filep = open(filename, 'w')
json.dump(ret, filep, sort_keys=True, indent=4, separators=(',', ': '))
def unlock_all(self, name=None):
for i in self.members:
self.pcdb.unlock(i, name=name)
@abstractmethod
def add_random(self):
pass
@abstractmethod
def check_duplicates(self, ids):
pass
@abstractmethod
def cross(self, ids):
pass
@abstractmethod
def distance(self, entry_id, entry_jd):
pass
@abstractmethod
def get_duplicates(self, ids):
pass
@abstractmethod
def from_dict(self, population_dict):
pass
@abstractmethod
def is_evaluated(self, entry_id):
pass
@abstractmethod
def move_random(self, entry_id, factor=0.2, in_place=False, kind='move'):
pass
@abstractmethod
def move(self, entry_id, entry_jd, factor=0.2, in_place=False):
pass
@abstractmethod
def new_entry(self, data, active=True):
pass
@abstractmethod
def recover(self):
pass
@abstractmethod
def value(self, entry_id):
pass
@abstractmethod
def str_entry(self, entry_id):
pass
@property
def actives(self):
return [entry['_id'] for entry in self.pcdb.entries.find({'status.' + self.tag: True}, {'_id': 1})]
@property
def actives_evaluated(self):
return [x for x in self.actives if self.is_evaluated(x)]
@property
def actives_no_evaluated(self):
return [x for x in self.actives if not self.is_evaluated(x)]
@property
def evaluated(self):
return [entry for entry in self.members if self.is_evaluated(entry)]
@property
def fraction_evaluated(self):
ret = np.sum([1 for i in self.actives if self.is_evaluated(i)])
return float(ret) / len(self.actives)
@property
def members(self):
return [x['_id'] for x in self.pcdb.entries.find({}, {'_id': 1})]
@property
def to_dict(self):
return {'name': self.name, 'tag': self.tag}
@property
def best_candidate(self):
return self.ids_sorted(self.evaluated)[0]
def refine_progressive(self, entry_id):
pass
|
[
"pychemia.utils.computing.deep_unicode",
"pychemia.db.PyChemiaDB",
"numpy.argsort",
"numpy.array",
"json.load",
"json.dump"
] |
[((501, 519), 'pychemia.utils.computing.deep_unicode', 'deep_unicode', (['name'], {}), '(name)\n', (513, 519), False, 'from pychemia.utils.computing import deep_unicode\n'), ((3036, 3054), 'numpy.argsort', 'np.argsort', (['values'], {}), '(values)\n', (3046, 3054), True, 'import numpy as np\n'), ((3193, 3209), 'json.load', 'json.load', (['filep'], {}), '(filep)\n', (3202, 3209), False, 'import json\n'), ((4151, 4222), 'json.dump', 'json.dump', (['ret', 'filep'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(ret, filep, sort_keys=True, indent=4, separators=(',', ': '))\n", (4160, 4222), False, 'import json\n'), ((3070, 3089), 'numpy.array', 'np.array', (['selection'], {}), '(selection)\n', (3078, 3089), True, 'import numpy as np\n'), ((685, 701), 'pychemia.db.PyChemiaDB', 'PyChemiaDB', (['name'], {}), '(name)\n', (695, 701), False, 'from pychemia.db import PyChemiaDB\n')]
|
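One note on the Population class above: __metaclass__ = ABCMeta only has an effect on Python 2. On Python 3 that attribute is ignored, so the @abstractmethod decorators are not enforced. A minimal sketch of the Python 3 spelling (not pychemia code):

from abc import ABCMeta, abstractmethod

class Population(metaclass=ABCMeta):
    """General class for all optimization algorithms that use fixed and blocked generations."""

    @abstractmethod
    def add_random(self):
        """Return the identifier of a newly created random member."""

# With the metaclass actually applied, instantiating a subclass that does not
# implement add_random raises TypeError instead of silently succeeding.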
import os
import os.path as osp
import pickle
import time
import numpy as np
from multiprocessing import Pool
from ..utils import get_bbox_dim
from .misc import read_img_info, change_cls_order, get_classes
def load_imgs(img_dir, ann_dir=None, classes=None, nproc=10,
def_bbox_type='poly'):
assert def_bbox_type in ['hbb', 'obb', 'poly', None]
assert osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'
if ann_dir is not None:
        print('ann_dir is not used by the load_imgs function')
print('Starting loading images information')
start_time = time.time()
imgpaths = [osp.join(img_dir, imgfile)
for imgfile in os.listdir(img_dir)]
if nproc > 1:
pool = Pool(nproc)
infos = pool.map(read_img_info, imgpaths)
pool.close()
else:
infos = list(map(read_img_info, imgpaths))
if def_bbox_type is not None:
for info in infos:
if info is None:
continue
bbox_dim = get_bbox_dim(def_bbox_type)
bboxes = np.zeros((0, bbox_dim), dtype=np.float32)
labels = np.zeros((0, ), dtype=np.int64)
info['ann'] = dict(bboxes=bboxes, labels=labels)
classes = () if classes is None else classes
end_time = time.time()
    print(f'Finishing loading images, got {len(infos)} images,',
          f'using {end_time-start_time:.3f}s.')
return infos, classes
def load_pkl(ann_dir, img_dir=None, classes=None, nproc=10):
assert osp.isfile(ann_dir), f'The {ann_dir} is not an existing pkl file!'
assert img_dir is None or osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'
print('Starting loading pkl information')
start_time = time.time()
data = pickle.load(open(ann_dir, 'rb'))
old_classes, contents = data['cls'], data['content']
if img_dir is not None:
imgpaths = [osp.join(img_dir, content['filename'])
for content in contents]
if nproc > 1:
pool = Pool(nproc)
infos = pool.map(read_img_info, imgpaths)
pool.close()
else:
infos = list(map(read_img_info, imgpaths))
for info, content in zip(infos, contents):
content.update(info)
if classes is None:
classes = old_classes
else:
classes = get_classes(classes)
change_cls_order(contents, old_classes, classes)
end_time = time.time()
    print(f'Finishing loading pkl, got {len(contents)} images,',
          f'using {end_time-start_time:.3f}s.')
return contents, classes
def save_pkl(save_dir, contents, classes):
assert save_dir.endswith('.pkl')
filepath = osp.split(save_dir)[0]
if not osp.exists(filepath):
os.makedirs(filepath)
data = dict(cls=classes, content=contents)
pickle.dump(data, open(save_dir, 'wb'))
|
[
"os.path.exists",
"os.listdir",
"os.makedirs",
"os.path.join",
"os.path.split",
"os.path.isfile",
"numpy.zeros",
"os.path.isdir",
"multiprocessing.Pool",
"time.time"
] |
[((376, 394), 'os.path.isdir', 'osp.isdir', (['img_dir'], {}), '(img_dir)\n', (385, 394), True, 'import os.path as osp\n'), ((589, 600), 'time.time', 'time.time', ([], {}), '()\n', (598, 600), False, 'import time\n'), ((1281, 1292), 'time.time', 'time.time', ([], {}), '()\n', (1290, 1292), False, 'import time\n'), ((1506, 1525), 'os.path.isfile', 'osp.isfile', (['ann_dir'], {}), '(ann_dir)\n', (1516, 1525), True, 'import os.path as osp\n'), ((1728, 1739), 'time.time', 'time.time', ([], {}), '()\n', (1737, 1739), False, 'import time\n'), ((2436, 2447), 'time.time', 'time.time', ([], {}), '()\n', (2445, 2447), False, 'import time\n'), ((617, 643), 'os.path.join', 'osp.join', (['img_dir', 'imgfile'], {}), '(img_dir, imgfile)\n', (625, 643), True, 'import os.path as osp\n'), ((729, 740), 'multiprocessing.Pool', 'Pool', (['nproc'], {}), '(nproc)\n', (733, 740), False, 'from multiprocessing import Pool\n'), ((1603, 1621), 'os.path.isdir', 'osp.isdir', (['img_dir'], {}), '(img_dir)\n', (1612, 1621), True, 'import os.path as osp\n'), ((2687, 2706), 'os.path.split', 'osp.split', (['save_dir'], {}), '(save_dir)\n', (2696, 2706), True, 'import os.path as osp\n'), ((2721, 2741), 'os.path.exists', 'osp.exists', (['filepath'], {}), '(filepath)\n', (2731, 2741), True, 'import os.path as osp\n'), ((2751, 2772), 'os.makedirs', 'os.makedirs', (['filepath'], {}), '(filepath)\n', (2762, 2772), False, 'import os\n'), ((675, 694), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (685, 694), False, 'import os\n'), ((1061, 1102), 'numpy.zeros', 'np.zeros', (['(0, bbox_dim)'], {'dtype': 'np.float32'}), '((0, bbox_dim), dtype=np.float32)\n', (1069, 1102), True, 'import numpy as np\n'), ((1124, 1154), 'numpy.zeros', 'np.zeros', (['(0,)'], {'dtype': 'np.int64'}), '((0,), dtype=np.int64)\n', (1132, 1154), True, 'import numpy as np\n'), ((1890, 1928), 'os.path.join', 'osp.join', (['img_dir', "content['filename']"], {}), "(img_dir, content['filename'])\n", (1898, 1928), True, 'import os.path as osp\n'), ((2015, 2026), 'multiprocessing.Pool', 'Pool', (['nproc'], {}), '(nproc)\n', (2019, 2026), False, 'from multiprocessing import Pool\n')]
|
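A hypothetical round trip through the helpers above (the path, fields and class names are invented, and it assumes change_cls_order is a no-op when the stored and requested class tuples match):

contents = [dict(filename='demo.png', width=512, height=512, ann={})]
classes = ('plane', 'ship')

save_pkl('/tmp/demo_ann.pkl', contents, classes)              # writes {'cls': ..., 'content': ...}
loaded, cls = load_pkl('/tmp/demo_ann.pkl', img_dir=None)     # img_dir=None skips re-reading images
print(cls, loaded[0]['filename'])                             # ('plane', 'ship') demo.png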
import numpy as np
import cv2
import math
import datetime
from datetime import timedelta as Delta
h=300
w=300
cap = cv2.VideoCapture(0)
SUN_LOC=(200,70)
SUN_RSIZE=20
ORBITAL_R=10
def Orbiral(frame,Centerloc,orbit_r,size_r,phi,color):
x_orbit=Centerloc[0]+int(orbit_r*np.cos(np.deg2rad(phi)))
y_orbit=Centerloc[1]+int(orbit_r*np.sin(np.deg2rad(phi)))
#print(f"x:{x_orbit} y:{y_orbit} phi:{int(orbitphi)}")
frame= cv2.circle(frame,(x_orbit,y_orbit),size_r, color, -1)
return frame
ORBITAL_RSIZE=3
ORBITAL_PHI=0
ORBITAL_DPHI=1 # degrees advanced per frame
dr=(SUN_RSIZE+ORBITAL_R) #*(orbitdphi) #*np.pi/180)
orbitloc=(SUN_LOC[0],SUN_LOC[1]+SUN_RSIZE+ORBITAL_R)
satsn=0
#2021/05/06 Window priority
print(cv2.WND_PROP_FULLSCREEN)
cv2.namedWindow("Frame", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("Frame",cv2.WND_PROP_FULLSCREEN,0)
Start_Time=datetime.datetime.today()
Delta_T=60
#Sat_Time_Space=Delta(minutes=1)
Sat_Time_Space=Delta(seconds=Delta_T)
Sat_dic={}
Poff=180
Roff=0
#mins=time.minute
while True:
_, frame = cap.read()
frame_time=datetime.datetime.today()
if frame_time >= Sat_Time_Space+Start_Time:
Start_Time=frame_time
dr=(SUN_RSIZE+ORBITAL_R)
Sat_dic[satsn]={"Time":Start_Time,"Phi_Offset":Poff,"Sat_Radius":dr}
print("New Sat added")
print(Sat_dic[satsn])
Poff-=30
satsn+=1
if Poff <=-180:
Poff=180
ORBITAL_R+=5
print(frame_time)
#frame = cv2.resize(frame,(h,w))
if(frame is None):
continue
frame = cv2.circle(frame,SUN_LOC,SUN_RSIZE, (0,0,250), -1)
#Satn to frame
# frame=cv2.putText(frame,str(satsn),(SUN_LOC[0]-15,SUN_LOC[1]+15),
# cv2.FONT_HERSHEY_PLAIN,3,(255,255,255))
if satsn:
for n,sat in Sat_dic.items():
frame=Orbiral(frame,SUN_LOC,sat["Sat_Radius"],ORBITAL_RSIZE,ORBITAL_PHI-sat["Phi_Offset"],(0,0,255))
#for offphi in range(-180,180,satsn):
#if n==satsn:
# for R_OFF, fadeSeconds in zip(np.linspace(ORBITAL_RSIZE,1,ORBITAL_RSIZE),np.linspace(0,Delta//2,int(ORBITAL_RSIZE))):
# if frame_time >= Sat_Time_Space+fadeSeconds:
# print("Fade:",R_OFF)
# frame=Orbiral(frame,SUN_LOC,sat["Sat_Radius"],ORBITAL_RSIZE-int(R_OFF),ORBITAL_PHI-sat["Phi_Offset"],(255,0,255))
# else:
#frame=Orbiral(frame,SUN_LOC,sat["Sat_Radius"],ORBITAL_RSIZE,ORBITAL_PHI-sat["Phi_Offset"],(0,0,255))
ORBITAL_PHI+=ORBITAL_DPHI
if ORBITAL_PHI>=360:
ORBITAL_PHI=0
#Line
#img = cv2.line(frame,logoloc,orbitloc,(255,0,0),5)
cv2.imshow('Frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release the VideoCapture object
cap.release()
cv2.destroyAllWindows()
|
[
"cv2.setWindowProperty",
"cv2.imshow",
"cv2.circle",
"numpy.deg2rad",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"datetime.datetime.today",
"datetime.timedelta",
"cv2.waitKey",
"cv2.namedWindow"
] |
[((125, 144), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (141, 144), False, 'import cv2\n'), ((775, 824), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Frame"""', 'cv2.WND_PROP_FULLSCREEN'], {}), "('Frame', cv2.WND_PROP_FULLSCREEN)\n", (790, 824), False, 'import cv2\n'), ((826, 884), 'cv2.setWindowProperty', 'cv2.setWindowProperty', (['"""Frame"""', 'cv2.WND_PROP_FULLSCREEN', '(0)'], {}), "('Frame', cv2.WND_PROP_FULLSCREEN, 0)\n", (847, 884), False, 'import cv2\n'), ((899, 924), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (922, 924), False, 'import datetime\n'), ((987, 1009), 'datetime.timedelta', 'Delta', ([], {'seconds': 'Delta_T'}), '(seconds=Delta_T)\n', (992, 1009), True, 'from datetime import timedelta as Delta\n'), ((2968, 2991), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2989, 2991), False, 'import cv2\n'), ((457, 513), 'cv2.circle', 'cv2.circle', (['frame', '(x_orbit, y_orbit)', 'size_r', 'color', '(-1)'], {}), '(frame, (x_orbit, y_orbit), size_r, color, -1)\n', (467, 513), False, 'import cv2\n'), ((1117, 1142), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1140, 1142), False, 'import datetime\n'), ((1654, 1708), 'cv2.circle', 'cv2.circle', (['frame', 'SUN_LOC', 'SUN_RSIZE', '(0, 0, 250)', '(-1)'], {}), '(frame, SUN_LOC, SUN_RSIZE, (0, 0, 250), -1)\n', (1664, 1708), False, 'import cv2\n'), ((2839, 2865), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (2849, 2865), False, 'import cv2\n'), ((2874, 2888), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2885, 2888), False, 'import cv2\n'), ((304, 319), 'numpy.deg2rad', 'np.deg2rad', (['phi'], {}), '(phi)\n', (314, 319), True, 'import numpy as np\n'), ((367, 382), 'numpy.deg2rad', 'np.deg2rad', (['phi'], {}), '(phi)\n', (377, 382), True, 'import numpy as np\n')]
|
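A worked check of the Orbiral placement used above (numbers are arbitrary): the helper converts a polar offset of (radius, angle) around the sun centre into pixel coordinates.

import numpy as np

cx, cy = 200, 70                              # SUN_LOC
r, phi = 30, 90                               # radius in pixels, angle in degrees
x = cx + int(r * np.cos(np.deg2rad(phi)))     # cos(90 deg) ~ 0 -> x = 200
y = cy + int(r * np.sin(np.deg2rad(phi)))     # sin(90 deg) = 1 -> y = 100
print(x, y)                                   # 200 100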
import cv2
import urllib
import numpy as np
import multiprocessing as mp
stream = 'http://192.168.53.114:8000/streamLow.mjpg'
stream2 = 'http://192.168.53.114:8001/streamLow.mjpg'
def procImg(str, wind, stop):
bytes = ''
stream = urllib.urlopen(str)
while not stop.is_set():
try:
bytes += stream.read(4096)
a = bytes.find('\xff\xd8')
b = bytes.find('\xff\xd9')
if wind == 'Low':
c = bytes.find('\xff\xaa\xee')
if a != -1 and b != -1:
jpg = bytes[a:b+2]
if wind == 'Low':
if c != -1:
str = bytes[b+2:c]
print(str)
bytes = bytes[c+3:]
else:
bytes = bytes[b+2:]
else:
bytes = bytes[b+2:]
i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
cv2.imshow(wind, i)
cv2.waitKey(1)
if cv2.waitKey(1) == ord('q'):
stop.set()
break
except:
pass
if __name__ == '__main__':
st = mp.Event()
lowProc = mp.Process(target = procImg, args=(stream, 'Low', st))
HighProc = mp.Process(target = procImg, args=(stream2, 'High', st))
lowProc.start()
HighProc.start()
lowProc.join()
HighProc.join()
exit(0)
|
[
"multiprocessing.Event",
"multiprocessing.Process",
"urllib.urlopen",
"cv2.imshow",
"numpy.fromstring",
"cv2.waitKey"
] |
[((241, 260), 'urllib.urlopen', 'urllib.urlopen', (['str'], {}), '(str)\n', (255, 260), False, 'import urllib\n'), ((1203, 1213), 'multiprocessing.Event', 'mp.Event', ([], {}), '()\n', (1211, 1213), True, 'import multiprocessing as mp\n'), ((1228, 1280), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'procImg', 'args': "(stream, 'Low', st)"}), "(target=procImg, args=(stream, 'Low', st))\n", (1238, 1280), True, 'import multiprocessing as mp\n'), ((1298, 1352), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'procImg', 'args': "(stream2, 'High', st)"}), "(target=procImg, args=(stream2, 'High', st))\n", (1308, 1352), True, 'import multiprocessing as mp\n'), ((991, 1010), 'cv2.imshow', 'cv2.imshow', (['wind', 'i'], {}), '(wind, i)\n', (1001, 1010), False, 'import cv2\n'), ((1027, 1041), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1038, 1041), False, 'import cv2\n'), ((1057, 1071), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1068, 1071), False, 'import cv2\n'), ((921, 955), 'numpy.fromstring', 'np.fromstring', (['jpg'], {'dtype': 'np.uint8'}), '(jpg, dtype=np.uint8)\n', (934, 955), True, 'import numpy as np\n')]
|
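The streaming sample above is Python 2 code (urllib.urlopen, byte strings handled as str, np.fromstring). A rough Python 3 sketch of the same JPEG-frame extraction loop, reusing the stream URL from the sample and with simplified error handling:

import urllib.request
import cv2
import numpy as np

def show_mjpeg(url, window):
    stream = urllib.request.urlopen(url)
    buf = b''
    while True:
        buf += stream.read(4096)
        a = buf.find(b'\xff\xd8')       # JPEG start-of-image marker
        b = buf.find(b'\xff\xd9')       # JPEG end-of-image marker
        if a != -1 and b != -1:
            jpg, buf = buf[a:b + 2], buf[b + 2:]
            frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
            cv2.imshow(window, frame)
            if cv2.waitKey(1) == ord('q'):
                break
    cv2.destroyAllWindows()

show_mjpeg('http://192.168.53.114:8000/streamLow.mjpg', 'Low')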
'''
This library is used to incorporate nucleus information into membrane segmentation results.
'''
import numpy as np
def cell_prob_with_nucleus(cell, nucleus):
'''
This function is used to figure out whether one region is cell or empty hole (without nucleus)
:param cell: segmentations results with different labels
:param nucleus: nucleus RawMemb image (after resize)
:return cell: cells without cavity
:return hole: cavity inside the embryos
'''
labels = np.unique(cell).tolist()
labels.remove(0)
hole = np.zeros_like(cell, dtype=np.uint8)
for label in labels:
one_cell_mask = (cell == label)
        # After checking all intensity values: a segmented region should be regarded as empty when its
        # nucleus intensity is lower than 100, and most such regions are exactly 0
if (nucleus[one_cell_mask].sum() == 0):
cell[one_cell_mask] = 0
hole[one_cell_mask] = 1
return cell, hole
|
[
"numpy.zeros_like",
"numpy.unique"
] |
[((496, 531), 'numpy.zeros_like', 'np.zeros_like', (['cell'], {'dtype': 'np.uint8'}), '(cell, dtype=np.uint8)\n', (509, 531), True, 'import numpy as np\n'), ((439, 454), 'numpy.unique', 'np.unique', (['cell'], {}), '(cell)\n', (448, 454), True, 'import numpy as np\n')]
|
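A tiny synthetic check of cell_prob_with_nucleus above (the values are invented): a labelled region with no nucleus signal is cleared from the cell image and recorded in the hole mask. Note the function modifies cell in place, hence the copy.

import numpy as np

cell = np.array([0, 1, 1, 2, 2], dtype=np.uint16)      # two labelled regions
nucleus = np.array([0, 5, 7, 0, 0], dtype=np.uint16)   # only region 1 overlaps nucleus signal

cell_out, hole = cell_prob_with_nucleus(cell.copy(), nucleus)
print(cell_out)   # [0 1 1 0 0]  region 2 had no nucleus signal and was cleared
print(hole)       # [0 0 0 1 1]  ...and was marked as a cavity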
import numpy as np
from .image_transforms import mat_to_gray
def rgb2hcv(Blue, Green, Red):
"""transform red green blue arrays to a color space
Parameters
----------
Blue : np.array, size=(m,n)
Blue band of satellite image
Green : np.array, size=(m,n)
Green band of satellite image
Red : np.array, size=(m,n)
Red band of satellite image
Returns
-------
V : np.array, size=(m,n)
array with dominant frequency
H : np.array, size=(m,n)
array with amount of color
C : np.array, size=(m,n)
luminance
See also
--------
rgb2yiq, rgb2ycbcr, rgb2hsi, rgb2xyz, rgb2lms
Notes
-----
.. [1] Smith, "Putting colors in order", Dr. Dobb’s Journal, pp 40, 1993.
.. [2] Tsai, "A comparative study on shadow compensation of color aerial
images in invariant color models", IEEE transactions in geoscience and
remote sensing, vol. 44(6) pp. 1661--1671, 2006.
"""
NanBol = Blue == 0
Blue, Green = mat_to_gray(Blue, NanBol), mat_to_gray(Green, NanBol)
    Red = mat_to_gray(Red, NanBol)
np.amax( np.dstack((Red, Green)))
V = 0.3*(Red + Green + Blue)
H = np.arctan2( Red-Blue, np.sqrt(3)*(V-Green))
IN = abs(np.cos(H))<= 0.2
C = np.divide(V-Green, np.cos(H))
C2 = np.divide(Red-Blue, np.sqrt(3)*np.sin(H))
C[IN] = C2[IN]
return H, C, V
def rgb2yiq(Red, Green, Blue):
"""transform red, green, blue to luminance, inphase, quadrature values
Parameters
----------
Red : np.array, size=(m,n)
red band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Blue : np.array, size=(m,n)
blue band of satellite image
Returns
-------
Y : np.array, size=(m,n)
luminance
I : np.array, size=(m,n)
inphase
Q : np.array, size=(m,n)
quadrature
See also
--------
yiq2rgb, rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2xyz, rgb2lms
Notes
-----
.. [1] <NAME> "Digital image processing", 1992.
"""
L = np.array([(+0.299, +0.587, +0.114),
(+0.596, -0.275, -0.321),
(+0.212, -0.523, +0.311)])
RGB = np.dstack((Red, Green, Blue))
YIQ = np.einsum('ij,klj->kli', L, RGB)
Y,I,Q = YIQ[:,:,0], YIQ[:,:,1], YIQ[:,:,2]
return Y, I, Q
def yiq2rgb(Y,I,Q):
"""transform luminance, inphase, quadrature values to red, green, blue
Parameters
----------
Red : np.array, size=(m,n)
red band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Blue : np.array, size=(m,n)
blue band of satellite image
Returns
-------
Y : np.array, size=(m,n)
luminance
I : np.array, size=(m,n)
inphase
Q : np.array, size=(m,n)
quadrature
See also
--------
rgb2yiq
Notes
-----
.. [1] <NAME> "Digital image processing", 1992.
"""
L = np.array([(+0.299, +0.587, +0.114),
(+0.596, -0.275, -0.321),
(+0.212, -0.523, +0.311)])
Linv = np.linalg.inv(L)
YIQ = np.dstack((Y, I, Q))
RGB = np.einsum('ij,klj->kli', Linv, YIQ)
R,G,B = RGB[:,:,0], RGB[:,:,1], RGB[:,:,2]
return R, G, B
def rgb2ycbcr(Red, Green, Blue):
"""transform red, green, blue arrays to luna and chroma values
Parameters
----------
Red : np.array, size=(m,n)
red band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Blue : np.array, size=(m,n)
blue band of satellite image
Returns
-------
Y : np.array, size=(m,n)
luma
Cb : np.array, size=(m,n)
chroma
Cr : np.array, size=(m,n)
chroma
See also
--------
rgb2hcv, rgb2yiq, rgb2hsi, rgb2xyz, rgb2lms
Notes
-----
.. [1] Tsai, "A comparative study on shadow compensation of color aerial
images in invariant color models", IEEE transactions in geoscience and
remote sensing, vol. 44(6) pp. 1661--1671, 2006.
"""
L = np.array([(+0.257, +0.504, +0.098),
(-0.148, -0.291, +0.439),
(+0.439, -0.368, -0.071)])
C = np.array([16, 128, 128])/2**8
RGB = np.dstack((Red, Green, Blue))
YCC = np.einsum('ij,klj->kli', L, RGB)
del RGB
Y = YCC[:,:,0] + C[0]
Cb= YCC[:,:,1] + C[1]
Cr= YCC[:,:,2] + C[2]
return Y, Cb, Cr
def rgb2hsi(Red, Green, Blue):
"""transform red, green, blue arrays to hue, saturation, intensity arrays
Parameters
----------
Red : np.array, size=(m,n)
red band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Blue : np.array, size=(m,n)
blue band of satellite image
Returns
-------
Hue : np.array, size=(m,n), range=0...1
Hue
Sat : np.array, size=(m,n), range=0...1
Saturation
Int : np.array, size=(m,n), range=0...1
Intensity
See also
--------
erdas2hsi, rgb2hcv, rgb2yiq, rgb2ycbcr, rgb2xyz, rgb2lms
Notes
-----
.. [1] Tsai, "A comparative study on shadow compensation of color aerial
images in invariant color models", IEEE transactions in geoscience and
remote sensing, vol. 44(6) pp. 1661--1671, 2006.
.. [2] Pratt, "Digital image processing" Wiley, 1991.
"""
if np.ptp(Red.flatten())>1:
Red = mat_to_gray(Red)
if np.ptp(Green.flatten())>1:
Green = mat_to_gray(Green)
if np.ptp(Blue.flatten())>1:
Blue = mat_to_gray(Blue)
Tsai = np.array([(1/3, 1/3, 1/3),
(-np.sqrt(6)/6, -np.sqrt(6)/6, -np.sqrt(6)/3),
(1/np.sqrt(6), 2/-np.sqrt(6), 0)])
RGB = np.dstack((Red, Green, Blue))
HSI = np.einsum('ij,klj->kli', Tsai, RGB)
Int = HSI[:,:,0]
Sat = np.sqrt(HSI[:,:,1] ** 2 + HSI[:,:,2] ** 2)
Hue = np.arctan2(HSI[:,:,1], HSI[:,:,2])/np.pi
Hue = np.remainder(Hue, 1) # bring to from -.5...+.5 to 0...1 range
return Hue, Sat, Int
def hsi2rgb(Hue, Sat, Int): #todo
Red,Green,Blue = np.zeros_like(Hue), np.zeros_like(Hue), np.zeros_like(Hue)
Class = np.ceil(Hue/3)
Color = 1 + Sat * np.divide(Hue, np.cos(np.radians(60)))
# red-green space
Sel = Class==1
Blue[Sel] = np.divide(1 - Sat[Sel], 3)
Red[Sel] = np.divide(Int[Sel] + Color[Sel], 3)
Green[Sel] = 1 - (Red[Sel] + Blue[Sel])
# green-blue space
Sel = Class==2
Red[Sel] = np.divide(1 - Sat[Sel], 3)
Green[Sel] = np.divide(Int[Sel] + Color[Sel], 3)
Blue[Sel] = 1 - (Green[Sel] + Red[Sel])
# blue-red space
Sel = Class==3
Green[Sel] = np.divide(1 - Sat[Sel], 3)
Blue[Sel] = np.divide(Int[Sel] + Color[Sel], 3)
Red[Sel] = 1 - (Blue[Sel] + Green[Sel])
return Red, Green, Blue
def erdas2hsi(Blue, Green, Red):
"""transform red, green, blue arrays to hue, saturation, intensity arrays
Parameters
----------
Blue : np.array, size=(m,n)
blue band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Red : np.array, size=(m,n)
red band of satellite image
Returns
-------
Hue : np.array, size=(m,n), float
hue
Sat : np.array, size=(m,n), float
saturation
Int : np.array, size=(m,n), float
intensity
See also
--------
rgb2hsi
Notes
-----
.. [1] ERDAS, "User handbook", 2013.
"""
if np.ptp(Red.flatten())>1:
Red = mat_to_gray(Red)
if np.ptp(Green.flatten())>1:
Green = mat_to_gray(Green)
if np.ptp(Blue.flatten())>1:
Blue = mat_to_gray(Blue)
Stack = np.dstack((Blue, Green, Red))
min_Stack = np.amin(Stack, axis=2)
max_Stack = np.amax(Stack, axis=2)
Int = (max_Stack + min_Stack)/2
Sat = np.copy(Blue)
Sat[Int==0] = 0
Sat[Int<=.5] = (max_Stack[Int<=.5] -
min_Stack[Int<=.5]) / (max_Stack[Int<=.5] +
min_Stack[Int<=.5])
Sat[Int>.5] = (max_Stack[Int>.5] -
min_Stack[Int>.5]) / ( 2 - max_Stack[Int>.5] +
min_Stack[Int>.5])
Hue = np.copy(Blue)
Hue[Blue==max_Stack] = (1/6) *(6
+ Green[Blue==max_Stack]
- Red[Blue==max_Stack])
Hue[Green==max_Stack] = (1/6) *(4
+ Red[Green==max_Stack]
- Blue[Green==max_Stack])
Hue[Red==max_Stack] = (1/6) *(2
+ Blue[Red==max_Stack]
- Green[Red==max_Stack])
return Hue, Sat, Int
def rgb2xyz(Red, Green, Blue, method='reinhardt'):
"""transform red, green, blue arrays to XYZ tristimulus values
Parameters
----------
Red : np.array, size=(m,n)
red band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Blue : np.array, size=(m,n)
blue band of satellite image
method :
'reinhardt'
XYZitu601-1 axis
'ford'
D65 illuminant
Returns
-------
X : np.array, size=(m,n)
Y : np.array, size=(m,n)
Z : np.array, size=(m,n)
See also
--------
rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2yiq, rgb2lms, xyz2lms
Notes
-----
.. [1] <NAME> al. "Color transfer between images" IEEE Computer graphics
and applications vol.21(5) pp.34-41, 2001.
.. [2] <NAME>. "Color space conversion", pp. 1--31, 1998.
"""
if method=='ford':
M = np.array([(0.4124564, 0.3575761, 0.1804375),
(0.2126729, 0.7151522, 0.0721750),
(0.0193339, 0.1191920, 0.9503041)])
else:
M = np.array([(0.5141, 0.3239, 0.1604),
(0.2651, 0.6702, 0.0641),
(0.0241, 0.1228, 0.8444)])
RGB = np.dstack((Red, Green, Blue))
XYZ = np.einsum('ij,klj->kli', M, RGB)
X,Y,Z = XYZ[:,:,0], XYZ[:,:,1], XYZ[:,:,2]
return X, Y, Z
def xyz2lms(X, Y, Z):
"""transform XYZ tristimulus arrays to LMS values
Parameters
----------
X : np.array, size=(m,n)
modified XYZitu601-1 axis
Y : np.array, size=(m,n)
modified XYZitu601-1 axis
Z : np.array, size=(m,n)
modified XYZitu601-1 axis
Returns
-------
L : np.array, size=(m,n)
M : np.array, size=(m,n)
S : np.array, size=(m,n)
See also
--------
rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2yiq, rgb2lms
Notes
-----
.. [1] <NAME> al. "Color transfer between images" IEEE Computer graphics
and applications vol.21(5) pp.34-41, 2001.
"""
N = np.array([(+0.3897, +0.6890, -0.0787),
(-0.2298, +1.1834, +0.0464),
                  (+0.0000, +0.0000, +1.0000)])  # last row is (0, 0, 1); all zeros would force S = 0
RGB = np.dstack((X, Y, Z))
LMS = np.einsum('ij,klj->kli', N, RGB)
L,M,S = LMS[:,:,0], LMS[:,:,1], LMS[:,:,2]
return L, M, S
def xyz2lab(X, Y, Z, th=0.008856):
"""transform XYZ tristimulus arrays to Lab values
Parameters
----------
X : np.array, size=(m,n)
Y : np.array, size=(m,n)
Z : np.array, size=(m,n)
Returns
-------
L : np.array, size=(m,n)
a : np.array, size=(m,n)
b : np.array, size=(m,n)
See also
--------
rgb2xyz, xyz2lms, lms2lch
Notes
-----
.. [1] Ford & Roberts. "Color space conversion", pp. 1--31, 1998.
.. [2] Silva et al. "Near real-time shadow detection and removal in aerial
motion imagery application" ISPRS journal of photogrammetry and remote
sensing, vol.140 pp.104--121, 2018.
"""
Xn,Yn,Zn = 95.047, 100.00, 108.883 # D65 illuminant
YYn = Y/Yn
L_1 = 116* YYn**(1/3.)
L_2 = 903.3 * YYn
L = L_1
L[YYn<=th] = L_2[YYn<=th]
    def f(tau, th):
        # use the argument tau, not the enclosing X
        fx = tau**(1/3.)
        fx[tau<=th] = 7.787*tau[tau<=th] + 16/116
        return fx
a = 500*( f(X/Xn, th) - f(Z/Zn, th) )
b = 200*( f(Y/Yn, th) - f(Z/Zn, th) )
return L, a, b
def lab2lch(L, a, b):
"""transform XYZ tristimulus arrays to Lab values
Parameters
----------
L : np.array, size=(m,n)
a : np.array, size=(m,n)
b : np.array, size=(m,n)
Returns
-------
C : np.array, size=(m,n)
h : np.array, size=(m,n)
See also
--------
rgb2xyz, xyz2lms, xyz2lab
Notes
-----
.. [1] Ford & Roberts. "Color space conversion", pp. 1--31, 1998.
.. [2] Silva et al. "Near real-time shadow detection and removal in aerial
motion imagery application" ISPRS journal of photogrammetry and remote
sensing, vol.140 pp.104--121, 2018.
"""
C = np.sqrt( a**2 + b**2)
# calculate angle, and let it range from 0...1
    h = ((np.arctan2(b, a) + 2*np.pi) % (2*np.pi)) / (2*np.pi)
return C, h
def rgb2lms(Red, Green, Blue):
"""transform red, green, blue arrays to XYZ tristimulus values
Parameters
----------
Red : np.array, size=(m,n)
red band of satellite image
Green : np.array, size=(m,n)
green band of satellite image
Blue : np.array, size=(m,n)
blue band of satellite image
Returns
-------
L : np.array, size=(m,n)
M : np.array, size=(m,n)
S : np.array, size=(m,n)
See also
--------
rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2yiq, rgb2xyz, xyz2lms
Notes
-----
.. [1] Reinhard et al. "Color transfer between images", 2001.
"""
I = np.array([(0.3811, 0.5783, 0.0402),
(0.1967, 0.7244, 0.0782),
(0.0241, 0.1228, 0.8444)])
RGB = np.dstack((Red, Green, Blue))
LMS = np.einsum('ij,klj->kli', I, RGB)
L,M,S = LMS[:,:,0], LMS[:,:,1], LMS[:,:,2]
return L, M, S
def lms2lab(L, M, S):
"""transform L, M, S arrays to lab color space
Parameters
----------
L : np.array, size=(m,n)
M : np.array, size=(m,n)
S : np.array, size=(m,n)
Returns
-------
l : np.array, size=(m,n)
a : np.array, size=(m,n)
b : np.array, size=(m,n)
See also
--------
rgb2hcv, rgb2ycbcr, rgb2hsi, rgb2yiq, rgb2xyz, xyz2lms
Notes
-----
.. [1] Reinhard et al. "Color transfer between images", 2001.
"""
I = np.matmul(np.array([(1/np.sqrt(3), 0, 0),
(0, 1/np.sqrt(6), 0),
(0, 0, 1/np.sqrt(2))]),
np.array([(+1, +1, +1),
(+1, +1, -2),
(+1, -1, +0)]))
LMS = np.dstack((L, M, S))
lab = np.einsum('ij,klj->kli', I, LMS)
l,a,b = lab[:,:,0], lab[:,:,1], lab[:,:,2]
return l, a, b
|
[
"numpy.radians",
"numpy.dstack",
"numpy.ceil",
"numpy.copy",
"numpy.sqrt",
"numpy.amin",
"numpy.divide",
"numpy.zeros_like",
"numpy.array",
"numpy.linalg.inv",
"numpy.einsum",
"numpy.cos",
"numpy.arctan2",
"numpy.sin",
"numpy.amax",
"numpy.remainder"
] |
[((2091, 2184), 'numpy.array', 'np.array', (['[(+0.299, +0.587, +0.114), (+0.596, -0.275, -0.321), (+0.212, -0.523, +0.311)]'], {}), '([(+0.299, +0.587, +0.114), (+0.596, -0.275, -0.321), (+0.212, -\n 0.523, +0.311)])\n', (2099, 2184), True, 'import numpy as np\n'), ((2227, 2256), 'numpy.dstack', 'np.dstack', (['(Red, Green, Blue)'], {}), '((Red, Green, Blue))\n', (2236, 2256), True, 'import numpy as np\n'), ((2267, 2299), 'numpy.einsum', 'np.einsum', (['"""ij,klj->kli"""', 'L', 'RGB'], {}), "('ij,klj->kli', L, RGB)\n", (2276, 2299), True, 'import numpy as np\n'), ((2993, 3086), 'numpy.array', 'np.array', (['[(+0.299, +0.587, +0.114), (+0.596, -0.275, -0.321), (+0.212, -0.523, +0.311)]'], {}), '([(+0.299, +0.587, +0.114), (+0.596, -0.275, -0.321), (+0.212, -\n 0.523, +0.311)])\n', (3001, 3086), True, 'import numpy as np\n'), ((3129, 3145), 'numpy.linalg.inv', 'np.linalg.inv', (['L'], {}), '(L)\n', (3142, 3145), True, 'import numpy as np\n'), ((3156, 3176), 'numpy.dstack', 'np.dstack', (['(Y, I, Q)'], {}), '((Y, I, Q))\n', (3165, 3176), True, 'import numpy as np\n'), ((3187, 3222), 'numpy.einsum', 'np.einsum', (['"""ij,klj->kli"""', 'Linv', 'YIQ'], {}), "('ij,klj->kli', Linv, YIQ)\n", (3196, 3222), True, 'import numpy as np\n'), ((4109, 4202), 'numpy.array', 'np.array', (['[(+0.257, +0.504, +0.098), (-0.148, -0.291, +0.439), (+0.439, -0.368, -0.071)]'], {}), '([(+0.257, +0.504, +0.098), (-0.148, -0.291, +0.439), (+0.439, -\n 0.368, -0.071)])\n', (4117, 4202), True, 'import numpy as np\n'), ((4283, 4312), 'numpy.dstack', 'np.dstack', (['(Red, Green, Blue)'], {}), '((Red, Green, Blue))\n', (4292, 4312), True, 'import numpy as np\n'), ((4323, 4355), 'numpy.einsum', 'np.einsum', (['"""ij,klj->kli"""', 'L', 'RGB'], {}), "('ij,klj->kli', L, RGB)\n", (4332, 4355), True, 'import numpy as np\n'), ((5780, 5809), 'numpy.dstack', 'np.dstack', (['(Red, Green, Blue)'], {}), '((Red, Green, Blue))\n', (5789, 5809), True, 'import numpy as np\n'), ((5820, 5855), 'numpy.einsum', 'np.einsum', (['"""ij,klj->kli"""', 'Tsai', 'RGB'], {}), "('ij,klj->kli', Tsai, RGB)\n", (5829, 5855), True, 'import numpy as np\n'), ((5887, 5933), 'numpy.sqrt', 'np.sqrt', (['(HSI[:, :, 1] ** 2 + HSI[:, :, 2] ** 2)'], {}), '(HSI[:, :, 1] ** 2 + HSI[:, :, 2] ** 2)\n', (5894, 5933), True, 'import numpy as np\n'), ((5991, 6011), 'numpy.remainder', 'np.remainder', (['Hue', '(1)'], {}), '(Hue, 1)\n', (6003, 6011), True, 'import numpy as np\n'), ((6205, 6221), 'numpy.ceil', 'np.ceil', (['(Hue / 3)'], {}), '(Hue / 3)\n', (6212, 6221), True, 'import numpy as np\n'), ((6339, 6365), 'numpy.divide', 'np.divide', (['(1 - Sat[Sel])', '(3)'], {}), '(1 - Sat[Sel], 3)\n', (6348, 6365), True, 'import numpy as np\n'), ((6381, 6416), 'numpy.divide', 'np.divide', (['(Int[Sel] + Color[Sel])', '(3)'], {}), '(Int[Sel] + Color[Sel], 3)\n', (6390, 6416), True, 'import numpy as np\n'), ((6519, 6545), 'numpy.divide', 'np.divide', (['(1 - Sat[Sel])', '(3)'], {}), '(1 - Sat[Sel], 3)\n', (6528, 6545), True, 'import numpy as np\n'), ((6563, 6598), 'numpy.divide', 'np.divide', (['(Int[Sel] + Color[Sel])', '(3)'], {}), '(Int[Sel] + Color[Sel], 3)\n', (6572, 6598), True, 'import numpy as np\n'), ((6701, 6727), 'numpy.divide', 'np.divide', (['(1 - Sat[Sel])', '(3)'], {}), '(1 - Sat[Sel], 3)\n', (6710, 6727), True, 'import numpy as np\n'), ((6744, 6779), 'numpy.divide', 'np.divide', (['(Int[Sel] + Color[Sel])', '(3)'], {}), '(Int[Sel] + Color[Sel], 3)\n', (6753, 6779), True, 'import numpy as np\n'), ((7712, 7741), 'numpy.dstack', 'np.dstack', (['(Blue, Green, Red)'], 
{}), '((Blue, Green, Red))\n', (7721, 7741), True, 'import numpy as np\n'), ((7758, 7780), 'numpy.amin', 'np.amin', (['Stack'], {'axis': '(2)'}), '(Stack, axis=2)\n', (7765, 7780), True, 'import numpy as np\n'), ((7797, 7819), 'numpy.amax', 'np.amax', (['Stack'], {'axis': '(2)'}), '(Stack, axis=2)\n', (7804, 7819), True, 'import numpy as np\n'), ((7867, 7880), 'numpy.copy', 'np.copy', (['Blue'], {}), '(Blue)\n', (7874, 7880), True, 'import numpy as np\n'), ((8245, 8258), 'numpy.copy', 'np.copy', (['Blue'], {}), '(Blue)\n', (8252, 8258), True, 'import numpy as np\n'), ((9996, 10025), 'numpy.dstack', 'np.dstack', (['(Red, Green, Blue)'], {}), '((Red, Green, Blue))\n', (10005, 10025), True, 'import numpy as np\n'), ((10036, 10068), 'numpy.einsum', 'np.einsum', (['"""ij,klj->kli"""', 'M', 'RGB'], {}), "('ij,klj->kli', M, RGB)\n", (10045, 10068), True, 'import numpy as np\n'), ((10785, 10877), 'numpy.array', 'np.array', (['[(+0.3897, +0.689, -0.0787), (-0.2298, +1.1834, +0.0464), (+0.0, +0.0, +0.0)]'], {}), '([(+0.3897, +0.689, -0.0787), (-0.2298, +1.1834, +0.0464), (+0.0, +\n 0.0, +0.0)])\n', (10793, 10877), True, 'import numpy as np\n'), ((10930, 10950), 'numpy.dstack', 'np.dstack', (['(X, Y, Z)'], {}), '((X, Y, Z))\n', (10939, 10950), True, 'import numpy as np\n'), ((10961, 10993), 'numpy.einsum', 'np.einsum', (['"""ij,klj->kli"""', 'N', 'RGB'], {}), "('ij,klj->kli', N, RGB)\n", (10970, 10993), True, 'import numpy as np\n'), ((12752, 12776), 'numpy.sqrt', 'np.sqrt', (['(a ** 2 + b ** 2)'], {}), '(a ** 2 + b ** 2)\n', (12759, 12776), True, 'import numpy as np\n'), ((13538, 13631), 'numpy.array', 'np.array', (['[(0.3811, 0.5783, 0.0402), (0.1967, 0.7244, 0.0782), (0.0241, 0.1228, 0.8444)]'], {}), '([(0.3811, 0.5783, 0.0402), (0.1967, 0.7244, 0.0782), (0.0241, \n 0.1228, 0.8444)])\n', (13546, 13631), True, 'import numpy as np\n'), ((13674, 13703), 'numpy.dstack', 'np.dstack', (['(Red, Green, Blue)'], {}), '((Red, Green, Blue))\n', (13683, 13703), True, 'import numpy as np\n'), ((13714, 13746), 'numpy.einsum', 'np.einsum', (['"""ij,klj->kli"""', 'I', 'RGB'], {}), "('ij,klj->kli', I, RGB)\n", (13723, 13746), True, 'import numpy as np\n'), ((14589, 14609), 'numpy.dstack', 'np.dstack', (['(L, M, S)'], {}), '((L, M, S))\n', (14598, 14609), True, 'import numpy as np\n'), ((14620, 14652), 'numpy.einsum', 'np.einsum', (['"""ij,klj->kli"""', 'I', 'LMS'], {}), "('ij,klj->kli', I, LMS)\n", (14629, 14652), True, 'import numpy as np\n'), ((1137, 1160), 'numpy.dstack', 'np.dstack', (['(Red, Green)'], {}), '((Red, Green))\n', (1146, 1160), True, 'import numpy as np\n'), ((1305, 1314), 'numpy.cos', 'np.cos', (['H'], {}), '(H)\n', (1311, 1314), True, 'import numpy as np\n'), ((4242, 4266), 'numpy.array', 'np.array', (['[16, 128, 128]'], {}), '([16, 128, 128])\n', (4250, 4266), True, 'import numpy as np\n'), ((5940, 5978), 'numpy.arctan2', 'np.arctan2', (['HSI[:, :, 1]', 'HSI[:, :, 2]'], {}), '(HSI[:, :, 1], HSI[:, :, 2])\n', (5950, 5978), True, 'import numpy as np\n'), ((6134, 6152), 'numpy.zeros_like', 'np.zeros_like', (['Hue'], {}), '(Hue)\n', (6147, 6152), True, 'import numpy as np\n'), ((6154, 6172), 'numpy.zeros_like', 'np.zeros_like', (['Hue'], {}), '(Hue)\n', (6167, 6172), True, 'import numpy as np\n'), ((6174, 6192), 'numpy.zeros_like', 'np.zeros_like', (['Hue'], {}), '(Hue)\n', (6187, 6192), True, 'import numpy as np\n'), ((9670, 9788), 'numpy.array', 'np.array', (['[(0.4124564, 0.3575761, 0.1804375), (0.2126729, 0.7151522, 0.072175), (\n 0.0193339, 0.119192, 0.9503041)]'], {}), '([(0.4124564, 
0.3575761, 0.1804375), (0.2126729, 0.7151522, \n 0.072175), (0.0193339, 0.119192, 0.9503041)])\n', (9678, 9788), True, 'import numpy as np\n'), ((9852, 9945), 'numpy.array', 'np.array', (['[(0.5141, 0.3239, 0.1604), (0.2651, 0.6702, 0.0641), (0.0241, 0.1228, 0.8444)]'], {}), '([(0.5141, 0.3239, 0.1604), (0.2651, 0.6702, 0.0641), (0.0241, \n 0.1228, 0.8444)])\n', (9860, 9945), True, 'import numpy as np\n'), ((14468, 14520), 'numpy.array', 'np.array', (['[(+1, +1, +1), (+1, +1, -2), (+1, -1, +0)]'], {}), '([(+1, +1, +1), (+1, +1, -2), (+1, -1, +0)])\n', (14476, 14520), True, 'import numpy as np\n'), ((1225, 1235), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1232, 1235), True, 'import numpy as np\n'), ((1261, 1270), 'numpy.cos', 'np.cos', (['H'], {}), '(H)\n', (1267, 1270), True, 'import numpy as np\n'), ((1345, 1355), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1352, 1355), True, 'import numpy as np\n'), ((1356, 1365), 'numpy.sin', 'np.sin', (['H'], {}), '(H)\n', (1362, 1365), True, 'import numpy as np\n'), ((5737, 5747), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (5744, 5747), True, 'import numpy as np\n'), ((6264, 6278), 'numpy.radians', 'np.radians', (['(60)'], {}), '(60)\n', (6274, 6278), True, 'import numpy as np\n'), ((5668, 5678), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (5675, 5678), True, 'import numpy as np\n'), ((5683, 5693), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (5690, 5693), True, 'import numpy as np\n'), ((5698, 5708), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (5705, 5708), True, 'import numpy as np\n'), ((5752, 5762), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (5759, 5762), True, 'import numpy as np\n'), ((12836, 12852), 'numpy.arctan2', 'np.arctan2', (['b', 'a'], {}), '(b, a)\n', (12846, 12852), True, 'import numpy as np\n'), ((14329, 14339), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (14336, 14339), True, 'import numpy as np\n'), ((14382, 14392), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (14389, 14392), True, 'import numpy as np\n'), ((14435, 14445), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (14442, 14445), True, 'import numpy as np\n')]
|
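A quick self-consistency check of the YIQ pair in the colour-space module above (synthetic bands, not from the source): rgb2yiq applies the matrix L and yiq2rgb applies its inverse, so the input should come back up to floating-point error.

import numpy as np

rng = np.random.default_rng(0)
Red, Green, Blue = rng.random((3, 4, 4))     # three 4x4 single-band images

Y, I, Q = rgb2yiq(Red, Green, Blue)
R2, G2, B2 = yiq2rgb(Y, I, Q)
assert np.allclose(np.dstack((Red, Green, Blue)), np.dstack((R2, G2, B2)))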
import sys
from os.path import exists
from unittest.mock import patch
import numpy as np # type: ignore
import pytest
from despace.spatial_sort import SortND
sys.path.append("..")
coords_1d = np.array([1.0, 0.1, 1.5, -0.3, 0.0])
sorted_coords_1d = np.array([-0.3, 0.0, 0.1, 1.0, 1.5])
coords_2d = np.array(
[[1.0, 0.1, 1.5, -0.3, 0.0], [1.5, 0.2, 1.3, -0.1, 0.7]]
).transpose()
sorted_coords_2d = np.array(
[[-0.3, -0.1], [0.0, 0.7], [0.1, 0.2], [1.0, 1.5], [1.5, 1.3]]
)
coords_3d = np.array(
[[1.2, 0.0, 1.7, -0.4, 0.1], [1.4, 0.9, 1.0, -0.6, 0.3], [2.0, 0.0, 1.4, -0.2, 0.2]]
).transpose()
sorted_coords_3d = np.array(
[
[-0.4, -0.6, -0.2],
[0.0, 0.9, 0.0],
[0.1, 0.3, 0.2],
[1.7, 1.0, 1.4],
[1.2, 1.4, 2.0],
]
)
grid_16 = np.array([[i, j] for i in range(4) for j in range(4)])
morton_grid_16 = np.array(
[
[0, 0],
[0, 1],
[1, 0],
[1, 1],
[0, 2],
[0, 3],
[1, 2],
[1, 3],
[2, 0],
[2, 1],
[3, 0],
[3, 1],
[2, 2],
[2, 3],
[3, 2],
[3, 3],
]
)
hilbert_grid_16 = np.array(
[
[0, 0],
[1, 0],
[1, 1],
[0, 1],
[0, 2],
[0, 3],
[1, 3],
[1, 2],
[2, 2],
[2, 3],
[3, 3],
[3, 2],
[3, 1],
[2, 1],
[2, 0],
[3, 0],
]
)
def test_sort():
# Init and call the sort method
t = SortND(coords_1d)
assert np.array_equal(t.sort(), sorted_coords_1d)
t = SortND(coords_2d)
assert np.array_equal(t.sort(), sorted_coords_2d)
t = SortND(coords_3d)
assert np.array_equal(t.sort(), sorted_coords_3d)
with pytest.raises(ValueError):
SortND(np.random.rand(2, 2, 2))
# init and directly call
s = SortND()
assert np.array_equal(s(coords_1d), sorted_coords_1d)
assert np.array_equal(s(coords_2d), sorted_coords_2d)
assert np.array_equal(s(coords_3d), sorted_coords_3d)
with pytest.raises(ValueError):
s(np.random.rand(2, 2, 2))
# test Morton
s = SortND(sort_type="Morton")
assert np.array_equal(s(grid_16), morton_grid_16)
# test Hilbert
s = SortND(sort_type="Hilbert")
assert np.array_equal(s(grid_16), hilbert_grid_16)
with pytest.raises(NotImplementedError):
s(np.random.rand(5, 3))
@patch("matplotlib.pyplot.show")
def test_plot(mock_show):
s = SortND()
# show plots
s(coords_1d)
assert s.plot(save_plot=False)
assert s.plot(save_plot=False, show_plot=True)
s(coords_2d)
assert s.plot(save_plot=False)
assert s.plot(save_plot=False, show_plot=True)
s(coords_3d)
assert s.plot(save_plot=False)
assert s.plot(save_plot=False, show_plot=True)
# save plots
s(coords_1d)
s.plot(save_plot=True)
assert exists("1D_5.png")
s.plot(save_plot=True, file_name="test_1d.png")
assert exists("test_1d.png")
s(coords_2d)
s.plot(save_plot=True)
assert exists("2D_5.png")
s.plot(save_plot=True, file_name="test_2d.png")
assert exists("test_2d.png")
s(coords_3d)
s.plot(save_plot=True)
assert exists("3D_5.png")
s.plot(save_plot=True, file_name="test_3d.png")
assert exists("test_3d.png")
|
[
"os.path.exists",
"numpy.random.rand",
"numpy.array",
"pytest.raises",
"unittest.mock.patch",
"sys.path.append",
"despace.spatial_sort.SortND"
] |
[((162, 183), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (177, 183), False, 'import sys\n'), ((197, 233), 'numpy.array', 'np.array', (['[1.0, 0.1, 1.5, -0.3, 0.0]'], {}), '([1.0, 0.1, 1.5, -0.3, 0.0])\n', (205, 233), True, 'import numpy as np\n'), ((253, 289), 'numpy.array', 'np.array', (['[-0.3, 0.0, 0.1, 1.0, 1.5]'], {}), '([-0.3, 0.0, 0.1, 1.0, 1.5])\n', (261, 289), True, 'import numpy as np\n'), ((407, 479), 'numpy.array', 'np.array', (['[[-0.3, -0.1], [0.0, 0.7], [0.1, 0.2], [1.0, 1.5], [1.5, 1.3]]'], {}), '([[-0.3, -0.1], [0.0, 0.7], [0.1, 0.2], [1.0, 1.5], [1.5, 1.3]])\n', (415, 479), True, 'import numpy as np\n'), ((631, 734), 'numpy.array', 'np.array', (['[[-0.4, -0.6, -0.2], [0.0, 0.9, 0.0], [0.1, 0.3, 0.2], [1.7, 1.0, 1.4], [\n 1.2, 1.4, 2.0]]'], {}), '([[-0.4, -0.6, -0.2], [0.0, 0.9, 0.0], [0.1, 0.3, 0.2], [1.7, 1.0, \n 1.4], [1.2, 1.4, 2.0]])\n', (639, 734), True, 'import numpy as np\n'), ((866, 1009), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 0], [2,\n 1], [3, 0], [3, 1], [2, 2], [2, 3], [3, 2], [3, 3]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1], [0, 2], [0, 3], [1, 2], [1, 3], [\n 2, 0], [2, 1], [3, 0], [3, 1], [2, 2], [2, 3], [3, 2], [3, 3]])\n', (874, 1009), True, 'import numpy as np\n'), ((1164, 1307), 'numpy.array', 'np.array', (['[[0, 0], [1, 0], [1, 1], [0, 1], [0, 2], [0, 3], [1, 3], [1, 2], [2, 2], [2,\n 3], [3, 3], [3, 2], [3, 1], [2, 1], [2, 0], [3, 0]]'], {}), '([[0, 0], [1, 0], [1, 1], [0, 1], [0, 2], [0, 3], [1, 3], [1, 2], [\n 2, 2], [2, 3], [3, 3], [3, 2], [3, 1], [2, 1], [2, 0], [3, 0]])\n', (1172, 1307), True, 'import numpy as np\n'), ((2406, 2437), 'unittest.mock.patch', 'patch', (['"""matplotlib.pyplot.show"""'], {}), "('matplotlib.pyplot.show')\n", (2411, 2437), False, 'from unittest.mock import patch\n'), ((1507, 1524), 'despace.spatial_sort.SortND', 'SortND', (['coords_1d'], {}), '(coords_1d)\n', (1513, 1524), False, 'from despace.spatial_sort import SortND\n'), ((1587, 1604), 'despace.spatial_sort.SortND', 'SortND', (['coords_2d'], {}), '(coords_2d)\n', (1593, 1604), False, 'from despace.spatial_sort import SortND\n'), ((1667, 1684), 'despace.spatial_sort.SortND', 'SortND', (['coords_3d'], {}), '(coords_3d)\n', (1673, 1684), False, 'from despace.spatial_sort import SortND\n'), ((1853, 1861), 'despace.spatial_sort.SortND', 'SortND', ([], {}), '()\n', (1859, 1861), False, 'from despace.spatial_sort import SortND\n'), ((2134, 2160), 'despace.spatial_sort.SortND', 'SortND', ([], {'sort_type': '"""Morton"""'}), "(sort_type='Morton')\n", (2140, 2160), False, 'from despace.spatial_sort import SortND\n'), ((2243, 2270), 'despace.spatial_sort.SortND', 'SortND', ([], {'sort_type': '"""Hilbert"""'}), "(sort_type='Hilbert')\n", (2249, 2270), False, 'from despace.spatial_sort import SortND\n'), ((2472, 2480), 'despace.spatial_sort.SortND', 'SortND', ([], {}), '()\n', (2478, 2480), False, 'from despace.spatial_sort import SortND\n'), ((2881, 2899), 'os.path.exists', 'exists', (['"""1D_5.png"""'], {}), "('1D_5.png')\n", (2887, 2899), False, 'from os.path import exists\n'), ((2963, 2984), 'os.path.exists', 'exists', (['"""test_1d.png"""'], {}), "('test_1d.png')\n", (2969, 2984), False, 'from os.path import exists\n'), ((3040, 3058), 'os.path.exists', 'exists', (['"""2D_5.png"""'], {}), "('2D_5.png')\n", (3046, 3058), False, 'from os.path import exists\n'), ((3122, 3143), 'os.path.exists', 'exists', (['"""test_2d.png"""'], {}), "('test_2d.png')\n", (3128, 3143), False, 'from os.path 
import exists\n'), ((3199, 3217), 'os.path.exists', 'exists', (['"""3D_5.png"""'], {}), "('3D_5.png')\n", (3205, 3217), False, 'from os.path import exists\n'), ((3281, 3302), 'os.path.exists', 'exists', (['"""test_3d.png"""'], {}), "('test_3d.png')\n", (3287, 3302), False, 'from os.path import exists\n'), ((303, 369), 'numpy.array', 'np.array', (['[[1.0, 0.1, 1.5, -0.3, 0.0], [1.5, 0.2, 1.3, -0.1, 0.7]]'], {}), '([[1.0, 0.1, 1.5, -0.3, 0.0], [1.5, 0.2, 1.3, -0.1, 0.7]])\n', (311, 369), True, 'import numpy as np\n'), ((499, 597), 'numpy.array', 'np.array', (['[[1.2, 0.0, 1.7, -0.4, 0.1], [1.4, 0.9, 1.0, -0.6, 0.3], [2.0, 0.0, 1.4, -\n 0.2, 0.2]]'], {}), '([[1.2, 0.0, 1.7, -0.4, 0.1], [1.4, 0.9, 1.0, -0.6, 0.3], [2.0, 0.0,\n 1.4, -0.2, 0.2]])\n', (507, 597), True, 'import numpy as np\n'), ((1748, 1773), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1761, 1773), False, 'import pytest\n'), ((2045, 2070), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2058, 2070), False, 'import pytest\n'), ((2335, 2369), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (2348, 2369), False, 'import pytest\n'), ((1790, 1813), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (1804, 1813), True, 'import numpy as np\n'), ((2082, 2105), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2096, 2105), True, 'import numpy as np\n'), ((2381, 2401), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)'], {}), '(5, 3)\n', (2395, 2401), True, 'import numpy as np\n')]
|
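despace's own implementation is not shown here, but a generic Morton (Z-order) key built by bit interleaving reproduces the morton_grid_16 ordering the test above expects, which matches that expected array exactly. A sketch reusing grid_16 and morton_grid_16 from the test:

import numpy as np

def morton_key(x, y, bits=2):
    """Interleave the low bits of x and y; x takes the higher bit of each pair."""
    key = 0
    for b in range(bits):
        key |= ((x >> b) & 1) << (2 * b + 1)
        key |= ((y >> b) & 1) << (2 * b)
    return key

order = sorted(range(len(grid_16)), key=lambda k: morton_key(*grid_16[k]))
assert np.array_equal(grid_16[order], morton_grid_16)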
import numpy as np
def main():
from fuzzi.evaluation import pate_train
from fuzzi.generated import pate_label
predictions = pate_label.outputs
truth = [x[-1] for x in pate_label.db_test]
print(predictions)
print(truth)
print('PATE accuracy = %f' % (np.mean(predictions == truth)))
|
[
"numpy.mean"
] |
[((279, 308), 'numpy.mean', 'np.mean', (['(predictions == truth)'], {}), '(predictions == truth)\n', (286, 308), True, 'import numpy as np\n')]
|
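One caveat on the accuracy line above: if pate_label.outputs is a plain Python list rather than a NumPy array, predictions == truth compares the two lists as whole objects and np.mean collapses to 0.0 or 1.0. Casting first keeps the comparison elementwise regardless of input type (a defensive variant, not the original code):

import numpy as np

def accuracy(predictions, truth):
    predictions = np.asarray(predictions)
    truth = np.asarray(truth)
    return float(np.mean(predictions == truth))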
"""
Test functions for regular module.
"""
import pytest
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.base import clone
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from adapt.parameter_based import (RegularTransferLR,
RegularTransferLC,
RegularTransferNN)
np.random.seed(0)
Xs = np.concatenate((
np.random.randn(50)*0.1,
np.random.randn(50)*0.1 + 1.,
)).reshape(-1, 1)
Xt = (np.random.randn(100) * 0.1).reshape(-1, 1)
ys_reg = np.array([0.2 * x if x<0.5 else
10 for x in Xs.ravel()]).reshape(-1, 1)
yt_reg = np.array([0.2 * x if x<0.5 else
10 for x in Xt.ravel()]).reshape(-1, 1)
ys_classif = np.sign(np.array(
[x<0 if x<0.5 else x<1 for x in Xs.ravel()]
).astype(float) - 0.5).reshape(-1, 1)
yt_classif = np.sign(np.array(
[x<0 if x<0.5 else x<1 for x in Xt.ravel()]
).astype(float) - 0.5).reshape(-1, 1)
def _get_network(input_shape=(1,), output_shape=(1,)):
model = Sequential()
model.add(Dense(np.prod(output_shape),
input_shape=input_shape,
use_bias=False))
model.compile(loss="mse", optimizer=Adam(0.1))
return model
def test_setup():
lr = LinearRegression(fit_intercept=False)
lr.fit(Xs, ys_reg)
assert np.abs(lr.coef_[0][0] - 10) < 1
lr = LogisticRegression(penalty='none', solver='lbfgs')
lr.fit(Xs, ys_classif)
assert (lr.predict(Xt) == yt_classif.ravel()).sum() < 70
def test_regularlr_fit():
np.random.seed(0)
lr = LinearRegression(fit_intercept=False)
lr.fit(Xs, ys_reg)
model = RegularTransferLR(lr, lambda_=0.)
model.fit(Xt, yt_reg)
assert np.abs(model.estimator_.coef_[0] - 0.2) < 1
assert np.abs(model.predict(Xt) - yt_reg).sum() < 2
model = RegularTransferLR(lr, lambda_=1000000)
model.fit(Xt, yt_reg)
assert np.abs(model.estimator_.coef_[0] - 10) < 1
assert np.abs(model.estimator_.coef_[0] - lr.coef_[0]) < 0.001
model = RegularTransferLR(lr, lambda_=1.)
model.fit(Xt, yt_reg)
assert np.abs(model.estimator_.coef_[0] - 4) < 1
def test_regularlr_multioutput():
np.random.seed(0)
X = np.random.randn(100, 5)+2.
y = X[:, :2]
lr = LinearRegression()
lr.fit(X, y)
model = RegularTransferLR(lr, lambda_=1.)
model.fit(X, y)
assert np.abs(model.predict(X) - y).sum() < 2
assert np.all(model.coef_.shape == (2, 5))
assert np.all(model.intercept_.shape == (2,))
assert model.score(X, y) > 0.9
def test_regularlr_error():
np.random.seed(0)
Xs = np.random.randn(100, 5)
Xt = np.random.randn(100, 5)
ys = np.random.randn(100)
yt = np.random.randn(100)
lr = LinearRegression()
lr.fit(Xs, ys)
model = RegularTransferLR(lr, lambda_=1.)
model.fit(Xt, yt)
with pytest.raises(ValueError) as excinfo:
model.fit(np.random.randn(100, 4), yt)
assert "expected 5, got 4" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
model.fit(Xt, np.random.randn(100, 2))
assert "expected 1, got 2" in str(excinfo.value)
def test_regularlc_fit():
np.random.seed(0)
lr = LogisticRegression(penalty='none', solver='lbfgs')
lr.fit(Xs, ys_classif)
model = RegularTransferLC(lr, lambda_=0)
model.fit(Xt, yt_classif)
assert (model.predict(Xt) == yt_classif.ravel()).sum() > 90
model = RegularTransferLC(lr, lambda_=100000000)
model.fit(Xt, yt_classif)
assert (model.predict(Xt) == yt_classif.ravel()).sum() < 70
assert np.abs(model.estimator_.coef_[0][0] - lr.coef_[0][0]) < 0.001
assert np.abs(model.estimator_.intercept_ - lr.intercept_[0]) < 0.001
model = RegularTransferLC(lr, lambda_=1.2)
model.fit(Xt, yt_classif)
assert (model.predict(Xt) == yt_classif.ravel()).sum() > 95
def test_regularlc_multiclass():
np.random.seed(0)
X = np.random.randn(100, 5)
y = np.zeros(len(X))
y[X[:, :2].sum(1)<0] = 1
y[X[:, 3:].sum(1)>0] = 2
lr = LogisticRegression(penalty='none', solver='lbfgs')
lr.fit(X, y)
model = RegularTransferLC(lr, lambda_=1.)
model.fit(X, y)
assert (model.predict(X) == y).sum() > 90
assert np.all(model.coef_.shape == (3, 5))
assert np.all(model.intercept_.shape == (3,))
assert model.score(X, y) > 0.9
def test_regularnn_fit():
tf.random.set_seed(0)
np.random.seed(0)
network = _get_network()
network.fit(Xs, ys_reg, epochs=100, batch_size=100, verbose=0)
model = RegularTransferNN(network, lambdas=0., optimizer=Adam(0.1))
model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
# assert np.abs(network.predict(Xs) - ys_reg).sum() < 1
assert np.sum(np.abs(network.get_weights()[0] - model.get_weights()[0])) > 4.
assert np.abs(model.predict(Xt) - yt_reg).sum() < 10
model = RegularTransferNN(network, lambdas=10000000., optimizer=Adam(0.1))
model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
assert np.sum(np.abs(network.get_weights()[0] - model.get_weights()[0])) < 0.001
assert np.abs(model.predict(Xt) - yt_reg).sum() > 10
def test_regularnn_reg():
tf.random.set_seed(0)
np.random.seed(0)
network = _get_network()
network.fit(Xs, ys_reg, epochs=100, batch_size=100, verbose=0)
model = RegularTransferNN(network, regularizer="l1")
model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
with pytest.raises(ValueError) as excinfo:
model = RegularTransferNN(network, regularizer="l3")
assert "l1' or 'l2', got, l3" in str(excinfo.value)
def test_clone():
Xs = np.random.randn(100, 5)
ys = np.random.choice(2, 100)
lr = LinearRegression()
lr.fit(Xs, ys)
model = RegularTransferLR(lr, lambda_=1.)
model.fit(Xs, ys)
new_model = clone(model)
new_model.fit(Xs, ys)
new_model.predict(Xs);
assert model is not new_model
lr = LogisticRegression(penalty='none', solver='lbfgs')
lr.fit(Xs, ys)
model = RegularTransferLC(lr, lambda_=1.)
model.fit(Xs, ys)
new_model = clone(model)
new_model.fit(Xs, ys)
new_model.predict(Xs);
assert model is not new_model
|
[
"numpy.abs",
"numpy.prod",
"adapt.parameter_based.RegularTransferLR",
"tensorflow.random.set_seed",
"tensorflow.keras.Sequential",
"adapt.parameter_based.RegularTransferNN",
"numpy.random.choice",
"sklearn.base.clone",
"sklearn.linear_model.LogisticRegression",
"tensorflow.keras.optimizers.Adam",
"pytest.raises",
"numpy.random.seed",
"adapt.parameter_based.RegularTransferLC",
"numpy.all",
"numpy.random.randn",
"sklearn.linear_model.LinearRegression"
] |
[((500, 517), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (514, 517), True, 'import numpy as np\n'), ((1173, 1185), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (1183, 1185), False, 'from tensorflow.keras import Sequential, Model\n'), ((1408, 1445), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (1424, 1445), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1526, 1576), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""none"""', 'solver': '"""lbfgs"""'}), "(penalty='none', solver='lbfgs')\n", (1544, 1576), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1697, 1714), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1711, 1714), True, 'import numpy as np\n'), ((1724, 1761), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (1740, 1761), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1797, 1831), 'adapt.parameter_based.RegularTransferLR', 'RegularTransferLR', (['lr'], {'lambda_': '(0.0)'}), '(lr, lambda_=0.0)\n', (1814, 1831), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((1985, 2023), 'adapt.parameter_based.RegularTransferLR', 'RegularTransferLR', (['lr'], {'lambda_': '(1000000)'}), '(lr, lambda_=1000000)\n', (2002, 2023), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((2188, 2222), 'adapt.parameter_based.RegularTransferLR', 'RegularTransferLR', (['lr'], {'lambda_': '(1.0)'}), '(lr, lambda_=1.0)\n', (2205, 2222), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((2349, 2366), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2363, 2366), True, 'import numpy as np\n'), ((2428, 2446), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2444, 2446), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((2476, 2510), 'adapt.parameter_based.RegularTransferLR', 'RegularTransferLR', (['lr'], {'lambda_': '(1.0)'}), '(lr, lambda_=1.0)\n', (2493, 2510), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((2591, 2626), 'numpy.all', 'np.all', (['(model.coef_.shape == (2, 5))'], {}), '(model.coef_.shape == (2, 5))\n', (2597, 2626), True, 'import numpy as np\n'), ((2638, 2676), 'numpy.all', 'np.all', (['(model.intercept_.shape == (2,))'], {}), '(model.intercept_.shape == (2,))\n', (2644, 2676), True, 'import numpy as np\n'), ((2754, 2771), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2768, 2771), True, 'import numpy as np\n'), ((2781, 2804), 'numpy.random.randn', 'np.random.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (2796, 2804), True, 'import numpy as np\n'), ((2814, 2837), 'numpy.random.randn', 'np.random.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (2829, 2837), True, 'import numpy as np\n'), ((2847, 2867), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (2862, 2867), True, 'import numpy as np\n'), ((2877, 2897), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (2892, 2897), True, 'import numpy as np\n'), ((2907, 2925), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2923, 
2925), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((2957, 2991), 'adapt.parameter_based.RegularTransferLR', 'RegularTransferLR', (['lr'], {'lambda_': '(1.0)'}), '(lr, lambda_=1.0)\n', (2974, 2991), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((3351, 3368), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3365, 3368), True, 'import numpy as np\n'), ((3378, 3428), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""none"""', 'solver': '"""lbfgs"""'}), "(penalty='none', solver='lbfgs')\n", (3396, 3428), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((3468, 3500), 'adapt.parameter_based.RegularTransferLC', 'RegularTransferLC', (['lr'], {'lambda_': '(0)'}), '(lr, lambda_=0)\n', (3485, 3500), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((3612, 3652), 'adapt.parameter_based.RegularTransferLC', 'RegularTransferLC', (['lr'], {'lambda_': '(100000000)'}), '(lr, lambda_=100000000)\n', (3629, 3652), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((3911, 3945), 'adapt.parameter_based.RegularTransferLC', 'RegularTransferLC', (['lr'], {'lambda_': '(1.2)'}), '(lr, lambda_=1.2)\n', (3928, 3945), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((4087, 4104), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4101, 4104), True, 'import numpy as np\n'), ((4113, 4136), 'numpy.random.randn', 'np.random.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (4128, 4136), True, 'import numpy as np\n'), ((4229, 4279), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""none"""', 'solver': '"""lbfgs"""'}), "(penalty='none', solver='lbfgs')\n", (4247, 4279), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((4309, 4343), 'adapt.parameter_based.RegularTransferLC', 'RegularTransferLC', (['lr'], {'lambda_': '(1.0)'}), '(lr, lambda_=1.0)\n', (4326, 4343), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((4420, 4455), 'numpy.all', 'np.all', (['(model.coef_.shape == (3, 5))'], {}), '(model.coef_.shape == (3, 5))\n', (4426, 4455), True, 'import numpy as np\n'), ((4467, 4505), 'numpy.all', 'np.all', (['(model.intercept_.shape == (3,))'], {}), '(model.intercept_.shape == (3,))\n', (4473, 4505), True, 'import numpy as np\n'), ((4573, 4594), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (4591, 4594), True, 'import tensorflow as tf\n'), ((4599, 4616), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4613, 4616), True, 'import numpy as np\n'), ((5385, 5406), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (5403, 5406), True, 'import tensorflow as tf\n'), ((5411, 5428), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5425, 5428), True, 'import numpy as np\n'), ((5537, 5581), 'adapt.parameter_based.RegularTransferNN', 'RegularTransferNN', (['network'], {'regularizer': '"""l1"""'}), "(network, regularizer='l1')\n", (5554, 5581), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((5854, 5877), 'numpy.random.randn', 'np.random.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (5869, 5877), True, 'import numpy as np\n'), 
((5887, 5911), 'numpy.random.choice', 'np.random.choice', (['(2)', '(100)'], {}), '(2, 100)\n', (5903, 5911), True, 'import numpy as np\n'), ((5921, 5939), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5937, 5939), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((5971, 6005), 'adapt.parameter_based.RegularTransferLR', 'RegularTransferLR', (['lr'], {'lambda_': '(1.0)'}), '(lr, lambda_=1.0)\n', (5988, 6005), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((6048, 6060), 'sklearn.base.clone', 'clone', (['model'], {}), '(model)\n', (6053, 6060), False, 'from sklearn.base import clone\n'), ((6162, 6212), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""none"""', 'solver': '"""lbfgs"""'}), "(penalty='none', solver='lbfgs')\n", (6180, 6212), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((6244, 6278), 'adapt.parameter_based.RegularTransferLC', 'RegularTransferLC', (['lr'], {'lambda_': '(1.0)'}), '(lr, lambda_=1.0)\n', (6261, 6278), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((6321, 6333), 'sklearn.base.clone', 'clone', (['model'], {}), '(model)\n', (6326, 6333), False, 'from sklearn.base import clone\n'), ((1480, 1507), 'numpy.abs', 'np.abs', (['(lr.coef_[0][0] - 10)'], {}), '(lr.coef_[0][0] - 10)\n', (1486, 1507), True, 'import numpy as np\n'), ((1868, 1907), 'numpy.abs', 'np.abs', (['(model.estimator_.coef_[0] - 0.2)'], {}), '(model.estimator_.coef_[0] - 0.2)\n', (1874, 1907), True, 'import numpy as np\n'), ((2061, 2099), 'numpy.abs', 'np.abs', (['(model.estimator_.coef_[0] - 10)'], {}), '(model.estimator_.coef_[0] - 10)\n', (2067, 2099), True, 'import numpy as np\n'), ((2115, 2162), 'numpy.abs', 'np.abs', (['(model.estimator_.coef_[0] - lr.coef_[0])'], {}), '(model.estimator_.coef_[0] - lr.coef_[0])\n', (2121, 2162), True, 'import numpy as np\n'), ((2259, 2296), 'numpy.abs', 'np.abs', (['(model.estimator_.coef_[0] - 4)'], {}), '(model.estimator_.coef_[0] - 4)\n', (2265, 2296), True, 'import numpy as np\n'), ((2375, 2398), 'numpy.random.randn', 'np.random.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (2390, 2398), True, 'import numpy as np\n'), ((3027, 3052), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3040, 3052), False, 'import pytest\n'), ((3180, 3205), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3193, 3205), False, 'import pytest\n'), ((3758, 3811), 'numpy.abs', 'np.abs', (['(model.estimator_.coef_[0][0] - lr.coef_[0][0])'], {}), '(model.estimator_.coef_[0][0] - lr.coef_[0][0])\n', (3764, 3811), True, 'import numpy as np\n'), ((3831, 3885), 'numpy.abs', 'np.abs', (['(model.estimator_.intercept_ - lr.intercept_[0])'], {}), '(model.estimator_.intercept_ - lr.intercept_[0])\n', (3837, 3885), True, 'import numpy as np\n'), ((5661, 5686), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5674, 5686), False, 'import pytest\n'), ((5716, 5760), 'adapt.parameter_based.RegularTransferNN', 'RegularTransferNN', (['network'], {'regularizer': '"""l3"""'}), "(network, regularizer='l3')\n", (5733, 5760), False, 'from adapt.parameter_based import RegularTransferLR, RegularTransferLC, RegularTransferNN\n'), ((627, 647), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (642, 647), True, 'import numpy as np\n'), ((1206, 1227), 'numpy.prod', 'np.prod', 
(['output_shape'], {}), '(output_shape)\n', (1213, 1227), True, 'import numpy as np\n'), ((1351, 1360), 'tensorflow.keras.optimizers.Adam', 'Adam', (['(0.1)'], {}), '(0.1)\n', (1355, 1360), False, 'from tensorflow.keras.optimizers import Adam\n'), ((3084, 3107), 'numpy.random.randn', 'np.random.randn', (['(100)', '(4)'], {}), '(100, 4)\n', (3099, 3107), True, 'import numpy as np\n'), ((3241, 3264), 'numpy.random.randn', 'np.random.randn', (['(100)', '(2)'], {}), '(100, 2)\n', (3256, 3264), True, 'import numpy as np\n'), ((4774, 4783), 'tensorflow.keras.optimizers.Adam', 'Adam', (['(0.1)'], {}), '(0.1)\n', (4778, 4783), False, 'from tensorflow.keras.optimizers import Adam\n'), ((5122, 5131), 'tensorflow.keras.optimizers.Adam', 'Adam', (['(0.1)'], {}), '(0.1)\n', (5126, 5131), False, 'from tensorflow.keras.optimizers import Adam\n'), ((544, 563), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (559, 563), True, 'import numpy as np\n'), ((573, 592), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (588, 592), True, 'import numpy as np\n')]
|
'''
Created on 4 Feb 2022
@author: ucacsjj
'''
import random
from enum import Enum
import numpy as np
from gym import Env, spaces
from .robot_states_and_actions import *
# This environment affords a much lower level control of the robot than the
# battery environment. It is partially inspired by the AI Gym Frozen Lake
# example.
class RecyclingRobotEnvironment(Env):
def __init__(self):
# The action space
self.action_space = spaces.Discrete(RobotActions.NUMBER_OF_ACTIONS)
self.observation_space = spaces.Discrete(RobotBatteryState.NUMBER_OF_STATES)
# Values
# Probability of discharging high => medium
self._alpha = 0.4
# Probability of discharging medium => low
self._beta = 0.1
# Probability of discharging low => discharged
self._gamma = 0.1
# Probability of charging up a level low => medium, medium => high
self._delta = 0.9
self._r_search = 10
self._r_wait = 5
self._r_charge = 0
self._r_discharged = -20
        # State transition table. The dictionary is keyed by (s, a) pairs. Each
        # value is a tuple giving the probabilities of ending up in
        # DISCHARGED, LOW, MEDIUM or HIGH, conditioned on s and a.
self._state_transition_table = {
# New state when a=SEARCH
(RobotBatteryState.HIGH, RobotActions.SEARCH) : \
(0, self._alpha / 3, 2 * self._alpha / 3, 1 - self._alpha),
(RobotBatteryState.MEDIUM, RobotActions.SEARCH) : \
(0, self._beta, 1 - self._beta, 0),
(RobotBatteryState.LOW, RobotActions.SEARCH) : \
(self._gamma, 1 - self._gamma, 0 , 0),
(RobotBatteryState.DISCHARGED, RobotActions.SEARCH) : \
(0, 0, 0, 0),
# a = WAIT
(RobotBatteryState.HIGH, RobotActions.WAIT) : \
(0, 0, 0, 1),
(RobotBatteryState.MEDIUM, RobotActions.WAIT) : \
(0, 0 ,1, 0),
(RobotBatteryState.LOW, RobotActions.WAIT) : \
(0, 1, 0, 0),
(RobotBatteryState.DISCHARGED, RobotActions.WAIT) : \
(0, 0, 0, 0),
# a = RECHARGE
(RobotBatteryState.HIGH, RobotActions.RECHARGE) : \
(0, 0, 0, 1),
(RobotBatteryState.MEDIUM, RobotActions.RECHARGE) : \
(0, 0, 1 - self._delta, self._delta),
(RobotBatteryState.LOW, RobotActions.RECHARGE) : \
(0, 1 - self._delta, self._delta, 0),
(RobotBatteryState.DISCHARGED, RobotActions.RECHARGE) : \
(0, 0, 0, 0)
}
# The rewards. In this case, they are only a function of the actions
# and not the state.
self._action_reward_table = {
RobotActions.SEARCH : self._r_search,
RobotActions.WAIT: self._r_wait,
RobotActions.RECHARGE: self._r_charge,
RobotActions.TERMINATE: self._r_discharged
}
# Reset to the initial state
self.reset()
# Reset the scenario to the initial state
def reset(self):
self._battery_state = RobotBatteryState.HIGH
    # Return the initial value function
def initial_value_function(self):
v_initial = np.zeros(RobotBatteryState.NUMBER_OF_STATES)
v_initial[RobotBatteryState.DISCHARGED] = self._r_discharged
return v_initial
# An initial random policy under consideration
def initial_policy(self):
pi_initial = {
RobotBatteryState.HIGH: (0, 1/3, 1/3, 1/3),
RobotBatteryState.MEDIUM: (0, 1/3, 1/3, 1/3),
RobotBatteryState.LOW: (0, 1/3, 1/3, 1/3)}
return pi_initial
def step(self, action):
# From the (s, a) pair, get the appropriate row in the table
transition_key = (self._battery_state, action)
# Sanity check
assert transition_key in self._state_transition_table
        # Get the state transition probabilities and the (action-dependent) reward
        p = self._state_transition_table[transition_key]
        r = self._action_reward_table[action]
        print(str(self._battery_state) + ":" + str(p) + str(r))
        # Work out the state transition. The tuple p is ordered
        # (DISCHARGED, LOW, MEDIUM, HIGH), matching the table above.
        sample = random.random()
        done = False
        if sample < p[0]:
            self._battery_state = RobotBatteryState.DISCHARGED
            reward = self._r_discharged
            done = True
        elif sample < p[0] + p[1]:
            self._battery_state = RobotBatteryState.LOW
            reward = r
        elif sample < p[0] + p[1] + p[2]:
            self._battery_state = RobotBatteryState.MEDIUM
            reward = r
        else:
            self._battery_state = RobotBatteryState.HIGH
            reward = r
        return self._battery_state, reward, done, {}
# Return the state, reward and probability distributions
def next_state_and_reward_distribution(self, state, action):
# From the (s, a) pair, get the appropriate row in the table
transition_key = (state, action)
# Sanity check
#print(transition_key)
assert transition_key in self._state_transition_table
s_prime = [RobotBatteryState.DISCHARGED, RobotBatteryState.LOW, \
RobotBatteryState.MEDIUM, RobotBatteryState.HIGH]
# Get the state transition probabilities and rewards
p = self._state_transition_table[transition_key]
#r = self._reward_table[transition_key]
r = self._action_reward_table[action]
return s_prime, r, p
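# Minimal usage sketch (added for illustration; not part of the original module).
# It assumes the file is executed in a context where the relative import of
# robot_states_and_actions resolves (e.g. via `python -m <package>.<module>`),
# and simply takes random actions until the battery is discharged.
if __name__ == '__main__':
    env = RecyclingRobotEnvironment()
    done = False
    while not done:
        action = random.choice([RobotActions.SEARCH, RobotActions.WAIT,
                                RobotActions.RECHARGE])
        state, reward, done, _ = env.step(action)
        print(state, reward, done)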
|
[
"random.random",
"numpy.zeros",
"gym.spaces.Discrete"
] |
[((457, 504), 'gym.spaces.Discrete', 'spaces.Discrete', (['RobotActions.NUMBER_OF_ACTIONS'], {}), '(RobotActions.NUMBER_OF_ACTIONS)\n', (472, 504), False, 'from gym import Env, spaces\n'), ((547, 598), 'gym.spaces.Discrete', 'spaces.Discrete', (['RobotBatteryState.NUMBER_OF_STATES'], {}), '(RobotBatteryState.NUMBER_OF_STATES)\n', (562, 598), False, 'from gym import Env, spaces\n'), ((3664, 3708), 'numpy.zeros', 'np.zeros', (['RobotBatteryState.NUMBER_OF_STATES'], {}), '(RobotBatteryState.NUMBER_OF_STATES)\n', (3672, 3708), True, 'import numpy as np\n'), ((4706, 4721), 'random.random', 'random.random', ([], {}), '()\n', (4719, 4721), False, 'import random\n')]
|
# coding=utf-8
# National Oceanic and Atmospheric Administration (NOAA)
# Alaskan Fisheries Science Center (AFSC)
# Resource Assessment and Conservation Engineering (RACE)
# Midwater Assessment and Conservation Engineering (MACE)
# THIS SOFTWARE AND ITS DOCUMENTATION ARE CONSIDERED TO BE IN THE PUBLIC DOMAIN
# AND THUS ARE AVAILABLE FOR UNRESTRICTED PUBLIC USE. THEY ARE FURNISHED "AS IS."
# THE AUTHORS, THE UNITED STATES GOVERNMENT, ITS INSTRUMENTALITIES, OFFICERS,
# EMPLOYEES, AND AGENTS MAKE NO WARRANTY, EXPRESS OR IMPLIED, AS TO THE USEFULNESS
# OF THE SOFTWARE AND DOCUMENTATION FOR ANY PURPOSE. THEY ASSUME NO RESPONSIBILITY
# (1) FOR THE USE OF THE SOFTWARE AND DOCUMENTATION; OR (2) TO PROVIDE TECHNICAL
# SUPPORT TO USERS.
"""
| Developed by: <NAME> <<EMAIL>>
| National Oceanic and Atmospheric Administration (NOAA)
| Alaska Fisheries Science Center (AFSC)
| Midwater Assessment and Conservation Engineering Group (MACE)
|
| Author:
| <NAME> <<EMAIL>>
| Maintained by:
| <NAME> <<EMAIL>>
"""
import numpy as np
class bottom_data(object):
'''
The bottom_data class stores data from TAG0 datagrams in Simrad raw files.
    It may be useful if other sonar file types have a similar annotation datagram.
'''
CHUNK_SIZE = 500
def __init__(self, channel_id):
# Create a counter to keep track of the number of datagrams.
self.n_datagrams = 0
# set the channel ID
self.channel_id = channel_id
        # Create arrays to store TAG0 annotation data
self.times = np.empty(bottom_data.CHUNK_SIZE, dtype='datetime64[ms]')
self.annotation_text = np.empty(bottom_data.CHUNK_SIZE, dtype=object)
def add_datagram(self, time, annotation_datagram):
"""
Add annotation text
Args:
            time - The timestamp of the annotation datagram.
            annotation_datagram (dict) - The annotation datagram dictionary returned by
                the simrad datagram parser.
"""
# Check if we need to resize our arrays.
        if self.n_datagrams == self.times.shape[0]:
            self._resize_arrays(self.times.shape[0] + bottom_data.CHUNK_SIZE)
        # Add this datagram to our data arrays
        self.times[self.n_datagrams] = annotation_datagram['timestamp']
self.annotation_text[self.n_datagrams] = annotation_datagram['text']
# Increment datagram counter.
self.n_datagrams += 1
def interpolate(self, p_data, data_type, start_time=None, end_time=None):
"""
interpolate returns the requested motion data interpolated to the ping times
that are present in the provided ping_data object.
p_data is a ping_data object that contains the ping_time vector
to interpolate to.
        data_type is a string specifying the motion attribute to interpolate, valid
values are: 'pitch', 'heave', 'roll', and 'heading'
start_time is a datetime or datetime64 object defining the starting time of the data
to return. If None, the start time is the earliest time.
end_time is a datetime or datetime64 object defining the ending time of the data
to return. If None, the end time is the latest time.
attributes is a string or list of strings specifying the motion attribute(s)
to interpolate and return. If None, all attributes are interpolated
and returned.
Returns a dictionary of numpy arrays keyed by attribute name that contain the
interpolated data for that attribute.
"""
# Create the dictionary to return
out_data = {}
# Return an empty dict if we don't contain any data
if self.n_datagrams < 1:
return out_data
# Get the index for all datagrams within the time span.
return_idxs = self.get_indices(start_time=start_time, end_time=end_time)
# Check if we're been given specific attributes to interpolate
if data_type is None:
# No - interpolate all
attributes = ['heave', 'pitch', 'roll', 'heading']
elif isinstance(data_type, str):
# We have a string, put it in a list
attributes = [data_type]
# Work through the attributes and interpolate
for attribute in attributes:
try:
# Interpolate this attribute using the time vector in the
# provided ping_data object
i_data = np.interp(p_data.ping_time.astype('d'),
self.time.astype('d'), getattr(self, attribute),
left=np.nan, right=np.nan)
out_data[attribute] = i_data[return_idxs]
except:
# Provided attribute doesn't exist
out_data[attribute] = None
return (attributes, out_data)
def get_indices(self, start_time=None, end_time=None, time_order=True):
"""
        Return the indices of data contained in the specified time range.
get_indices returns an index array containing the indices contained
in the range defined by the times provided. By default the indexes
are in time order.
Args:
start_time is a datetime or datetime64 object defining the starting
time of the data to return. If None, the start time is the
earliest time.
end_time is a datetime or datetime64 object defining the ending time
of the data to return. If None, the end time is the latest time.
time_order (bool): Control whether if indexes are returned in time
order (True) or not.
Returns: Index array containing indices of data to return.
"""
# Ensure that we have times to work with.
if start_time is None:
start_time = np.min(self.time)
if end_time is None:
end_time = np.max(self.time)
# Sort time index if returning time ordered indexes.
if time_order:
primary_index = self.time.argsort()
else:
primary_index = self.time
# Determine the indices of the data that fall within the time span
# provided.
mask = self.time[primary_index] >= start_time
mask = np.logical_and(mask, self.time[primary_index] <= end_time)
# and return the indices that are included in the specified range
return primary_index[mask]
def _resize_arrays(self, new_size):
"""
Resize arrays if needed to hold more data.
_resize_arrays expands our data arrays and is called when said arrays
are filled with data and more data need to be added.
Args:
new_size (int): New size for arrays, Since these are all 1d
arrays the value is simply an integer.
"""
self.time = np.resize(self.time,(new_size))
self.pitch = np.resize(self.pitch,(new_size))
self.roll = np.resize(self.roll,(new_size))
self.heading = np.resize(self.heading,(new_size))
self.heave = np.resize(self.heave,(new_size))
def trim(self):
"""
Trim arrays to proper size after all data are added.
trim is called when one is done adding data to the object. It
removes empty elements of the data arrays.
"""
self._resize_arrays(self.n_datagrams)
def __str__(self):
"""
Reimplemented string method that provides some basic info about the
nmea_data object.
"""
# print the class and address
msg = str(self.__class__) + " at " + str(hex(id(self))) + "\n"
# print some more info about the motion_data instance
if (self.n_datagrams > 0):
msg = "{0} MRU data start time: {1}\n".format(msg, self.time[0])
msg = "{0} MRU data end time: {1}\n".format(msg,self.time[self.n_datagrams-1])
msg = "{0} Number of datagrams: {1}\n".format(msg,self.n_datagrams+1)
else:
msg = msg + (" simrad_motion_data object contains no data\n")
return msg
|
[
"numpy.logical_and",
"numpy.max",
"numpy.resize",
"numpy.empty",
"numpy.min"
] |
[((1558, 1614), 'numpy.empty', 'np.empty', (['bottom_data.CHUNK_SIZE'], {'dtype': '"""datetime64[ms]"""'}), "(bottom_data.CHUNK_SIZE, dtype='datetime64[ms]')\n", (1566, 1614), True, 'import numpy as np\n'), ((1646, 1692), 'numpy.empty', 'np.empty', (['bottom_data.CHUNK_SIZE'], {'dtype': 'object'}), '(bottom_data.CHUNK_SIZE, dtype=object)\n', (1654, 1692), True, 'import numpy as np\n'), ((6366, 6424), 'numpy.logical_and', 'np.logical_and', (['mask', '(self.time[primary_index] <= end_time)'], {}), '(mask, self.time[primary_index] <= end_time)\n', (6380, 6424), True, 'import numpy as np\n'), ((6953, 6983), 'numpy.resize', 'np.resize', (['self.time', 'new_size'], {}), '(self.time, new_size)\n', (6962, 6983), True, 'import numpy as np\n'), ((7006, 7037), 'numpy.resize', 'np.resize', (['self.pitch', 'new_size'], {}), '(self.pitch, new_size)\n', (7015, 7037), True, 'import numpy as np\n'), ((7059, 7089), 'numpy.resize', 'np.resize', (['self.roll', 'new_size'], {}), '(self.roll, new_size)\n', (7068, 7089), True, 'import numpy as np\n'), ((7114, 7147), 'numpy.resize', 'np.resize', (['self.heading', 'new_size'], {}), '(self.heading, new_size)\n', (7123, 7147), True, 'import numpy as np\n'), ((7170, 7201), 'numpy.resize', 'np.resize', (['self.heave', 'new_size'], {}), '(self.heave, new_size)\n', (7179, 7201), True, 'import numpy as np\n'), ((5928, 5945), 'numpy.min', 'np.min', (['self.time'], {}), '(self.time)\n', (5934, 5945), True, 'import numpy as np\n'), ((5998, 6015), 'numpy.max', 'np.max', (['self.time'], {}), '(self.time)\n', (6004, 6015), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
a = np.arange(15)
out = a.reshape(5, 3)
c = np.arange(15) / 2
y_onehot = c.reshape(5, 3)
out_tensor = tf.convert_to_tensor(out, dtype=tf.float32)
y_onehot_tensor = tf.convert_to_tensor(y_onehot, dtype=tf.float32)
# y_onehot = tf.one_hot(y_onehot_tensor, depth=3) # one-hot encoding
loss1 = tf.square(out_tensor - y_onehot_tensor)
loss2 = tf.reduce_sum(loss1) / 32
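# For comparison (illustrative addition): tf.reduce_mean averages over every
# element (sum / 15 for this 5 x 3 tensor), so it only equals loss2 (sum / 32)
# when the tensor actually has 32 entries.
loss3 = tf.reduce_mean(tf.square(out_tensor - y_onehot_tensor))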
pass
|
[
"tensorflow.convert_to_tensor",
"tensorflow.reduce_sum",
"numpy.arange",
"tensorflow.square"
] |
[((48, 61), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (57, 61), True, 'import numpy as np\n'), ((148, 191), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['out'], {'dtype': 'tf.float32'}), '(out, dtype=tf.float32)\n', (168, 191), True, 'import tensorflow as tf\n'), ((210, 258), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['y_onehot'], {'dtype': 'tf.float32'}), '(y_onehot, dtype=tf.float32)\n', (230, 258), True, 'import tensorflow as tf\n'), ((331, 370), 'tensorflow.square', 'tf.square', (['(out_tensor - y_onehot_tensor)'], {}), '(out_tensor - y_onehot_tensor)\n', (340, 370), True, 'import tensorflow as tf\n'), ((89, 102), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (98, 102), True, 'import numpy as np\n'), ((379, 399), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss1'], {}), '(loss1)\n', (392, 399), True, 'import tensorflow as tf\n')]
|
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Author: <NAME> (<EMAIL>)
# Date: 05/15/2019
#
import os
import numpy as np
import math
import sys
from torch.utils.data import Sampler
__all__=['BatchSampler', 'DistributedBatchSampler', 'RandomSampler', 'SequentialSampler']
class BatchSampler(Sampler):
def __init__(self, sampler, batch_size):
self.sampler = sampler
self.batch_size = batch_size
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx)
if len(batch)==self.batch_size:
yield batch
batch = []
if len(batch)>0:
yield batch
def __len__(self):
return (len(self.sampler) + self.batch_size - 1)//self.batch_size
class DistributedBatchSampler(Sampler):
def __init__(self, sampler, rank=0, world_size = 1, drop_last = False):
self.sampler = sampler
self.rank = rank
self.world_size = world_size
self.drop_last = drop_last
def __iter__(self):
for b in self.sampler:
if len(b)%self.world_size != 0:
if self.drop_last:
break
else:
b.extend([b[0] for _ in range(self.world_size-len(b)%self.world_size)])
chunk_size = len(b)//self.world_size
yield b[self.rank*chunk_size:(self.rank+1)*chunk_size]
def __len__(self):
return len(self.sampler)
class RandomSampler(Sampler):
def __init__(self, total_samples:int, data_seed:int = 0):
self.indices = np.array(np.arange(total_samples))
self.rng = np.random.RandomState(data_seed)
def __iter__(self):
self.rng.shuffle(self.indices)
for i in self.indices:
yield i
def __len__(self):
return len(self.indices)
class SequentialSampler(Sampler):
def __init__(self, total_samples:int):
self.indices = np.array(np.arange(total_samples))
def __iter__(self):
for i in self.indices:
yield i
def __len__(self):
return len(self.indices)
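# Usage sketch (illustrative addition; the sample count, batch size and world
# size below are arbitrary choices, not values from the original module).
if __name__ == '__main__':
    sampler = RandomSampler(total_samples=10, data_seed=0)
    batches = BatchSampler(sampler, batch_size=4)
    sharded = DistributedBatchSampler(batches, rank=0, world_size=2)
    for shard in sharded:
        # Each rank receives its own contiguous slice of every batch.
        print(shard)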
|
[
"numpy.random.RandomState",
"numpy.arange"
] |
[((1557, 1589), 'numpy.random.RandomState', 'np.random.RandomState', (['data_seed'], {}), '(data_seed)\n', (1578, 1589), True, 'import numpy as np\n'), ((1516, 1540), 'numpy.arange', 'np.arange', (['total_samples'], {}), '(total_samples)\n', (1525, 1540), True, 'import numpy as np\n'), ((1844, 1868), 'numpy.arange', 'np.arange', (['total_samples'], {}), '(total_samples)\n', (1853, 1868), True, 'import numpy as np\n')]
|
# Blurring and Sharpening Images
# Import Computer Vision package - cv2
import cv2
# Import Numerical Python package - numpy as np
import numpy as np
# Read the image using imread built-in function
image = cv2.imread('image_6.jpg')
# Display original image using imshow built-in function
cv2.imshow("Original", image)
# Wait until any key is pressed
cv2.waitKey(0)
# Blurring images: Averaging, cv2.blur built-in function
# Averaging: Convolving image with normalized box filter
# Convolution: Mathematical operation on 2 functions which produces a third function.
# Normalized box filter having size 3 x 3 would be:
# (1/9) [[1, 1, 1],
# [1, 1, 1],
# [1, 1, 1]]
blur = cv2.blur(image,(9,9)) # (9 x 9) filter is used
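# The same averaging can be reproduced with an explicit normalized box kernel
# (illustrative addition; assumes the default border handling, which cv2.blur
# and cv2.filter2D share).
box_kernel = np.ones((9, 9), np.float32) / 81
blur_manual = cv2.filter2D(image, -1, box_kernel)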
# Display blurred image
cv2.imshow('Blurred', blur)
# Wait until any key is pressed
cv2.waitKey(0)
# Sharpening images: Emphasizes edges in an image
kernel = np.array([[-1,-1,-1],
[-1,9,-1],
[-1,-1,-1]])
# If we don't normalize to 1, image would be brighter or darker respectively
# cv2.filter2D is the built-in function used for sharpening images
# cv2.filter2D(image, ddepth, kernel)
sharpened = cv2.filter2D(image, -1, kernel)
# ddepth = -1, sharpened images will have same depth as original image
# Display sharpenend image
cv2.imshow('Sharpened', sharpened)
# Wait until any key is pressed
cv2.waitKey(0)
# Close all windows
cv2.destroyAllWindows()
|
[
"cv2.filter2D",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.blur",
"cv2.imread"
] |
[((218, 243), 'cv2.imread', 'cv2.imread', (['"""image_6.jpg"""'], {}), "('image_6.jpg')\n", (228, 243), False, 'import cv2\n'), ((304, 333), 'cv2.imshow', 'cv2.imshow', (['"""Original"""', 'image'], {}), "('Original', image)\n", (314, 333), False, 'import cv2\n'), ((370, 384), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (381, 384), False, 'import cv2\n'), ((704, 727), 'cv2.blur', 'cv2.blur', (['image', '(9, 9)'], {}), '(image, (9, 9))\n', (712, 727), False, 'import cv2\n'), ((780, 807), 'cv2.imshow', 'cv2.imshow', (['"""Blurred"""', 'blur'], {}), "('Blurred', blur)\n", (790, 807), False, 'import cv2\n'), ((844, 858), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (855, 858), False, 'import cv2\n'), ((924, 975), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n', (932, 975), True, 'import numpy as np\n'), ((1242, 1273), 'cv2.filter2D', 'cv2.filter2D', (['image', '(-1)', 'kernel'], {}), '(image, -1, kernel)\n', (1254, 1273), False, 'import cv2\n'), ((1377, 1411), 'cv2.imshow', 'cv2.imshow', (['"""Sharpened"""', 'sharpened'], {}), "('Sharpened', sharpened)\n", (1387, 1411), False, 'import cv2\n'), ((1449, 1463), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1460, 1463), False, 'import cv2\n'), ((1488, 1511), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1509, 1511), False, 'import cv2\n')]
|
import os.path
import scipy.io as sio
import numpy as np # for algebraic operations, matrices
import keras.models
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout # , Layer, Flatten
# from keras.layers import containers
from keras.models import model_from_json, Model, load_model
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from hyperas.distributions import choice, uniform, conditional
from hyperopt import Trials, STATUS_OK
from sklearn.metrics import confusion_matrix
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D as pool2
from keras.callbacks import EarlyStopping,ModelCheckpoint
# from keras.layers.convolutional import ZeroPadding2D as zero2d
from keras.regularizers import l2 # , activity_l2
# from theano import function
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from keras.optimizers import SGD
from keras.layers.merge import concatenate
from keras.layers import Input,add
from keras.layers.advanced_activations import PReLU,ELU
from keras.layers.pooling import GlobalAveragePooling2D
# Inception-ResNet style model for 180x180 patches
def create180180Model(patchSize):
seed=5
np.random.seed(seed)
input=Input(shape=(1,patchSize[0, 0], patchSize[0, 1]))
out1=Conv2D(filters=64,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='valid',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(input)
out2=Conv2D(filters=64,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='valid',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out1)
out2=pool2(pool_size=(2,2),data_format='channels_first')(out2)
out3=Conv2D(filters=64,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out2)
out4=Conv2D(filters=64,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out3)
out4=add([out2,out4])
out4=pool2(pool_size=(2,2),data_format='channels_first')(out4)
out_3=Conv2D(filters=128,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out4)
out_4=Conv2D(filters=128,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out_3)
out5_1=Conv2D(filters=32,kernel_size=(1,1),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out_4)
out5_2=Conv2D(filters=32,kernel_size=(1,1),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out_4)
out5_2=Conv2D(filters=128,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out5_2)
out5_3=Conv2D(filters=32,kernel_size=(1,1),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out_4)
out5_3=Conv2D(filters=128,kernel_size=(5,5),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out5_3)
out5_4=pool2(pool_size=(3,3),strides=(1,1),padding='same',data_format='channels_first')(out_4)
out5_4=Conv2D(filters=128,kernel_size=(1,1),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),activation='relu')(out5_4)
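    # Inception-style merge of the 1x1, 1x1->3x3 and 1x1->5x5 branches along the
    # channel axis; note the pooled branch out5_4 is built but not concatenated.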
out5=concatenate(inputs=[out5_1,out5_2,out5_3],axis=1)
out7=Conv2D(filters=288,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),
activation='relu')(out5)
out7=add([out5, out7])
out7=pool2(pool_size=(2,2),data_format='channels_first')(out7)
sout7=Conv2D(filters=256,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),
activation='relu')(out7)
out8=Conv2D(filters=256,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),
activation='relu')(out7)
out9=Conv2D(filters=256,kernel_size=(3,3),kernel_initializer='he_normal',weights=None,padding='same',strides=(1, 1),kernel_regularizer=l2(1e-6),
activation='relu')(out8)
out9=add([sout7, out9])
out9=pool2(pool_size=(2,2),data_format='channels_first')(out9)
out10=Flatten()(out9)
out11=Dense(units=11,
kernel_initializer='normal',
kernel_regularizer='l2',
activation='softmax')(out10)
cnn = Model(inputs=input,outputs=out11)
return cnn
def fTrain(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSizes=None, learningRates=None, iEpochs=None):
# grid search on batch_sizes and learning rates
# parse inputs
batchSizes = 64 if batchSizes is None else batchSizes
learningRates = 0.01 if learningRates is None else learningRates
iEpochs = 300 if iEpochs is None else iEpochs
for iBatch in batchSizes:
for iLearn in learningRates:
fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, iBatch, iLearn, iEpochs)
def fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSize=None, learningRate=None, iEpochs=None):
# parse inputs
batchSize = 64 if batchSize is None else batchSize
learningRate = 0.01 if learningRate is None else learningRate
iEpochs = 300 if iEpochs is None else iEpochs
print('Training CNN InceptionNet')
print('with lr = ' + str(learningRate) + ' , batchSize = ' + str(batchSize))
# save names
_, sPath = os.path.splitdrive(sOutPath)
sPath, sFilename = os.path.split(sPath)
sFilename, sExt = os.path.splitext(sFilename)
model_name = sPath + '/' + sFilename + str(patchSize[0, 0]) + str(patchSize[0, 1]) + '_lr_' + str(
learningRate) + '_bs_' + str(batchSize)
weight_name = model_name + '_weights.h5'
model_json = model_name + '_json'
model_all = model_name + '_model.h5'
model_mat = model_name + '.mat'
if (os.path.isfile(model_mat)): # no training if output file exists
return
# create model
    if patchSize[0, 0] != 180 and patchSize[0, 1] != 180:
        print('No model for patch size ' + str(patchSize[0, 0]) + 'x' + str(patchSize[0, 1]))
else:
cnn = create180180Model(patchSize)
# opti = SGD(lr=learningRate, momentum=1e-8, decay=0.1, nesterov=True);#Adag(lr=0.01, epsilon=1e-06)
opti = keras.optimizers.Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
callbacks = [EarlyStopping(monitor='val_loss', patience=20, verbose=1), ModelCheckpoint(filepath=model_name+'bestweights.hdf5',monitor='val_acc',verbose=0,save_best_only=True,save_weights_only=False)]
#callbacks = [ModelCheckpoint(filepath=model_name+'bestweights.hdf5',monitor='val_acc',verbose=0,save_best_only=True,save_weights_only=False)]
cnn.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
cnn.summary()
result = cnn.fit(X_train,
y_train,
validation_data=[X_test, y_test],
epochs=iEpochs,
batch_size=batchSize,
callbacks=callbacks,
verbose=1)
score_test, acc_test = cnn.evaluate(X_test, y_test, batch_size=batchSize )
prob_test = cnn.predict(X_test, batchSize, 0)
y_pred=np.argmax(prob_test,axis=1)
y_test=np.argmax(y_test,axis=1)
confusion_mat=confusion_matrix(y_test,y_pred)
# save model
json_string = cnn.to_json()
open(model_json, 'w').write(json_string)
# wei = cnn.get_weights()
cnn.save_weights(weight_name, overwrite=True)
# cnn.save(model_all) # keras > v0.7
# matlab
acc = result.history['acc']
loss = result.history['loss']
val_acc = result.history['val_acc']
val_loss = result.history['val_loss']
print('Saving results: ' + model_name)
sio.savemat(model_name, {'model_settings': model_json,
'model': model_all,
'weights': weight_name,
'acc': acc,
'loss': loss,
'val_acc': val_acc,
'val_loss': val_loss,
'score_test': score_test,
'acc_test': acc_test,
'prob_test': prob_test,
'confusion_mat':confusion_mat})
def fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):
weight_name = model_name[0]
#model_json = model_name[1] + '_json'
#model_all = model_name[0] + '.hdf5'
_, sPath = os.path.splitdrive(sOutPath)
sPath, sFilename = os.path.split(sOutPath)
#sFilename, sExt = os.path.splitext(sFilename)
#f = h5py.File(weight_name, 'r+')
#del f['optimizer_weights']
#f.close()
model=load_model(weight_name)
opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
callbacks = [EarlyStopping(monitor='val_loss', patience=10, verbose=1)]
#model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])
#model.load_weights(weight_name)
model.summary();
score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)
prob_pre = model.predict(X_test, batchSize, 0)
y_pred=np.argmax(prob_pre,axis=1)
y_test=np.argmax(y_test,axis=1)
confusion_mat=confusion_matrix(y_test,y_pred)
# modelSave = model_name[:-5] + '_pred.mat'
modelSave = sOutPath + '/' + sFilename + '_result.mat'
sio.savemat(modelSave, {'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test, 'confusion_mat':confusion_mat})
|
[
"keras.layers.core.Flatten",
"scipy.io.savemat",
"keras.callbacks.ModelCheckpoint",
"keras.layers.merge.concatenate",
"keras.layers.add",
"numpy.argmax",
"keras.layers.Input",
"keras.layers.core.Dense",
"keras.regularizers.l2",
"keras.models.Model",
"numpy.random.seed",
"keras.layers.convolutional.MaxPooling2D",
"keras.callbacks.EarlyStopping",
"sklearn.metrics.confusion_matrix"
] |
[((1422, 1442), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1436, 1442), True, 'import numpy as np\n'), ((1450, 1500), 'keras.layers.Input', 'Input', ([], {'shape': '(1, patchSize[0, 0], patchSize[0, 1])'}), '(shape=(1, patchSize[0, 0], patchSize[0, 1]))\n', (1455, 1500), False, 'from keras.layers import Input, add\n'), ((2250, 2267), 'keras.layers.add', 'add', (['[out2, out4]'], {}), '([out2, out4])\n', (2253, 2267), False, 'from keras.layers import Input, add\n'), ((3820, 3872), 'keras.layers.merge.concatenate', 'concatenate', ([], {'inputs': '[out5_1, out5_2, out5_3]', 'axis': '(1)'}), '(inputs=[out5_1, out5_2, out5_3], axis=1)\n', (3831, 3872), False, 'from keras.layers.merge import concatenate\n'), ((4061, 4078), 'keras.layers.add', 'add', (['[out5, out7]'], {}), '([out5, out7])\n', (4064, 4078), False, 'from keras.layers import Input, add\n'), ((4703, 4721), 'keras.layers.add', 'add', (['[sout7, out9]'], {}), '([sout7, out9])\n', (4706, 4721), False, 'from keras.layers import Input, add\n'), ((4966, 5000), 'keras.models.Model', 'Model', ([], {'inputs': 'input', 'outputs': 'out11'}), '(inputs=input, outputs=out11)\n', (4971, 5000), False, 'from keras.models import model_from_json, Model\n'), ((7656, 7684), 'numpy.argmax', 'np.argmax', (['prob_test'], {'axis': '(1)'}), '(prob_test, axis=1)\n', (7665, 7684), True, 'import numpy as np\n'), ((7692, 7717), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (7701, 7717), True, 'import numpy as np\n'), ((7732, 7764), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (7748, 7764), False, 'from sklearn.metrics import confusion_matrix\n'), ((8151, 8434), 'scipy.io.savemat', 'sio.savemat', (['model_name', "{'model_settings': model_json, 'model': model_all, 'weights': weight_name,\n 'acc': acc, 'loss': loss, 'val_acc': val_acc, 'val_loss': val_loss,\n 'score_test': score_test, 'acc_test': acc_test, 'prob_test': prob_test,\n 'confusion_mat': confusion_mat}"], {}), "(model_name, {'model_settings': model_json, 'model': model_all,\n 'weights': weight_name, 'acc': acc, 'loss': loss, 'val_acc': val_acc,\n 'val_loss': val_loss, 'score_test': score_test, 'acc_test': acc_test,\n 'prob_test': prob_test, 'confusion_mat': confusion_mat})\n", (8162, 8434), True, 'import scipy.io as sio\n'), ((9546, 9573), 'numpy.argmax', 'np.argmax', (['prob_pre'], {'axis': '(1)'}), '(prob_pre, axis=1)\n', (9555, 9573), True, 'import numpy as np\n'), ((9581, 9606), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (9590, 9606), True, 'import numpy as np\n'), ((9621, 9653), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (9637, 9653), False, 'from sklearn.metrics import confusion_matrix\n'), ((9755, 9885), 'scipy.io.savemat', 'sio.savemat', (['modelSave', "{'prob_pre': prob_pre, 'score_test': score_test, 'acc_test': acc_test,\n 'confusion_mat': confusion_mat}"], {}), "(modelSave, {'prob_pre': prob_pre, 'score_test': score_test,\n 'acc_test': acc_test, 'confusion_mat': confusion_mat})\n", (9766, 9885), True, 'import scipy.io as sio\n'), ((1847, 1900), 'keras.layers.convolutional.MaxPooling2D', 'pool2', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), "(pool_size=(2, 2), data_format='channels_first')\n", (1852, 1900), True, 'from keras.layers.convolutional import MaxPooling2D as pool2\n'), ((2273, 2326), 'keras.layers.convolutional.MaxPooling2D', 'pool2', ([], 
{'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), "(pool_size=(2, 2), data_format='channels_first')\n", (2278, 2326), True, 'from keras.layers.convolutional import MaxPooling2D as pool2\n'), ((3551, 3641), 'keras.layers.convolutional.MaxPooling2D', 'pool2', ([], {'pool_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '"""same"""', 'data_format': '"""channels_first"""'}), "(pool_size=(3, 3), strides=(1, 1), padding='same', data_format=\n 'channels_first')\n", (3556, 3641), True, 'from keras.layers.convolutional import MaxPooling2D as pool2\n'), ((4085, 4138), 'keras.layers.convolutional.MaxPooling2D', 'pool2', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), "(pool_size=(2, 2), data_format='channels_first')\n", (4090, 4138), True, 'from keras.layers.convolutional import MaxPooling2D as pool2\n'), ((4729, 4782), 'keras.layers.convolutional.MaxPooling2D', 'pool2', ([], {'pool_size': '(2, 2)', 'data_format': '"""channels_first"""'}), "(pool_size=(2, 2), data_format='channels_first')\n", (4734, 4782), True, 'from keras.layers.convolutional import MaxPooling2D as pool2\n'), ((4795, 4804), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (4802, 4804), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((4820, 4915), 'keras.layers.core.Dense', 'Dense', ([], {'units': '(11)', 'kernel_initializer': '"""normal"""', 'kernel_regularizer': '"""l2"""', 'activation': '"""softmax"""'}), "(units=11, kernel_initializer='normal', kernel_regularizer='l2',\n activation='softmax')\n", (4825, 4915), False, 'from keras.layers.core import Dense, Activation, Flatten, Dropout\n'), ((6843, 6900), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(20)', 'verbose': '(1)'}), "(monitor='val_loss', patience=20, verbose=1)\n", (6856, 6900), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((6902, 7039), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': "(model_name + 'bestweights.hdf5')", 'monitor': '"""val_acc"""', 'verbose': '(0)', 'save_best_only': '(True)', 'save_weights_only': '(False)'}), "(filepath=model_name + 'bestweights.hdf5', monitor='val_acc',\n verbose=0, save_best_only=True, save_weights_only=False)\n", (6917, 7039), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((9212, 9269), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(10)', 'verbose': '(1)'}), "(monitor='val_loss', patience=10, verbose=1)\n", (9225, 9269), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((1636, 1645), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (1638, 1645), False, 'from keras.regularizers import l2\n'), ((1807, 1816), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (1809, 1816), False, 'from keras.regularizers import l2\n'), ((2041, 2050), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (2043, 2050), False, 'from keras.regularizers import l2\n'), ((2210, 2219), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (2212, 2219), False, 'from keras.regularizers import l2\n'), ((2469, 2478), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (2471, 2478), False, 'from keras.regularizers import l2\n'), ((2640, 2649), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (2642, 2649), False, 'from keras.regularizers import l2\n'), ((2813, 2822), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), 
'(1e-06)\n', (2815, 2822), False, 'from keras.regularizers import l2\n'), ((2986, 2995), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (2988, 2995), False, 'from keras.regularizers import l2\n'), ((3159, 3168), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (3161, 3168), False, 'from keras.regularizers import l2\n'), ((3333, 3342), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (3335, 3342), False, 'from keras.regularizers import l2\n'), ((3506, 3515), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (3508, 3515), False, 'from keras.regularizers import l2\n'), ((3777, 3786), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (3779, 3786), False, 'from keras.regularizers import l2\n'), ((4007, 4016), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (4009, 4016), False, 'from keras.regularizers import l2\n'), ((4280, 4289), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (4282, 4289), False, 'from keras.regularizers import l2\n'), ((4465, 4474), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (4467, 4474), False, 'from keras.regularizers import l2\n'), ((4649, 4658), 'keras.regularizers.l2', 'l2', (['(1e-06)'], {}), '(1e-06)\n', (4651, 4658), False, 'from keras.regularizers import l2\n')]
|
import numpy as np
import random
import matplotlib.pyplot as plt
from load_data import loadLabel,loadImage
def der_activation_function(x,type):
if type==1:
return 1 - np.power(np.tanh(x), 2)
elif type==2:
return (1/(1+np.exp(-x)))*(1-1/(1+np.exp(-x)))
    else:
        # Derivative of the leaky activation used for type 3: 1 for x > 0, 0.25 otherwise.
        return np.where(x > 0, 1.0, 0.25)
def activation_function(x,type):
if type==1:
return np.tanh(x)
elif type==2:
return 1/(1+np.exp(-x))
else:
return np.where(x<=0,0.25*x,x)
def MLP_train(data,labels,hidden_nodes,epoch,test_data,test_labels):
alpha=0.002
size=data.shape
w1=np.zeros((hidden_nodes,size[1]))
for i in range(hidden_nodes):
for j in range(size[1]):
w1[i,j]=random.uniform(-0.4,0.4)
w2=np.zeros((10,hidden_nodes))
for i in range(10):
for j in range(hidden_nodes):
w2[i,j]=random.uniform(-0.4,0.4)
b1=np.zeros(hidden_nodes)
b2=np.zeros(10)
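    # Training loop: plain SGD with manual backpropagation.
    # Forward:  u = W1 x + b1,  h = f(u),  v = W2 h + b2,  output = f(v)
    # Backward: delta2 = (output - y) * f'(v),  delta1 = f'(u) * (W2^T delta2)
    # Updates:  W1 -= alpha * delta1 x^T,  W2 -= alpha * delta2 h^T,  b -= alpha * delta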
for i in range(epoch):
for x,y in zip(data,labels):
u=np.dot(w1,x.T)+b1
h=activation_function(u,3)
v=np.dot(w2,h)+b2
output=activation_function(v,3)
delta2=(output-y.T)*der_activation_function(v,3)
delta1=der_activation_function(u,3)*np.dot(w2.T,delta2)
d_w1=np.dot(np.expand_dims(delta1,axis=1),np.expand_dims(x,axis=0))
d_w2=np.dot(np.expand_dims(delta2,axis=1),np.expand_dims(h,axis=0))
w1=w1-alpha*d_w1
w2=w2-alpha*d_w2
b1=b1-alpha*delta1
b2=b2-alpha*delta2
u_test=np.dot(w1,test_data.T)+np.expand_dims(b1,axis=1)
h_test=activation_function(u_test,3)
v_test=np.dot(w2,h_test)+np.expand_dims(b2,axis=1)
output_test=activation_function(v_test.T,3)
right_times=0
for i in range(len(output_test)):
if np.argmax(output_test[i])==np.argmax(test_labels[i]):
right_times+=1
accuracy=right_times/len(output_test)
print(accuracy)
if __name__=='__main__':
train_imgs=loadImage("train-images-idx3-ubyte")
train_labels=loadLabel("train-labels-idx1-ubyte")
test_imgs=loadImage("t10k-images-idx3-ubyte")
random.seed(2)
test_labels=loadLabel("t10k-labels-idx1-ubyte")
# MLP_train(train_imgs,train_labels,25,15,test_imgs,test_labels)
for nodes in range(30,60,10):
print('activation function: PReLU')
print(nodes,"hidden nodes:")
MLP_train(train_imgs, train_labels, nodes, 30, test_imgs, test_labels)
|
[
"random.uniform",
"numpy.where",
"load_data.loadImage",
"numpy.tanh",
"random.seed",
"numpy.argmax",
"numpy.exp",
"numpy.zeros",
"numpy.dot",
"numpy.expand_dims",
"load_data.loadLabel"
] |
[((630, 663), 'numpy.zeros', 'np.zeros', (['(hidden_nodes, size[1])'], {}), '((hidden_nodes, size[1]))\n', (638, 663), True, 'import numpy as np\n'), ((782, 810), 'numpy.zeros', 'np.zeros', (['(10, hidden_nodes)'], {}), '((10, hidden_nodes))\n', (790, 810), True, 'import numpy as np\n'), ((924, 946), 'numpy.zeros', 'np.zeros', (['hidden_nodes'], {}), '(hidden_nodes)\n', (932, 946), True, 'import numpy as np\n'), ((954, 966), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (962, 966), True, 'import numpy as np\n'), ((2043, 2079), 'load_data.loadImage', 'loadImage', (['"""train-images-idx3-ubyte"""'], {}), "('train-images-idx3-ubyte')\n", (2052, 2079), False, 'from load_data import loadLabel, loadImage\n'), ((2097, 2133), 'load_data.loadLabel', 'loadLabel', (['"""train-labels-idx1-ubyte"""'], {}), "('train-labels-idx1-ubyte')\n", (2106, 2133), False, 'from load_data import loadLabel, loadImage\n'), ((2148, 2183), 'load_data.loadImage', 'loadImage', (['"""t10k-images-idx3-ubyte"""'], {}), "('t10k-images-idx3-ubyte')\n", (2157, 2183), False, 'from load_data import loadLabel, loadImage\n'), ((2188, 2202), 'random.seed', 'random.seed', (['(2)'], {}), '(2)\n', (2199, 2202), False, 'import random\n'), ((2219, 2254), 'load_data.loadLabel', 'loadLabel', (['"""t10k-labels-idx1-ubyte"""'], {}), "('t10k-labels-idx1-ubyte')\n", (2228, 2254), False, 'from load_data import loadLabel, loadImage\n'), ((407, 417), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (414, 417), True, 'import numpy as np\n'), ((1598, 1621), 'numpy.dot', 'np.dot', (['w1', 'test_data.T'], {}), '(w1, test_data.T)\n', (1604, 1621), True, 'import numpy as np\n'), ((1621, 1647), 'numpy.expand_dims', 'np.expand_dims', (['b1'], {'axis': '(1)'}), '(b1, axis=1)\n', (1635, 1647), True, 'import numpy as np\n'), ((1699, 1717), 'numpy.dot', 'np.dot', (['w2', 'h_test'], {}), '(w2, h_test)\n', (1705, 1717), True, 'import numpy as np\n'), ((1717, 1743), 'numpy.expand_dims', 'np.expand_dims', (['b2'], {'axis': '(1)'}), '(b2, axis=1)\n', (1731, 1743), True, 'import numpy as np\n'), ((493, 522), 'numpy.where', 'np.where', (['(x <= 0)', '(0.25 * x)', 'x'], {}), '(x <= 0, 0.25 * x, x)\n', (501, 522), True, 'import numpy as np\n'), ((750, 775), 'random.uniform', 'random.uniform', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (764, 775), False, 'import random\n'), ((892, 917), 'random.uniform', 'random.uniform', (['(-0.4)', '(0.4)'], {}), '(-0.4, 0.4)\n', (906, 917), False, 'import random\n'), ((1858, 1883), 'numpy.argmax', 'np.argmax', (['output_test[i]'], {}), '(output_test[i])\n', (1867, 1883), True, 'import numpy as np\n'), ((1885, 1910), 'numpy.argmax', 'np.argmax', (['test_labels[i]'], {}), '(test_labels[i])\n', (1894, 1910), True, 'import numpy as np\n'), ((189, 199), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (196, 199), True, 'import numpy as np\n'), ((1045, 1060), 'numpy.dot', 'np.dot', (['w1', 'x.T'], {}), '(w1, x.T)\n', (1051, 1060), True, 'import numpy as np\n'), ((1116, 1129), 'numpy.dot', 'np.dot', (['w2', 'h'], {}), '(w2, h)\n', (1122, 1129), True, 'import numpy as np\n'), ((1286, 1306), 'numpy.dot', 'np.dot', (['w2.T', 'delta2'], {}), '(w2.T, delta2)\n', (1292, 1306), True, 'import numpy as np\n'), ((1330, 1360), 'numpy.expand_dims', 'np.expand_dims', (['delta1'], {'axis': '(1)'}), '(delta1, axis=1)\n', (1344, 1360), True, 'import numpy as np\n'), ((1360, 1385), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1374, 1385), True, 'import numpy as np\n'), ((1410, 1440), 'numpy.expand_dims', 
'np.expand_dims', (['delta2'], {'axis': '(1)'}), '(delta2, axis=1)\n', (1424, 1440), True, 'import numpy as np\n'), ((1440, 1465), 'numpy.expand_dims', 'np.expand_dims', (['h'], {'axis': '(0)'}), '(h, axis=0)\n', (1454, 1465), True, 'import numpy as np\n'), ((456, 466), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (462, 466), True, 'import numpy as np\n'), ((243, 253), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (249, 253), True, 'import numpy as np\n'), ((264, 274), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (270, 274), True, 'import numpy as np\n')]
|
from utils import (load_data, data_to_series_features,
apply_weight, is_minimum)
from algorithm import (initialize_weights, individual_to_key,
pop_to_weights, select, reconstruct_population)
from sklearn.metrics import mean_squared_error, mean_absolute_error
from tensorflow.keras import optimizers
from tensorflow.keras.models import clone_model
import argparse
import math
import numpy as np
from model import make_model
from copy import copy
from sklearn.model_selection import train_test_split
def parse_arguments():
# argument parsing
parser = argparse.ArgumentParser(description="Specify Params for Experimental Setting")
parser.add_argument('--iterations', type=int, default=20,
help="Specify the number of evolution iterations")
parser.add_argument('--batch_size', type=int, default=256,
help="Specify batch size")
parser.add_argument('--initial_epochs', type=int, default=100,
help="Specify the number of epochs for initial training")
parser.add_argument('--num_epochs', type=int, default=20,
help="Specify the number of epochs for competitive search")
parser.add_argument('--log_step', type=int, default=100,
help="Specify log step size for training")
parser.add_argument('--learning_rate', type=float, default=1e-3,
help="Learning rate")
parser.add_argument('--data', type=str, default='pollution.csv',
help="Path to the dataset")
parser.add_argument('--pop_size', type=int, default=36)
parser.add_argument('--code_length', type=int, default=6)
parser.add_argument('--n_select', type=int, default=6)
parser.add_argument('--time_steps', type=int, default=18)
parser.add_argument('--n_hidden', type=int, default=128)
parser.add_argument('--n_output', type=int, default=1)
parser.add_argument('--max_grad_norm', type=float, default=1.0)
return parser.parse_args()
def main():
args = parse_arguments()
data, y_scaler = load_data(args.data)
args.n_features = np.size(data, axis=-1)
X, y = data_to_series_features(data, args.time_steps)
train_X, X, train_y, y = train_test_split(X, y, test_size=0.3)
valid_X, test_X, valid_y, test_y = train_test_split(X, y, test_size=0.5)
optimizer = optimizers.Adam(learning_rate=args.learning_rate, clipnorm=args.max_grad_norm)
best_model = make_model(args)
best_weight = [1.0] * args.time_steps
best_model.compile(loss='mse', optimizer=optimizer)
print("Initial training before competitive random search")
best_model.fit(apply_weight(train_X, best_weight), train_y, epochs=args.initial_epochs,
validation_data=(apply_weight(valid_X, best_weight), valid_y), shuffle=True)
print("\nInitial training is done. Start competitive random search.\n")
pop, weights = initialize_weights(args.pop_size, args.time_steps, args.code_length)
key_to_rmse = {}
for iteration in range(args.iterations):
for enum, (indiv, weight) in enumerate(zip(pop, weights)):
print('iteration: [%d/%d] indiv_no: [%d/%d]' % (iteration + 1, args.iterations, enum + 1, args.pop_size))
key = individual_to_key(indiv)
if key not in key_to_rmse.keys():
model = make_model(args)
model.compile(loss='mse', optimizer=optimizer)
model.set_weights(best_model.get_weights())
model.fit(apply_weight(train_X, weight), train_y, epochs=args.num_epochs,
validation_data=(apply_weight(valid_X, weight), valid_y), shuffle=True)
pred_y = model.predict(apply_weight(valid_X, weight))
inv_pred_y = y_scaler.inverse_transform(pred_y)
inv_valid_y = y_scaler.inverse_transform(np.expand_dims(valid_y, axis=1))
rmse = math.sqrt(mean_squared_error(inv_valid_y, inv_pred_y))
mae = mean_absolute_error(inv_valid_y, inv_pred_y)
print("RMSE: %.4f, MAE: %.4f" % (rmse, mae))
if is_minimum(rmse, key_to_rmse):
best_model.set_weights(model.get_weights())
best_weight = copy(weight)
key_to_rmse[key] = rmse
pop_selected, fitness_selected = select(pop, args.n_select, key_to_rmse)
pop = reconstruct_population(pop_selected, args.pop_size)
weights = pop_to_weights(pop, args.time_steps, args.code_length)
print('test evaluation:')
pred_y = best_model.predict(apply_weight(test_X, best_weight))
inv_pred_y = y_scaler.inverse_transform(pred_y)
inv_test_y = y_scaler.inverse_transform(np.expand_dims(test_y, axis=1))
rmse = math.sqrt(mean_squared_error(inv_test_y, inv_pred_y))
mae = mean_absolute_error(inv_test_y, inv_pred_y)
print("RMSE: %.4f, MAE: %.4f" % (rmse, mae))
if __name__ == '__main__':
main()
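# --- Illustrative invocation sketch (not part of the original script) ---
# Hedged example of how this script is meant to be launched from the command
# line; the file name below is a hypothetical placeholder and every flag simply
# mirrors the argparse defaults defined in parse_arguments().
#   python competitive_search.py --data pollution.csv --iterations 20 \
#       --pop_size 36 --n_select 6 --time_steps 18 --initial_epochs 100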
|
[
"utils.data_to_series_features",
"utils.is_minimum",
"utils.load_data",
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"algorithm.initialize_weights",
"numpy.size",
"algorithm.individual_to_key",
"algorithm.pop_to_weights",
"model.make_model",
"algorithm.select",
"tensorflow.keras.optimizers.Adam",
"sklearn.metrics.mean_squared_error",
"copy.copy",
"numpy.expand_dims",
"utils.apply_weight",
"sklearn.metrics.mean_absolute_error",
"algorithm.reconstruct_population"
] |
[((601, 679), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Specify Params for Experimental Setting"""'}), "(description='Specify Params for Experimental Setting')\n", (624, 679), False, 'import argparse\n'), ((2118, 2138), 'utils.load_data', 'load_data', (['args.data'], {}), '(args.data)\n', (2127, 2138), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((2161, 2183), 'numpy.size', 'np.size', (['data'], {'axis': '(-1)'}), '(data, axis=-1)\n', (2168, 2183), True, 'import numpy as np\n'), ((2195, 2241), 'utils.data_to_series_features', 'data_to_series_features', (['data', 'args.time_steps'], {}), '(data, args.time_steps)\n', (2218, 2241), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((2271, 2308), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)'}), '(X, y, test_size=0.3)\n', (2287, 2308), False, 'from sklearn.model_selection import train_test_split\n'), ((2348, 2385), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.5)'}), '(X, y, test_size=0.5)\n', (2364, 2385), False, 'from sklearn.model_selection import train_test_split\n'), ((2403, 2481), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'learning_rate': 'args.learning_rate', 'clipnorm': 'args.max_grad_norm'}), '(learning_rate=args.learning_rate, clipnorm=args.max_grad_norm)\n', (2418, 2481), False, 'from tensorflow.keras import optimizers\n'), ((2499, 2515), 'model.make_model', 'make_model', (['args'], {}), '(args)\n', (2509, 2515), False, 'from model import make_model\n'), ((2961, 3029), 'algorithm.initialize_weights', 'initialize_weights', (['args.pop_size', 'args.time_steps', 'args.code_length'], {}), '(args.pop_size, args.time_steps, args.code_length)\n', (2979, 3029), False, 'from algorithm import initialize_weights, individual_to_key, pop_to_weights, select, reconstruct_population\n'), ((4875, 4918), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['inv_test_y', 'inv_pred_y'], {}), '(inv_test_y, inv_pred_y)\n', (4894, 4918), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((2696, 2730), 'utils.apply_weight', 'apply_weight', (['train_X', 'best_weight'], {}), '(train_X, best_weight)\n', (2708, 2730), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((4395, 4434), 'algorithm.select', 'select', (['pop', 'args.n_select', 'key_to_rmse'], {}), '(pop, args.n_select, key_to_rmse)\n', (4401, 4434), False, 'from algorithm import initialize_weights, individual_to_key, pop_to_weights, select, reconstruct_population\n'), ((4449, 4500), 'algorithm.reconstruct_population', 'reconstruct_population', (['pop_selected', 'args.pop_size'], {}), '(pop_selected, args.pop_size)\n', (4471, 4500), False, 'from algorithm import initialize_weights, individual_to_key, pop_to_weights, select, reconstruct_population\n'), ((4519, 4573), 'algorithm.pop_to_weights', 'pop_to_weights', (['pop', 'args.time_steps', 'args.code_length'], {}), '(pop, args.time_steps, args.code_length)\n', (4533, 4573), False, 'from algorithm import initialize_weights, individual_to_key, pop_to_weights, select, reconstruct_population\n'), ((4637, 4670), 'utils.apply_weight', 'apply_weight', (['test_X', 'best_weight'], {}), '(test_X, best_weight)\n', (4649, 4670), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((4768, 4798), 
'numpy.expand_dims', 'np.expand_dims', (['test_y'], {'axis': '(1)'}), '(test_y, axis=1)\n', (4782, 4798), True, 'import numpy as np\n'), ((4821, 4863), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['inv_test_y', 'inv_pred_y'], {}), '(inv_test_y, inv_pred_y)\n', (4839, 4863), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((3299, 3323), 'algorithm.individual_to_key', 'individual_to_key', (['indiv'], {}), '(indiv)\n', (3316, 3323), False, 'from algorithm import initialize_weights, individual_to_key, pop_to_weights, select, reconstruct_population\n'), ((2805, 2839), 'utils.apply_weight', 'apply_weight', (['valid_X', 'best_weight'], {}), '(valid_X, best_weight)\n', (2817, 2839), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((3394, 3410), 'model.make_model', 'make_model', (['args'], {}), '(args)\n', (3404, 3410), False, 'from model import make_model\n'), ((4046, 4090), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['inv_valid_y', 'inv_pred_y'], {}), '(inv_valid_y, inv_pred_y)\n', (4065, 4090), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((4171, 4200), 'utils.is_minimum', 'is_minimum', (['rmse', 'key_to_rmse'], {}), '(rmse, key_to_rmse)\n', (4181, 4200), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((3560, 3589), 'utils.apply_weight', 'apply_weight', (['train_X', 'weight'], {}), '(train_X, weight)\n', (3572, 3589), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((3761, 3790), 'utils.apply_weight', 'apply_weight', (['valid_X', 'weight'], {}), '(valid_X, weight)\n', (3773, 3790), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n'), ((3913, 3944), 'numpy.expand_dims', 'np.expand_dims', (['valid_y'], {'axis': '(1)'}), '(valid_y, axis=1)\n', (3927, 3944), True, 'import numpy as np\n'), ((3979, 4022), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['inv_valid_y', 'inv_pred_y'], {}), '(inv_valid_y, inv_pred_y)\n', (3997, 4022), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((4300, 4312), 'copy.copy', 'copy', (['weight'], {}), '(weight)\n', (4304, 4312), False, 'from copy import copy\n'), ((3667, 3696), 'utils.apply_weight', 'apply_weight', (['valid_X', 'weight'], {}), '(valid_X, weight)\n', (3679, 3696), False, 'from utils import load_data, data_to_series_features, apply_weight, is_minimum\n')]
|
"""
Oxford Flowers dataset loader
1020 samples spanning 102 classes (avg 10 per class)
http://www.robots.ox.ac.uk/~vgg/data/flowers
"""
from __future__ import print_function
import numpy as np
from PIL import Image, ImageFile
from os.path import join
import os
import scipy.io
import tarfile
import shutil
from torch.utils.data.dataset import Dataset
from torchvision.datasets.utils import download_url
class OxfordFlowersDataset(Dataset):
# setup some class paths
sub_root_dir = 'OxfordFlowers'
download_url_prefix = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102'
images_dir = 'jpg'
def __init__(self,
root_dir,
split='train',
transform=None,
target_transform=None,
force_download=False,
categories_subset=None):
"""
:param root_dir: (string) the directory where the dataset will be stored
        :param split: (string) 'train', 'val' or 'test'
:param transform: how to transform the input
:param target_transform: how to transform the target
:param force_download: (boolean) force a new download of the dataset
:param categories_subset: (iterable) specify a subset of categories to build this set from
"""
super(OxfordFlowersDataset, self).__init__()
# set instance variables
self.root_dir = join(os.path.expanduser(root_dir), self.sub_root_dir)
self.split = split
self.transform = transform
self.target_transform = target_transform
self.labels = []
# check if data exists, if not download
self.download(force=force_download)
# load the data samples for this split
self.data, self.labels, self.categories = self.load_data_split(categories_subset=categories_subset)
self.samples = list(zip(self.data, self.labels))
self.n_categories = len(np.unique(self.labels))
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
# get the data sample
sample_data, sample_target = self.samples[index]
# load the image
x = self.load_img(join(join(self.root_dir, self.images_dir), "image_%05d.jpg" % (sample_data+1)))
y = sample_target
# perform the transforms
if self.transform:
x = self.transform(x)
if self.target_transform:
y = self.target_transform(y)
return x, y
def download(self, force=False):
# check for existence, if so return
if os.path.exists(join(self.root_dir, 'jpg')) and os.path.exists(join(self.root_dir, 'imagelabels.mat'))\
and os.path.exists(join(self.root_dir, 'setid.mat')):
if not force and len(os.listdir(join(self.root_dir, 'jpg'))) == 8189:
print('Files already downloaded and verified')
return
else:
shutil.rmtree(self.root_dir)
# make the dirs and start the downloads
os.makedirs(self.root_dir, exist_ok=True)
filename = '102flowers'
tar_filename = filename + '.tgz'
url = join(self.download_url_prefix, tar_filename)
download_url(url, self.root_dir, tar_filename, None)
with tarfile.open(join(self.root_dir, tar_filename), 'r') as tar_file:
tar_file.extractall(self.root_dir)
os.remove(join(self.root_dir, tar_filename))
filename = 'imagelabels.mat'
url = join(self.download_url_prefix, filename)
download_url(url, self.root_dir, filename, None)
filename = 'setid.mat'
url = join(self.download_url_prefix, filename)
download_url(url, self.root_dir, filename, None)
def load_data_split(self, categories_subset=None):
# assert we can do this split
assert self.split in ['train', 'val', 'test']
# load all the samples and their labels
all_samples = scipy.io.loadmat(join(self.root_dir, 'setid.mat'))
all_categories = scipy.io.loadmat(join(self.root_dir, 'imagelabels.mat'))['labels']
# keep only the split samples and categories
if self.split == 'train':
split_samples = all_samples['trnid']
elif self.split == 'val':
split_samples = all_samples['valid']
elif self.split == 'test':
split_samples = all_samples['tstid']
split_samples = list(split_samples[0]-1) # index at 0 not 1
split_categories = list(all_categories[0][split_samples])
# lets now add if they are in the category_subset iterable
data = []
categories = []
for index in range(len(split_samples)):
category = split_categories[index]
if categories_subset:
if category in categories_subset:
data.append(split_samples[index])
categories.append(split_categories[index])
else: # categories_subset is None so add all
data.append(split_samples[index])
categories.append(split_categories[index])
# Build categories to labels (cats can be names, labels are ints starting from 0)
self.categories_to_labels = {}
self.labels_to_categories = {}
for c in categories:
if c not in self.categories_to_labels:
self.categories_to_labels[c] = len(self.categories_to_labels)
self.labels_to_categories[self.categories_to_labels[c]] = c
# Build labels list corresponding to each sample
labels = []
for c in categories:
labels.append(self.categories_to_labels[c])
# set the data, categories and labels used in this dataset
# (initially ordered with self.samples and not unique, careful with access post shuffling)
self.categories = categories
self.labels = labels
self.data = data
return data, labels, categories
@staticmethod
def load_img(path):
# todo either turn image to tensor in transform or do here
# Load the image
ImageFile.LOAD_TRUNCATED_IMAGES = True
image = Image.open(path)#.convert('RGB')
return image
def stats(self):
# get the stats to print
counts = self.class_counts()
return "%d samples spanning %d classes (avg %d per class)" % \
(len(self.samples), len(counts), int(float(len(self.samples))/float(len(counts))))
def class_counts(self):
# calculate the number of samples per category
counts = {}
for index in range(len(self.samples)):
sample_data, sample_target = self.samples[index]
if sample_target not in counts:
counts[sample_target] = 1
else:
counts[sample_target] += 1
return counts
if __name__ == "__main__":
# use this for debugging and checks
from utils.debug import set_working_dir
from config.config import config
import matplotlib.pyplot as plt
# set the working directory as appropriate
set_working_dir()
# load the dataset
dataset = OxfordFlowersDataset(root_dir=config.dataset.root_dir)
# print the stats
print(dataset.stats())
# lets plot some samples
fig = plt.figure()
for i in range(len(dataset)):
sample = dataset[i]
ax = plt.subplot(1, 4, i + 1)
plt.tight_layout()
ax.set_title('Sample %d - Class %d' % (i, dataset.labels_to_categories[sample[1]])) # convert label to categ.
ax.axis('off')
plt.imshow(sample[0]) # todo when tensor will need to convert tensor to img
if i == 3:
plt.show()
break
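# --- Illustrative sketch (not part of the original module) ---
# Hedged example of the `categories_subset` parameter documented in __init__:
# restrict a split to a handful of flower categories. The root_dir value is a
# hypothetical placeholder and the first run downloads the images.
# val_subset = OxfordFlowersDataset(root_dir='~/data', split='val',
#                                   categories_subset={1, 2, 3})
# print(val_subset.stats())
# print(val_subset.class_counts())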
|
[
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"numpy.unique",
"os.makedirs",
"os.path.join",
"torchvision.datasets.utils.download_url",
"utils.debug.set_working_dir",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"shutil.rmtree",
"matplotlib.pyplot.subplot",
"os.path.expanduser",
"matplotlib.pyplot.show"
] |
[((7147, 7164), 'utils.debug.set_working_dir', 'set_working_dir', ([], {}), '()\n', (7162, 7164), False, 'from utils.debug import set_working_dir\n'), ((7348, 7360), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7358, 7360), True, 'import matplotlib.pyplot as plt\n'), ((3059, 3100), 'os.makedirs', 'os.makedirs', (['self.root_dir'], {'exist_ok': '(True)'}), '(self.root_dir, exist_ok=True)\n', (3070, 3100), False, 'import os\n'), ((3188, 3232), 'os.path.join', 'join', (['self.download_url_prefix', 'tar_filename'], {}), '(self.download_url_prefix, tar_filename)\n', (3192, 3232), False, 'from os.path import join\n'), ((3241, 3293), 'torchvision.datasets.utils.download_url', 'download_url', (['url', 'self.root_dir', 'tar_filename', 'None'], {}), '(url, self.root_dir, tar_filename, None)\n', (3253, 3293), False, 'from torchvision.datasets.utils import download_url\n'), ((3525, 3565), 'os.path.join', 'join', (['self.download_url_prefix', 'filename'], {}), '(self.download_url_prefix, filename)\n', (3529, 3565), False, 'from os.path import join\n'), ((3574, 3622), 'torchvision.datasets.utils.download_url', 'download_url', (['url', 'self.root_dir', 'filename', 'None'], {}), '(url, self.root_dir, filename, None)\n', (3586, 3622), False, 'from torchvision.datasets.utils import download_url\n'), ((3669, 3709), 'os.path.join', 'join', (['self.download_url_prefix', 'filename'], {}), '(self.download_url_prefix, filename)\n', (3673, 3709), False, 'from os.path import join\n'), ((3718, 3766), 'torchvision.datasets.utils.download_url', 'download_url', (['url', 'self.root_dir', 'filename', 'None'], {}), '(url, self.root_dir, filename, None)\n', (3730, 3766), False, 'from torchvision.datasets.utils import download_url\n'), ((6208, 6224), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (6218, 6224), False, 'from PIL import Image, ImageFile\n'), ((7438, 7462), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(i + 1)'], {}), '(1, 4, i + 1)\n', (7449, 7462), True, 'import matplotlib.pyplot as plt\n'), ((7471, 7489), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7487, 7489), True, 'import matplotlib.pyplot as plt\n'), ((7640, 7661), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sample[0]'], {}), '(sample[0])\n', (7650, 7661), True, 'import matplotlib.pyplot as plt\n'), ((1428, 1456), 'os.path.expanduser', 'os.path.expanduser', (['root_dir'], {}), '(root_dir)\n', (1446, 1456), False, 'import os\n'), ((1952, 1974), 'numpy.unique', 'np.unique', (['self.labels'], {}), '(self.labels)\n', (1961, 1974), True, 'import numpy as np\n'), ((3438, 3471), 'os.path.join', 'join', (['self.root_dir', 'tar_filename'], {}), '(self.root_dir, tar_filename)\n', (3442, 3471), False, 'from os.path import join\n'), ((4005, 4037), 'os.path.join', 'join', (['self.root_dir', '"""setid.mat"""'], {}), "(self.root_dir, 'setid.mat')\n", (4009, 4037), False, 'from os.path import join\n'), ((7749, 7759), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7757, 7759), True, 'import matplotlib.pyplot as plt\n'), ((2212, 2248), 'os.path.join', 'join', (['self.root_dir', 'self.images_dir'], {}), '(self.root_dir, self.images_dir)\n', (2216, 2248), False, 'from os.path import join\n'), ((2613, 2639), 'os.path.join', 'join', (['self.root_dir', '"""jpg"""'], {}), "(self.root_dir, 'jpg')\n", (2617, 2639), False, 'from os.path import join\n'), ((2660, 2698), 'os.path.join', 'join', (['self.root_dir', '"""imagelabels.mat"""'], {}), "(self.root_dir, 'imagelabels.mat')\n", (2664, 2698), 
False, 'from os.path import join\n'), ((2736, 2768), 'os.path.join', 'join', (['self.root_dir', '"""setid.mat"""'], {}), "(self.root_dir, 'setid.mat')\n", (2740, 2768), False, 'from os.path import join\n'), ((2973, 3001), 'shutil.rmtree', 'shutil.rmtree', (['self.root_dir'], {}), '(self.root_dir)\n', (2986, 3001), False, 'import shutil\n'), ((3320, 3353), 'os.path.join', 'join', (['self.root_dir', 'tar_filename'], {}), '(self.root_dir, tar_filename)\n', (3324, 3353), False, 'from os.path import join\n'), ((4081, 4119), 'os.path.join', 'join', (['self.root_dir', '"""imagelabels.mat"""'], {}), "(self.root_dir, 'imagelabels.mat')\n", (4085, 4119), False, 'from os.path import join\n'), ((2815, 2841), 'os.path.join', 'join', (['self.root_dir', '"""jpg"""'], {}), "(self.root_dir, 'jpg')\n", (2819, 2841), False, 'from os.path import join\n')]
|
import numpy as np
import dace as dc
M, N = (dc.symbol(s, dtype=dc.int64) for s in ('M', 'N'))
@dc.program
def flip(A: dc.float64[M]):
B = np.ndarray((M, ), dtype=np.float64)
for i in dc.map[0:M]:
B[i] = A[M - 1 - i]
return B
@dc.program
def kernel(r: dc.float64[N]):
y = np.empty_like(r)
alpha = -r[0]
beta = 1.0
y[0] = -r[0]
for k in range(1, N):
beta *= 1.0 - alpha * alpha
alpha = -(r[k] + np.dot(flip(r[:k]), y[:k])) / beta
y[:k] += alpha * flip(y[:k])
y[k] = alpha
return y
|
[
"numpy.empty_like",
"numpy.ndarray",
"dace.symbol"
] |
[((46, 74), 'dace.symbol', 'dc.symbol', (['s'], {'dtype': 'dc.int64'}), '(s, dtype=dc.int64)\n', (55, 74), True, 'import dace as dc\n'), ((146, 180), 'numpy.ndarray', 'np.ndarray', (['(M,)'], {'dtype': 'np.float64'}), '((M,), dtype=np.float64)\n', (156, 180), True, 'import numpy as np\n'), ((302, 318), 'numpy.empty_like', 'np.empty_like', (['r'], {}), '(r)\n', (315, 318), True, 'import numpy as np\n')]
|
import starry
import numpy as np
import matplotlib.pyplot as plt
import pytest
@pytest.mark.parametrize("ydeg,nw", [[0, None], [0, 10], [1, None], [1, 10]])
def test_system(ydeg, nw):
# Oblate map
map = starry.Map(udeg=2, ydeg=ydeg, oblate=True, nw=nw)
map[1] = 0.5
map[2] = 0.25
map.omega = 0.5
map.beta = 1.23
map.tpole = 8000
map.f = 1 - 2 / (map.omega ** 2 + 2)
map.obl = 30
# Compute system flux
star = starry.Primary(map, r=1.5)
planet = starry.Secondary(starry.Map(amp=0, nw=nw), porb=1.0, r=0.1, m=0)
sys = starry.System(star, planet)
t = np.linspace(-0.1, 0.1, 1000)
flux_sys = sys.flux(t, integrated=True)
# Compute map flux manually
x, y, z = sys.position(t)
xo = x[1] / star._r
yo = y[1] / star._r
flux_map = map.flux(xo=xo, yo=yo, ro=planet._r / star._r, integrated=True)
# Check that they agree
assert np.allclose(flux_map, flux_sys)
|
[
"numpy.allclose",
"starry.Primary",
"starry.Map",
"starry.System",
"pytest.mark.parametrize",
"numpy.linspace"
] |
[((82, 158), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ydeg,nw"""', '[[0, None], [0, 10], [1, None], [1, 10]]'], {}), "('ydeg,nw', [[0, None], [0, 10], [1, None], [1, 10]])\n", (105, 158), False, 'import pytest\n'), ((214, 263), 'starry.Map', 'starry.Map', ([], {'udeg': '(2)', 'ydeg': 'ydeg', 'oblate': '(True)', 'nw': 'nw'}), '(udeg=2, ydeg=ydeg, oblate=True, nw=nw)\n', (224, 263), False, 'import starry\n'), ((456, 482), 'starry.Primary', 'starry.Primary', (['map'], {'r': '(1.5)'}), '(map, r=1.5)\n', (470, 482), False, 'import starry\n'), ((571, 598), 'starry.System', 'starry.System', (['star', 'planet'], {}), '(star, planet)\n', (584, 598), False, 'import starry\n'), ((607, 635), 'numpy.linspace', 'np.linspace', (['(-0.1)', '(0.1)', '(1000)'], {}), '(-0.1, 0.1, 1000)\n', (618, 635), True, 'import numpy as np\n'), ((910, 941), 'numpy.allclose', 'np.allclose', (['flux_map', 'flux_sys'], {}), '(flux_map, flux_sys)\n', (921, 941), True, 'import numpy as np\n'), ((513, 537), 'starry.Map', 'starry.Map', ([], {'amp': '(0)', 'nw': 'nw'}), '(amp=0, nw=nw)\n', (523, 537), False, 'import starry\n')]
|
"""Utility class for multivariate time series transformation."""
# Author: <NAME> <<EMAIL>>
# License: BSD-3-Clause
import numpy as np
from scipy.sparse import csr_matrix, hstack
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.utils.validation import check_is_fitted
from ..utils import check_3d_array
class MultivariateTransformer(BaseEstimator, TransformerMixin):
r"""Transformer for multivariate time series.
It provides a convenient class to transform multivariate time series with
transformers that can only deal with univariate time series.
Parameters
----------
estimator : estimator object or list thereof
Transformer. If one estimator is provided, it is cloned and each clone
transforms one feature. If a list of estimators is provided, each
estimator transforms one feature.
flatten : bool (default = True)
Affect shape of transform output. If True, ``transform``
returns an array with shape (n_samples, \*). If False, the output of
``transform`` from each estimator must have the same shape and
``transform`` returns an array with shape (n_samples, n_features, \*).
Ignored if the transformers return sparse matrices.
Attributes
----------
estimators_ : list of estimator objects
The collection of fitted transformers.
Examples
--------
>>> from pyts.datasets import load_basic_motions
>>> from pyts.multivariate.transformation import MultivariateTransformer
>>> from pyts.image import GramianAngularField
>>> X, _, _, _ = load_basic_motions(return_X_y=True)
>>> transformer = MultivariateTransformer(GramianAngularField(),
... flatten=False)
>>> X_new = transformer.fit_transform(X)
>>> X_new.shape
(40, 6, 100, 100)
"""
def __init__(self, estimator, flatten=True):
self.estimator = estimator
self.flatten = flatten
def fit(self, X, y=None):
"""Pass.
Parameters
----------
X : array-like, shape = (n_samples, n_features, n_timestamps)
Multivariate time series.
y : None or array-like, shape = (n_samples,) (default = None)
Class labels.
Returns
-------
self : object
"""
X = check_3d_array(X)
_, n_features, _ = X.shape
self._check_params(n_features)
for i, transformer in enumerate(self.estimators_):
transformer.fit(X[:, i, :], y)
return self
def transform(self, X):
r"""Apply transform to each feature.
Parameters
----------
X : array-like, shape = (n_samples, n_features, n_timestamps)
Multivariate time series.
Returns
-------
X_new : array, shape = (n_samples, *) or (n_samples, n_features, *)
Transformed time series.
"""
X = check_3d_array(X)
n_samples, _, _ = X.shape
check_is_fitted(self, 'estimators_')
X_transformed = [transformer.transform(X[:, i, :])
for i, transformer in enumerate(self.estimators_)]
all_sparse = np.all([isinstance(X_transformed_i, csr_matrix)
for X_transformed_i in X_transformed])
if all_sparse:
X_new = hstack(X_transformed)
else:
X_new = [self._convert_to_array(X_transformed_i)
for X_transformed_i in X_transformed]
ndims = [X_new_i.ndim for X_new_i in X_new]
shapes = [X_new_i.shape for X_new_i in X_new]
one_dim = (np.unique(ndims).size == 1)
if one_dim:
one_shape = np.unique(shapes, axis=0).shape[0] == 1
else:
one_shape = False
if (not one_shape) or self.flatten:
X_new = [X_new_i.reshape(n_samples, -1) for X_new_i in X_new]
X_new = np.concatenate(X_new, axis=1)
else:
X_new = np.asarray(X_new)
axes = [1, 0] + [i for i in range(2, X_new.ndim)]
X_new = np.transpose(X_new, axes=axes)
return X_new
def _check_params(self, n_features):
"""Check parameters."""
transformer = (isinstance(self.estimator, BaseEstimator)
and hasattr(self.estimator, 'transform'))
if transformer:
self.estimators_ = [clone(self.estimator)
for _ in range(n_features)]
elif isinstance(self.estimator, list):
if len(self.estimator) != n_features:
raise ValueError(
"If 'estimator' is a list, its length must be equal to "
"the number of features ({0} != {1})"
.format(len(self.estimator), n_features)
)
for i, estimator in enumerate(self.estimator):
if not (isinstance(estimator, BaseEstimator)
and hasattr(estimator, 'transform')):
raise ValueError("Estimator {} must be a transformer."
.format(i))
self.estimators_ = self.estimator
else:
raise TypeError(
"'estimator' must be a transformer that inherits from "
"sklearn.base.BaseEstimator or a list thereof.")
@staticmethod
def _convert_to_array(X):
"""Convert the input data to an array if necessary."""
if isinstance(X, csr_matrix):
return X.A
elif isinstance(X, np.ndarray):
return X
else:
raise ValueError('Unexpected type for X: {}.'
.format(type(X).__name__))
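# --- Illustrative sketch (not part of the original module) ---
# Hedged example of the list-of-estimators path described in the class
# docstring: one scikit-learn transformer per feature on a small random
# multivariate dataset (shapes are arbitrary). Written doctest-style, like the
# Examples section above, since this module uses relative imports and is meant
# to be imported rather than run directly.
# >>> import numpy as np
# >>> from sklearn.preprocessing import StandardScaler, MinMaxScaler
# >>> X = np.random.randn(8, 2, 50)  # (n_samples, n_features, n_timestamps)
# >>> transformer = MultivariateTransformer([StandardScaler(), MinMaxScaler()],
# ...                                        flatten=False)
# >>> transformer.fit_transform(X).shape
# (8, 2, 50)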
|
[
"sklearn.utils.validation.check_is_fitted",
"numpy.unique",
"sklearn.base.clone",
"numpy.asarray",
"scipy.sparse.hstack",
"numpy.concatenate",
"numpy.transpose"
] |
[((3024, 3060), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""estimators_"""'], {}), "(self, 'estimators_')\n", (3039, 3060), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((3377, 3398), 'scipy.sparse.hstack', 'hstack', (['X_transformed'], {}), '(X_transformed)\n', (3383, 3398), False, 'from scipy.sparse import csr_matrix, hstack\n'), ((3992, 4021), 'numpy.concatenate', 'np.concatenate', (['X_new'], {'axis': '(1)'}), '(X_new, axis=1)\n', (4006, 4021), True, 'import numpy as np\n'), ((4064, 4081), 'numpy.asarray', 'np.asarray', (['X_new'], {}), '(X_new)\n', (4074, 4081), True, 'import numpy as np\n'), ((4172, 4202), 'numpy.transpose', 'np.transpose', (['X_new'], {'axes': 'axes'}), '(X_new, axes=axes)\n', (4184, 4202), True, 'import numpy as np\n'), ((4484, 4505), 'sklearn.base.clone', 'clone', (['self.estimator'], {}), '(self.estimator)\n', (4489, 4505), False, 'from sklearn.base import BaseEstimator, TransformerMixin, clone\n'), ((3670, 3686), 'numpy.unique', 'np.unique', (['ndims'], {}), '(ndims)\n', (3679, 3686), True, 'import numpy as np\n'), ((3750, 3775), 'numpy.unique', 'np.unique', (['shapes'], {'axis': '(0)'}), '(shapes, axis=0)\n', (3759, 3775), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LogisticRegression
import pdb
from sklearn.metrics import precision_score, recall_score, accuracy_score
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import itertools
import json
import pickle
class User:
def __init__(self, id):
self.id = id
self.positive = []
self.negative = []
def add_positive(self, movie_id):
self.positive.append(movie_id)
def add_negative(self, movie_id):
self.negative.append(movie_id)
def get_positive(self):
return self.positive
def get_negative(self):
return self.negative
np.random.seed(1)
class EventsGenerator:
NUM_OF_USERS = 1
def __init__(self, learning_data, buy_probability, opened):
self.learning_data = learning_data
self.buy_probability = buy_probability
self.users = []
self.NUM_OF_OPENED_MOVIES_PER_USER = opened
for id in range(1, self.NUM_OF_USERS+1):
self.users.append(User(id))
def run(self, pairwise=False):
# print (self.users, "hellp")
for user in self.users:
# print (self.learning_data.index)
opened_movies = np.random.choice(self.learning_data.index.values, self.NUM_OF_OPENED_MOVIES_PER_USER)
self.__add_positives_and_negatives_to(user, opened_movies)
return self.__build_events_data()
def __add_positives_and_negatives_to(self, user, opened_movies):
# print (opened_movies)
for movie_id in opened_movies:
if np.random.binomial(1, self.buy_probability.loc[movie_id]):
user.add_positive(movie_id)
else:
user.add_negative(movie_id)
def __build_events_data(self):
events_data = []
for user in self.users:
for positive_id in user.get_positive():
# print(positive_id)
tmp = self.learning_data.loc[positive_id].to_dict()
tmp['outcome'] = 1
events_data += [tmp]
for negative_id in user.get_negative():
tmp = self.learning_data.loc[negative_id].to_dict()
tmp['outcome'] = 0
events_data += [tmp]
# print(events_data)
return pd.DataFrame(events_data)
def build_learning_data_from(movie_data):
feature_columns = np.setdiff1d(movie_data.columns, np.array(['top_prob']))
learning_data = movie_data.loc[:, feature_columns]
scaler = StandardScaler()
for i in range(feature_columns.shape[0]):
learning_data[feature_columns[i]] = scaler.fit_transform(learning_data[[feature_columns[i]]])
return learning_data, feature_columns
def get_test_train_data(events_data, feature_columns):
X = events_data.loc[:, feature_columns].values.astype(np.float32)
y = events_data.loc[:, ['outcome']].values.astype(np.float32).ravel()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
return [X_train, X_test, y_train, y_test]
def get_predicted_outcome(model, data):
return np.argmax(model.predict_proba(data), axis=1).astype(np.float32)
def get_predicted_rank(model, data):
return model.predict_proba(data)[:, 1]
def train_model(model, prediction_function, X_train, y_train, X_test, y_test):
model.fit(X_train, y_train)
y_train_pred = prediction_function(model, X_train)
y_test_pred = prediction_function(model, X_test)
return model, precision_score(y_train, y_train_pred), recall_score(y_train, y_train_pred), accuracy_score(y_train, y_train_pred),\
precision_score(y_test, y_test_pred), recall_score(y_test, y_test_pred), accuracy_score(y_test, y_test_pred)
def decide_rank(model, learning_data, predict_fun):
lg_input = learning_data.values.astype(np.float32)
learning_data_with_rank = learning_data.copy()
learning_data_with_rank['rank'] = predict_fun(model, lg_input)
return learning_data_with_rank
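# --- Illustrative end-to-end sketch (not part of the original module) ---
# Hedged walk-through of how the helpers above fit together. The movie
# features, column names and buy probabilities are synthetic placeholders
# invented purely for demonstration.
if __name__ == '__main__':
    movie_data = pd.DataFrame({
        'price': np.random.uniform(5, 20, 200),
        'ratings': np.random.uniform(1, 5, 200),
        'top_prob': np.random.uniform(0, 1, 200),  # dropped from the features
    })
    learning_data, feature_columns = build_learning_data_from(movie_data)
    buy_probability = movie_data['top_prob']
    events_data = EventsGenerator(learning_data, buy_probability, opened=100).run()
    X_train, X_test, y_train, y_test = get_test_train_data(events_data, feature_columns)
    model, *metrics = train_model(LogisticRegression(), get_predicted_outcome,
                                 X_train, y_train, X_test, y_test)
    print('train/test precision, recall, accuracy:', metrics)
    print(decide_rank(model, learning_data, get_predicted_rank).head())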
|
[
"sklearn.model_selection.train_test_split",
"numpy.random.choice",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.random.binomial"
] |
[((801, 818), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (815, 818), True, 'import numpy as np\n'), ((2715, 2731), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2729, 2731), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3169, 3223), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (3185, 3223), False, 'from sklearn.model_selection import train_test_split\n'), ((2493, 2518), 'pandas.DataFrame', 'pd.DataFrame', (['events_data'], {}), '(events_data)\n', (2505, 2518), True, 'import pandas as pd\n'), ((2618, 2640), 'numpy.array', 'np.array', (["['top_prob']"], {}), "(['top_prob'])\n", (2626, 2640), True, 'import numpy as np\n'), ((1373, 1463), 'numpy.random.choice', 'np.random.choice', (['self.learning_data.index.values', 'self.NUM_OF_OPENED_MOVIES_PER_USER'], {}), '(self.learning_data.index.values, self.\n NUM_OF_OPENED_MOVIES_PER_USER)\n', (1389, 1463), True, 'import numpy as np\n'), ((1730, 1787), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'self.buy_probability.loc[movie_id]'], {}), '(1, self.buy_probability.loc[movie_id])\n', (1748, 1787), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""Helper functions for pybaselines.
Created on March 5, 2021
@author: <NAME>
"""
import numpy as np
# machine epsilon: the smallest positive float such that 1 + _MIN_FLOAT != 1
_MIN_FLOAT = np.finfo(float).eps
def relative_difference(old, new, norm_order=None):
"""
Calculates the relative difference (norm(new-old) / norm(old)) of two values.
    Used as an exit criterion in many baseline algorithms.
Parameters
----------
old : numpy.ndarray or float
The array or single value from the previous iteration.
new : numpy.ndarray or float
The array or single value from the current iteration.
norm_order : int, optional
The type of norm to calculate. Default is None, which is l2
norm for arrays, abs for scalars.
Returns
-------
float
The relative difference between the old and new values.
"""
numerator = np.linalg.norm(new - old, norm_order)
denominator = np.maximum(np.linalg.norm(old, norm_order), _MIN_FLOAT)
return numerator / denominator
def gaussian(x, height=1.0, center=0.0, sigma=1.0):
"""
Generates a gaussian distribution based on height, center, and sigma.
Parameters
----------
x : numpy.ndarray
The x-values at which to evaluate the distribution.
height : float, optional
The maximum height of the distribution. Default is 1.0.
center : float, optional
The center of the distribution. Default is 0.0.
sigma : float, optional
The standard deviation of the distribution. Default is 1.0.
Returns
-------
numpy.ndarray
The gaussian distribution evaluated with x.
"""
return height * np.exp(-0.5 * ((x - center)**2) / sigma**2)
def gaussian_kernel(window_size, sigma=1.0):
"""
Creates an area-normalized gaussian kernel for convolution.
Parameters
----------
window_size : int
The number of points for the entire kernel.
sigma : float, optional
The standard deviation of the gaussian model.
Returns
-------
numpy.ndarray, shape (window_size,)
The area-normalized gaussian kernel.
Notes
-----
Return gaus/sum(gaus) rather than creating a unit-area gaussian
since the unit-area gaussian would have an area smaller than 1
for window_size < ~ 6 * sigma.
"""
# centers distribution from -half_window to half_window
x = np.arange(0, window_size) - (window_size - 1) / 2
gaus = gaussian(x, 1, 0, sigma)
return gaus / np.sum(gaus)
def _get_edges(data, pad_length, mode='extrapolate', extrapolate_window=None, **pad_kwargs):
"""
Provides the left and right edges for padding data.
Parameters
----------
data : array-like
The array of the data.
pad_length : int
The number of points to add to the left and right edges.
mode : str, optional
The method for padding. Default is 'extrapolate'. Any method other than
'extrapolate' will use numpy.pad.
extrapolate_window : int, optional
The number of values to use for linear fitting on the left and right
edges. Default is None, which will set the extrapolate window size equal
to the `half_window` size.
**pad_kwargs
Any keyword arguments to pass to numpy.pad, which will be used if `mode`
is not 'extrapolate'.
Returns
-------
left_edge : numpy.ndarray, shape(pad_length,)
The array of data for the left padding.
right_edge : numpy.ndarray, shape(pad_length,)
The array of data for the right padding.
Notes
-----
If mode is 'extrapolate', then the left and right edges will be fit with
a first order polynomial and then extrapolated. Otherwise, uses numpy.pad.
"""
y = np.asarray(data)
if pad_length == 0:
return y
mode = mode.lower()
if mode == 'extrapolate':
if extrapolate_window is None:
extrapolate_window = 2 * pad_length + 1
x = np.arange(-pad_length, y.shape[0] + pad_length)
left_poly = np.polynomial.Polynomial.fit(
x[pad_length:-pad_length][:extrapolate_window],
y[:extrapolate_window], 1
)
right_poly = np.polynomial.Polynomial.fit(
x[pad_length:-pad_length][-extrapolate_window:],
y[-extrapolate_window:], 1
)
left_edge = left_poly(x[:pad_length])
right_edge = right_poly(x[-pad_length:])
else:
padded_data = np.pad(y, pad_length, mode, **pad_kwargs)
left_edge = padded_data[:pad_length]
right_edge = padded_data[-pad_length:]
return left_edge, right_edge
def pad_edges(data, pad_length, mode='extrapolate',
extrapolate_window=None, **pad_kwargs):
"""
Adds left and right edges to the data.
Parameters
----------
data : array-like
The array of the data.
pad_length : int
The number of points to add to the left and right edges.
mode : str, optional
The method for padding. Default is 'extrapolate'. Any method other than
'extrapolate' will use numpy.pad.
extrapolate_window : int, optional
The number of values to use for linear fitting on the left and right
edges. Default is None, which will set the extrapolate window size equal
to the `half_window` size.
**pad_kwargs
Any keyword arguments to pass to numpy.pad, which will be used if `mode`
is not 'extrapolate'.
Returns
-------
padded_data : numpy.ndarray, shape (N + 2 * half_window,)
The data with padding on the left and right edges.
Notes
-----
If mode is 'extrapolate', then the left and right edges will be fit with
a first order polynomial and then extrapolated. Otherwise, uses numpy.pad.
"""
y = np.asarray(data)
if pad_length == 0:
return y
if mode.lower() == 'extrapolate':
left_edge, right_edge = _get_edges(y, pad_length, mode, extrapolate_window)
padded_data = np.concatenate((left_edge, y, right_edge))
else:
padded_data = np.pad(y, pad_length, mode.lower(), **pad_kwargs)
return padded_data
def padded_convolve(data, kernel, mode='reflect', **pad_kwargs):
"""
Pads data before convolving to reduce edge effects.
Parameters
----------
data : numpy.ndarray, shape (N,)
The data to smooth.
kernel : numpy.ndarray, shape (M,)
A pre-computed, normalized kernel for the convolution. Indices should
        span from -half_window to half_window.
    mode : str, optional
        The method for padding the edges. Default is 'reflect'. Any method
        other than 'extrapolate' will use numpy.pad.
    **pad_kwargs
        Any keyword arguments to pass to numpy.pad, which will be used if
        `mode` is not 'extrapolate'.
Returns
-------
numpy.ndarray, shape (N,)
The smoothed input array.
Notes
-----
Mirrors the data near the edges so that convolution does not
produce edge effects.
"""
padding = (min(data.shape[0], kernel.shape[0]) // 2)
convolution = np.convolve(
pad_edges(data, padding, mode, **pad_kwargs), kernel, mode='valid'
)
return convolution
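# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example of how these helpers compose: smooth a noisy gaussian peak
# with an area-normalized gaussian kernel and use ``relative_difference`` as a
# convergence measure. The signal, noise level and window size are arbitrary
# values chosen only for demonstration.
if __name__ == '__main__':
    x = np.linspace(0, 10, 500)
    noisy = gaussian(x, height=2.0, center=5.0, sigma=1.0) + np.random.normal(0, 0.05, x.size)
    kernel = gaussian_kernel(window_size=25, sigma=4.0)
    smoothed = padded_convolve(noisy, kernel, mode='reflect')
    twice_smoothed = padded_convolve(smoothed, kernel, mode='reflect')
    # a small relative difference indicates repeated smoothing is converging
    print(relative_difference(smoothed, twice_smoothed))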
|
[
"numpy.polynomial.Polynomial.fit",
"numpy.asarray",
"numpy.exp",
"numpy.sum",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.finfo",
"numpy.pad",
"numpy.arange"
] |
[((209, 224), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (217, 224), True, 'import numpy as np\n'), ((919, 956), 'numpy.linalg.norm', 'np.linalg.norm', (['(new - old)', 'norm_order'], {}), '(new - old, norm_order)\n', (933, 956), True, 'import numpy as np\n'), ((3811, 3827), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (3821, 3827), True, 'import numpy as np\n'), ((5866, 5882), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (5876, 5882), True, 'import numpy as np\n'), ((986, 1017), 'numpy.linalg.norm', 'np.linalg.norm', (['old', 'norm_order'], {}), '(old, norm_order)\n', (1000, 1017), True, 'import numpy as np\n'), ((1713, 1758), 'numpy.exp', 'np.exp', (['(-0.5 * (x - center) ** 2 / sigma ** 2)'], {}), '(-0.5 * (x - center) ** 2 / sigma ** 2)\n', (1719, 1758), True, 'import numpy as np\n'), ((2441, 2466), 'numpy.arange', 'np.arange', (['(0)', 'window_size'], {}), '(0, window_size)\n', (2450, 2466), True, 'import numpy as np\n'), ((2545, 2557), 'numpy.sum', 'np.sum', (['gaus'], {}), '(gaus)\n', (2551, 2557), True, 'import numpy as np\n'), ((4027, 4074), 'numpy.arange', 'np.arange', (['(-pad_length)', '(y.shape[0] + pad_length)'], {}), '(-pad_length, y.shape[0] + pad_length)\n', (4036, 4074), True, 'import numpy as np\n'), ((4095, 4202), 'numpy.polynomial.Polynomial.fit', 'np.polynomial.Polynomial.fit', (['x[pad_length:-pad_length][:extrapolate_window]', 'y[:extrapolate_window]', '(1)'], {}), '(x[pad_length:-pad_length][:extrapolate_window],\n y[:extrapolate_window], 1)\n', (4123, 4202), True, 'import numpy as np\n'), ((4254, 4364), 'numpy.polynomial.Polynomial.fit', 'np.polynomial.Polynomial.fit', (['x[pad_length:-pad_length][-extrapolate_window:]', 'y[-extrapolate_window:]', '(1)'], {}), '(x[pad_length:-pad_length][-extrapolate_window:\n ], y[-extrapolate_window:], 1)\n', (4282, 4364), True, 'import numpy as np\n'), ((4522, 4563), 'numpy.pad', 'np.pad', (['y', 'pad_length', 'mode'], {}), '(y, pad_length, mode, **pad_kwargs)\n', (4528, 4563), True, 'import numpy as np\n'), ((6069, 6111), 'numpy.concatenate', 'np.concatenate', (['(left_edge, y, right_edge)'], {}), '((left_edge, y, right_edge))\n', (6083, 6111), True, 'import numpy as np\n')]
|
import numpy as np
from pykeops.common.lazy_tensor import GenericLazyTensor
from pykeops.numpy.utils import numpytools
# Convenient aliases:
def Var(x_or_ind, dim=None, cat=None):
if dim is None:
# init via data: we assume x_or_ind is data
return LazyTensor(x_or_ind, axis=cat)
else:
# init via symbolic variable given as triplet (ind,dim,cat)
return LazyTensor((x_or_ind, dim, cat))
def Vi(x_or_ind, dim=None):
r"""
    Simple wrapper that returns an instantiation of :class:`LazyTensor` of type 0.
"""
return Var(x_or_ind, dim, 0)
def Vj(x_or_ind, dim=None):
r"""
    Simple wrapper that returns an instantiation of :class:`LazyTensor` of type 1.
"""
return Var(x_or_ind, dim, 1)
def Pm(x_or_ind, dim=None):
r"""
    Simple wrapper that returns an instantiation of :class:`LazyTensor` of type 2.
"""
return Var(x_or_ind, dim, 2)
class LazyTensor(GenericLazyTensor):
r"""Symbolic wrapper for NumPy arrays.
:class:`LazyTensor` encode numerical arrays through the combination
of a symbolic, **mathematical formula** and a list of **small data arrays**.
They can be used to implement efficient algorithms on objects
that are **easy to define**, but **impossible to store** in memory
(e.g. the matrix of pairwise distances between
two large point clouds).
:class:`LazyTensor` may be created from standard NumPy arrays or PyTorch tensors,
combined using simple mathematical operations and converted
back to NumPy arrays or PyTorch tensors with
efficient reduction routines, which outperform
standard tensorized implementations by two orders of magnitude.
"""
def __init__(self, x=None, axis=None):
super().__init__(x=x, axis=axis)
# numpy specialization
typex = type(x)
if (
typex
not in [type(None), tuple, int, float, list, np.ndarray] + self.float_types
):
raise TypeError(
"LazyTensors should be built from NumPy arrays, "
"float/integer numbers, lists of floats or 3-uples of "
"integers. Received: {}".format(typex)
)
if typex in self.float_types: # NumPy scalar -> NumPy array
x = np.array(x).reshape(1)
if typex == np.ndarray:
self.infer_dim(x, axis)
def get_tools(self):
self.tools = numpytools
self.Genred = numpytools.Genred
self.KernelSolve = numpytools.KernelSolve
def lt_constructor(self, x=None, axis=None):
return LazyTensor(x=x, axis=axis)
float_types = [float, np.float16, np.float32, np.float64]
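# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example of the symbolic workflow described in the class docstring:
# wrap two small point clouds with Vi / Vj, build a symbolic Gaussian kernel,
# and reduce it back to a dense NumPy array. The shapes and bandwidth are
# arbitrary choices, and running this requires a working KeOps installation.
if __name__ == "__main__":
    x = np.random.rand(1000, 3)
    y = np.random.rand(2000, 3)
    x_i = Vi(x)  # symbolic (1000, 1, 3) i-variable
    y_j = Vj(y)  # symbolic (1, 2000, 3) j-variable
    D_ij = ((x_i - y_j) ** 2).sum(-1)  # symbolic pairwise squared distances
    K_ij = (-D_ij).exp()  # symbolic Gaussian kernel
    a_i = K_ij.sum(axis=1)  # reduction over j -> dense (1000, 1) array
    print(a_i.shape)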
|
[
"numpy.array"
] |
[((2288, 2299), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2296, 2299), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : tokenzier.py
@Time : 2021/09/11 16:00:04
@Author : <NAME>
@Version : 1.0
@Contact : <EMAIL>
@Desc : None
'''
import json
import os
from typing import List
import numpy as np
from pypinyin import Style, pinyin
from .. import BasicTokenizer, PretrainedTokenizer, WordpieceTokenizer
__all__ = ['ChineseBertTokenizer']
class ChineseBertTokenizer(PretrainedTokenizer):
resource_files_names = {"vocab_file": "vocab.txt"} # for save_pretrained
pretrained_resource_files_map = {}
pretrained_init_configuration = {}
padding_side = 'right'
def __init__(self,
bert_path,
max_seq_len=512,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]"):
vocab_file = os.path.join(bert_path, 'vocab.txt')
config_path = os.path.join(bert_path, 'config')
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the "
"vocabulary from a pretrained model please use "
"`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
.format(vocab_file))
self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
self.max_seq_len = max_seq_len
# load pinyin map dict
with open(os.path.join(config_path, 'pinyin_map.json'),
encoding='utf8') as fin:
self.pinyin_dict = json.load(fin)
# load char id map tensor
with open(os.path.join(config_path, 'id2pinyin.json'),
encoding='utf8') as fin:
self.id2pinyin = json.load(fin)
# load pinyin map tensor
with open(os.path.join(config_path, 'pinyin2tensor.json'),
encoding='utf8') as fin:
self.pinyin2tensor = json.load(fin)
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab,
unk_token=unk_token)
def tokenize_sentence(self, sentence):
# convert sentence to ids
tokenizer_output = self.encode(sentence)
input_ids = tokenizer_output['input_ids']
pinyin_ids = self.convert_sentence_to_pinyin_ids(sentence)
        # sanity check: the number of tokens should match the number of pinyin ids
# assert len(input_ids) <= self.max_seq_len
# assert len(input_ids) == len(pinyin_ids)
# convert list to tensor
# input_ids = paddle.to_tensor(input_ids)
# pinyin_ids = paddle.to_tensor(pinyin_ids).reshape([-1])
# convert list to np.array
input_ids = np.array(input_ids)
pinyin_ids = np.array(pinyin_ids).reshape([-1, 8])
return {"input_ids": input_ids, "pinyin_ids": pinyin_ids}
    def convert_sentence_to_pinyin_ids(self, sentence: str, with_special_token=True) -> List[List[int]]:
# get offsets
bert_tokens_offsets = self.get_offset_mapping(sentence)
        if with_special_token:
bert_tokens_offsets.insert(0, (0, 0))
bert_tokens_offsets.append((0, 0))
# get tokens
bert_tokens_tokens = self.tokenize(sentence)
        if with_special_token:
bert_tokens_tokens.insert(0, '[CLS]')
bert_tokens_tokens.append('[SEP]')
# get pinyin of a sentence
pinyin_list = pinyin(sentence,
style=Style.TONE3,
heteronym=True,
errors=lambda x: [['not chinese'] for _ in x])
pinyin_locs = {}
# get pinyin of each location
for index, item in enumerate(pinyin_list):
pinyin_string = item[0]
# not a Chinese character, pass
if pinyin_string == "not chinese":
continue
if pinyin_string in self.pinyin2tensor:
pinyin_locs[index] = self.pinyin2tensor[pinyin_string]
else:
ids = [0] * 8
for i, p in enumerate(pinyin_string):
if p not in self.pinyin_dict["char2idx"]:
ids = [0] * 8
break
ids[i] = self.pinyin_dict["char2idx"][p]
pinyin_locs[index] = ids
# find chinese character location, and generate pinyin ids
pinyin_ids = []
for idx, (token, offset) in enumerate(
zip(bert_tokens_tokens, bert_tokens_offsets)):
if offset[1] - offset[0] != 1:
                # multi-character tokens, plus the special [CLS] / [SEP] tokens
pinyin_ids.append([0] * 8)
continue
if offset[0] in pinyin_locs:
                # single-character token that has a pinyin tensor
pinyin_ids.append(pinyin_locs[offset[0]])
else:
                # single-character token without a pinyin tensor
pinyin_ids.append([0] * 8)
return pinyin_ids
def convert_tokens_to_pinyin_ids(self,
tokens: List[str]) -> List[List[int]]:
"""
Example :
tokens: ['[CLS]', '你', '多', '大', '了', '?', '[SEP]', '我', '10', '岁', '了', '。', '[SEP]']
"""
pinyin_ids = []
for token in tokens:
if token == '[CLS]' or token == '[SEP]':
                # the special [CLS] / [SEP] tokens
pinyin_ids.append([0] * 8)
continue
offset = self.get_offset_mapping(token)[0]
if offset[1] - offset[0] != 1:
                # token made up of more than one character
pinyin_ids.append([0] * 8)
continue
pinyin_string = pinyin(token,
style=Style.TONE3,
heteronym=True,
errors=lambda x: [['not chinese']
for _ in x])[0][0]
if pinyin_string == "not chinese":
                # not a Chinese character
pinyin_ids.append([0] * 8)
else:
if pinyin_string in self.pinyin2tensor:
pinyin_ids.append(self.pinyin2tensor[pinyin_string])
else:
ids = [0] * 8
for i, p in enumerate(pinyin_string):
if p not in self.pinyin_dict["char2idx"]:
ids = [0] * 8
break
ids[i] = self.pinyin_dict["char2idx"][p]
pinyin_ids.append(ids)
return pinyin_ids
@property
def vocab_size(self):
"""
Return the size of vocabulary.
Returns:
int: The size of vocabulary.
"""
return len(self.vocab)
def _tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def tokenize(self, text):
return self._tokenize(text)
def convert_tokens_to_string(self, tokens):
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def num_special_tokens_to_add(self, pair=False):
token_ids_0 = []
token_ids_1 = []
return len(
self.build_inputs_with_special_tokens(
token_ids_0, token_ids_1 if pair else None))
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
_cls = [self.cls_token_id]
_sep = [self.sep_token_id]
return _cls + token_ids_0 + _sep + token_ids_1 + _sep
def build_offset_mapping_with_special_tokens(self,
offset_mapping_0,
offset_mapping_1=None):
if offset_mapping_1 is None:
return [(0, 0)] + offset_mapping_0 + [(0, 0)]
return [(0, 0)] + offset_mapping_0 + [(0, 0)
] + offset_mapping_1 + [(0, 0)]
def create_token_type_ids_from_sequences(self,
token_ids_0,
token_ids_1=None):
_sep = [self.sep_token_id]
_cls = [self.cls_token_id]
if token_ids_1 is None:
return len(_cls + token_ids_0 + _sep) * [0]
return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
_sep) * [1]
def get_special_tokens_mask(self,
token_ids_0,
token_ids_1=None,
already_has_special_tokens=False):
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(
map(
lambda x: 1
if x in [self.sep_token_id, self.cls_token_id] else 0,
token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + (
[0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
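# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example, shown as comments because it needs pretrained resources:
# 'path/to/ChineseBERT-base' is a hypothetical local directory that must hold
# vocab.txt plus config/pinyin_map.json, config/id2pinyin.json and
# config/pinyin2tensor.json. Each token maps to 8 pinyin-character ids,
# zero-padded, and all zeros for non-Chinese or special tokens.
# tokenizer = ChineseBertTokenizer('path/to/ChineseBERT-base')
# outputs = tokenizer.tokenize_sentence('我喜欢自然语言处理')
# print(outputs['input_ids'].shape, outputs['pinyin_ids'].shape)  # (n,), (n, 8)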
|
[
"pypinyin.pinyin",
"os.path.join",
"os.path.isfile",
"numpy.array",
"json.load"
] |
[((961, 997), 'os.path.join', 'os.path.join', (['bert_path', '"""vocab.txt"""'], {}), "(bert_path, 'vocab.txt')\n", (973, 997), False, 'import os\n'), ((1020, 1053), 'os.path.join', 'os.path.join', (['bert_path', '"""config"""'], {}), "(bert_path, 'config')\n", (1032, 1053), False, 'import os\n'), ((2900, 2919), 'numpy.array', 'np.array', (['input_ids'], {}), '(input_ids)\n', (2908, 2919), True, 'import numpy as np\n'), ((3626, 3729), 'pypinyin.pinyin', 'pinyin', (['sentence'], {'style': 'Style.TONE3', 'heteronym': '(True)', 'errors': "(lambda x: [['not chinese'] for _ in x])"}), "(sentence, style=Style.TONE3, heteronym=True, errors=lambda x: [[\n 'not chinese'] for _ in x])\n", (3632, 3729), False, 'from pypinyin import Style, pinyin\n'), ((1069, 1095), 'os.path.isfile', 'os.path.isfile', (['vocab_file'], {}), '(vocab_file)\n', (1083, 1095), False, 'import os\n'), ((1671, 1685), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (1680, 1685), False, 'import json\n'), ((1855, 1869), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (1864, 1869), False, 'import json\n'), ((2046, 2060), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (2055, 2060), False, 'import json\n'), ((1551, 1595), 'os.path.join', 'os.path.join', (['config_path', '"""pinyin_map.json"""'], {}), "(config_path, 'pinyin_map.json')\n", (1563, 1595), False, 'import os\n'), ((1738, 1781), 'os.path.join', 'os.path.join', (['config_path', '"""id2pinyin.json"""'], {}), "(config_path, 'id2pinyin.json')\n", (1750, 1781), False, 'import os\n'), ((1921, 1968), 'os.path.join', 'os.path.join', (['config_path', '"""pinyin2tensor.json"""'], {}), "(config_path, 'pinyin2tensor.json')\n", (1933, 1968), False, 'import os\n'), ((2941, 2961), 'numpy.array', 'np.array', (['pinyin_ids'], {}), '(pinyin_ids)\n', (2949, 2961), True, 'import numpy as np\n'), ((5864, 5964), 'pypinyin.pinyin', 'pinyin', (['token'], {'style': 'Style.TONE3', 'heteronym': '(True)', 'errors': "(lambda x: [['not chinese'] for _ in x])"}), "(token, style=Style.TONE3, heteronym=True, errors=lambda x: [[\n 'not chinese'] for _ in x])\n", (5870, 5964), False, 'from pypinyin import Style, pinyin\n')]
|
import numpy as np
class Compressor():
def __init__(self,
num_particles: int, num_spin_orbitals: int, rdm_ideal=None) -> None:
self.num_particles = num_particles
self.num_spin_orbitals = num_spin_orbitals
self.rdm_ideal = rdm_ideal
pass
def compress(self, rdm):
N = self.num_spin_orbitals ** 2 // 4
        # number of upper-triangle elements (triangular number formula)
S = self._get_num_elems_of_tri_mat(N)
n = self.num_spin_orbitals // 2
# mat = self._tensor2matrix(rdm)
utri_arr = np.zeros((3*S,))
utri_arr[: S] = \
self._compress_matrix_to_upper_triangle_array(self._tensor2matrix(rdm[:n, :n, :n, :n]))
utri_arr[S: 2*S] = \
self._compress_matrix_to_upper_triangle_array(self._tensor2matrix(rdm[:n, n:, :n, n:]))
utri_arr[2*S: ] = \
self._compress_matrix_to_upper_triangle_array(self._tensor2matrix(rdm[n:, n:, n:, n:]))
return utri_arr
def decompress(self, utri_arr):
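        # Rebuild the full tensor: block A is restored first, B, C, D follow from index permutations of A (with sign flips), and the two diagonal blocks come from their stored upper triangles.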
# rdm = np.zeros((self.num_spin_orbitals ** 2,) * 2) # matrix
rdm = np.zeros((self.num_spin_orbitals,) * 4) # tensor
N = self.num_spin_orbitals ** 2 // 4
n = self.num_spin_orbitals // 2
# get num of elements by square formula of triangle
S = self._get_num_elems_of_tri_mat(N)
# restore from the second triangle
A = self._restore_matrix_by_upper_triangle_array(utri_arr[S: 2*S], N)
A_tensor = self._matrix2tensor(A)
B = - A_tensor.transpose([0, 1, 3, 2])
# B = self._tensor2matrix(B)
C = A_tensor.transpose([1, 0, 3, 2])
# C = self._tensor2matrix(C)
D = - A_tensor.transpose([1, 0, 2, 3])
# restore middle 4
# rdm[N: 2*N, N: 2*N] = A
# diff = np.linalg.norm(self._tensor2matrix(self.rdm_ideal)[N: 2*N, N: 2*N] - A)
rdm[:n, n:, :n, n:] = A_tensor
# diff = np.linalg.norm(self.rdm_ideal[:n, n:, :n, n:] - A_tensor)
# print('A', diff)
# rdm[N: 2*N, 2*N: 3*N] = B
# diff = np.linalg.norm(self._tensor2matrix(self.rdm_ideal)[N: 2*N, 2*N: 3*N] - B)
rdm[:n, n:, n:, :n] = B
# diff = np.linalg.norm(self.rdm_ideal[:n, n:, n:, :n] - B)
# print('B', diff)
# rdm[2*N: 3*N, 2*N: 3*N] = C
# diff = np.linalg.norm(self._tensor2matrix(self.rdm_ideal)[2*N: 3*N, 2*N: 3*N] - C)
rdm[n:, :n, n:, :n] = C
# diff = np.linalg.norm(self.rdm_ideal[n:, :n, n:, :n] - C)
# print('C', diff)
rdm[n:, :n, :n, n:] = D
# diff = np.linalg.norm(self.rdm_ideal[n:, :n, :n, n:] - D)
# print('D', diff)
# rdm = self._tensor2matrix(rdm)
# restore upper left
rdm[:n, :n, :n, :n] = \
self._matrix2tensor(self._restore_matrix_by_upper_triangle_array(utri_arr[: S], N))
# diff = np.linalg.norm(self.rdm_ideal[:n, :n, :n, :n] - rdm[:n, :n, :n, :n])
# print('upper left', diff)
        # restore bottom right
rdm[n:, n:, n:, n:] = \
self._matrix2tensor(self._restore_matrix_by_upper_triangle_array(utri_arr[2*S:], N))
# diff = np.linalg.norm(self.rdm_ideal[n:, n:, n:, n:] - rdm[n:, n:, n:, n:])
        # print('bottom right', diff)
# rdm = self._tensor2matrix(rdm)
# utri = np.triu(rdm)
# diag = np.diag(np.diag(rdm))
# utri -= diag
# rdm = utri + utri.T + diag
return rdm #self._matrix2tensor(rdm)
@staticmethod
def _restore_matrix_by_upper_triangle_array(utri_arr, n):
cnt = 0
utri = np.zeros((n,) * 2) # upper triangular matrix
for i in range(n):
for j in range(i, n):
utri[i, j] = utri_arr[cnt]
cnt += 1
diag = np.diag(np.diag(utri))
mat = utri + utri.T - diag
return mat
@staticmethod
def _compress_matrix_to_upper_triangle_array(mat):
n = mat.shape[0]
num_elements = Compressor._get_num_elems_of_tri_mat(n)
utri_arr = np.zeros((num_elements))
cnt = 0
for i in range(n):
for j in range(i, n):
utri_arr[cnt] = mat[i, j]
cnt += 1
return utri_arr
@staticmethod
def _get_num_elems_of_tri_mat(n):
return (n + 1) * n // 2
@staticmethod
def _matrix2tensor(mat, transpose=False):
n = int(np.sqrt(mat.shape[0]))
if transpose:
tensor = mat.reshape((n,) * 4).transpose([0, 1, 3, 2])
else:
tensor = mat.reshape((n,) * 4)
return tensor
@staticmethod
def _tensor2matrix(tensor, transpose=False):
n = tensor.shape[0]
if transpose:
mat = tensor.transpose([0, 1, 3, 2]).reshape((n*n,) * 2)
else:
mat = tensor.reshape((n*n,) * 2)
return mat
@staticmethod
def _utri_mat2real_sym_mat(utri):
diag = np.diag(np.diag(utri))
mat = utri + utri.T - diag
return mat
|
[
"numpy.zeros",
"numpy.sqrt",
"numpy.diag"
] |
[((577, 595), 'numpy.zeros', 'np.zeros', (['(3 * S,)'], {}), '((3 * S,))\n', (585, 595), True, 'import numpy as np\n'), ((1132, 1171), 'numpy.zeros', 'np.zeros', (['((self.num_spin_orbitals,) * 4)'], {}), '((self.num_spin_orbitals,) * 4)\n', (1140, 1171), True, 'import numpy as np\n'), ((3642, 3660), 'numpy.zeros', 'np.zeros', (['((n,) * 2)'], {}), '((n,) * 2)\n', (3650, 3660), True, 'import numpy as np\n'), ((4091, 4113), 'numpy.zeros', 'np.zeros', (['num_elements'], {}), '(num_elements)\n', (4099, 4113), True, 'import numpy as np\n'), ((3840, 3853), 'numpy.diag', 'np.diag', (['utri'], {}), '(utri)\n', (3847, 3853), True, 'import numpy as np\n'), ((4462, 4483), 'numpy.sqrt', 'np.sqrt', (['mat.shape[0]'], {}), '(mat.shape[0])\n', (4469, 4483), True, 'import numpy as np\n'), ((5009, 5022), 'numpy.diag', 'np.diag', (['utri'], {}), '(utri)\n', (5016, 5022), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
import prml.nn as nn
class TestGaussian(unittest.TestCase):
def test_gaussian_draw_forward(self):
mu = nn.array(0)
sigma = nn.softplus(nn.array(-1))
gaussian = nn.Gaussian(mu, sigma)
sample = []
for _ in range(1000):
sample.append(gaussian.draw().value)
self.assertTrue(np.allclose(np.mean(sample), 0, rtol=0.1, atol=0.1), np.mean(sample))
self.assertTrue(np.allclose(np.std(sample), gaussian.std.value, 0.1, 0.1))
def test_gaussian_draw_backward(self):
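        # Minimizing the KL divergence to the prior N(1, 1) should drive the learned mean and std toward 1.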
mu = nn.array(0)
s = nn.array(2)
optimizer = nn.optimizer.Gradient({0: mu, 1: s}, 0.01)
prior = nn.Gaussian(1, 1)
for _ in range(1000):
mu.cleargrad()
s.cleargrad()
gaussian = nn.Gaussian(mu, nn.softplus(s))
gaussian.draw()
loss = nn.loss.kl_divergence(gaussian, prior).sum()
optimizer.minimize(loss)
self.assertTrue(np.allclose(gaussian.mean.value, 1, 0.1, 0.1))
self.assertTrue(np.allclose(gaussian.std.value, 1, 0.1, 0.1))
if __name__ == "__main__":
unittest.main()
|
[
"numpy.mean",
"numpy.allclose",
"prml.nn.softplus",
"prml.nn.optimizer.Gradient",
"prml.nn.loss.kl_divergence",
"numpy.std",
"unittest.main",
"prml.nn.Gaussian",
"prml.nn.array"
] |
[((1156, 1171), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1169, 1171), False, 'import unittest\n'), ((153, 164), 'prml.nn.array', 'nn.array', (['(0)'], {}), '(0)\n', (161, 164), True, 'import prml.nn as nn\n'), ((226, 248), 'prml.nn.Gaussian', 'nn.Gaussian', (['mu', 'sigma'], {}), '(mu, sigma)\n', (237, 248), True, 'import prml.nn as nn\n'), ((582, 593), 'prml.nn.array', 'nn.array', (['(0)'], {}), '(0)\n', (590, 593), True, 'import prml.nn as nn\n'), ((606, 617), 'prml.nn.array', 'nn.array', (['(2)'], {}), '(2)\n', (614, 617), True, 'import prml.nn as nn\n'), ((638, 684), 'prml.nn.optimizer.Gradient', 'nn.optimizer.Gradient', (['{(0): mu, (1): s}', '(0.01)'], {}), '({(0): mu, (1): s}, 0.01)\n', (659, 684), True, 'import prml.nn as nn\n'), ((697, 714), 'prml.nn.Gaussian', 'nn.Gaussian', (['(1)', '(1)'], {}), '(1, 1)\n', (708, 714), True, 'import prml.nn as nn\n'), ((193, 205), 'prml.nn.array', 'nn.array', (['(-1)'], {}), '(-1)\n', (201, 205), True, 'import prml.nn as nn\n'), ((425, 440), 'numpy.mean', 'np.mean', (['sample'], {}), '(sample)\n', (432, 440), True, 'import numpy as np\n'), ((1006, 1051), 'numpy.allclose', 'np.allclose', (['gaussian.mean.value', '(1)', '(0.1)', '(0.1)'], {}), '(gaussian.mean.value, 1, 0.1, 0.1)\n', (1017, 1051), True, 'import numpy as np\n'), ((1077, 1121), 'numpy.allclose', 'np.allclose', (['gaussian.std.value', '(1)', '(0.1)', '(0.1)'], {}), '(gaussian.std.value, 1, 0.1, 0.1)\n', (1088, 1121), True, 'import numpy as np\n'), ((384, 399), 'numpy.mean', 'np.mean', (['sample'], {}), '(sample)\n', (391, 399), True, 'import numpy as np\n'), ((478, 492), 'numpy.std', 'np.std', (['sample'], {}), '(sample)\n', (484, 492), True, 'import numpy as np\n'), ((837, 851), 'prml.nn.softplus', 'nn.softplus', (['s'], {}), '(s)\n', (848, 851), True, 'import prml.nn as nn\n'), ((900, 938), 'prml.nn.loss.kl_divergence', 'nn.loss.kl_divergence', (['gaussian', 'prior'], {}), '(gaussian, prior)\n', (921, 938), True, 'import prml.nn as nn\n')]
|
import sys
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pickle
def load_obj(name):
pkl_path = ""
with open(pkl_path + name + ".pkl", 'rb') as f:
return pickle.load(f)
def load_prun_obj(name):
pkl_path = ""
with open(pkl_path + name + ".pkl", 'rb') as f:
return pickle.load(f)
def result_plt(results, label):
# lists = sorted(results.items())
# x, y = zip(*lists)
plt.plot(results, label = label)
matrices = ['acc', 'loss']
# labels = ['fedavg_5iid_5niid', 'fedavg_6iid_4niid', 'fedavg_2iid_8niid', 'fedavg_8iid_2niid' ]
# labels_prun = ['fedavg_5iid_5niid_prun', 'fedavg_6iid_4niid_prun', 'fedavg_8iid_2niid_prun']
labels = ['Optimal Aggregation', 'FedAvg' ]
labels_prun = ['fedadp_5iid_5niid_0.8_prun' , 'fedadp_5iid_5niid_current_prun','fedadp_5iid_5niid']
# iid_list = [5, 6, 2, 8]
# niid_list = [10 - x for x in iid_list]
iid_list = [10]
niid_list = [10]
prob_ratio = [0.1]
model = [ 'cnn']
num_exp = 10
num_exp_3 = 3
num_round = 50
def define_and_get_arguments(args=sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Run figure plot"
)
parser.add_argument("--matrice", type=str, choices=matrices, default="loss", help = "result matrices")
parser.add_argument("--iid", type=int, default=5, help="number of nodes")
parser.add_argument("--training_rounds", type=int, default = 50, help= "number of training rounds")
args = parser.parse_args(args=args)
return args
def main():
args = define_and_get_arguments()
fedavg_data = {}
fedadp_data = {}
feddel_data = {}
remove_node = {}
# for exp in range(1,num_exp+1):
# remove_node[exp] = load_obj('remove node_exp%s' %(exp))
# print(remove_node[2][0])
for exp in range(1,num_exp+1):
fedadp_data[exp] = load_obj('feddel_mnist_%s_1_exp%s' %(model[0], exp))
for exp in range(1,num_exp+1):
fedavg_data[exp] = load_obj('fedavg_mnist_%s_1_exp%s' %(model[0],exp))
if args.matrice == "acc":
overall_avg = []
for k in range(1,num_exp+1):
# print(fedadp_data[k][0])
overall_avg.extend(fedadp_data[k][0])
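        # Stack the flat per-experiment list into shape (num_exp, num_round) and average over experiments.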
temp_adp = np.array([overall_avg[num_round*i:num_round*(i+1)] for i in range(num_exp)])
acc_adp = np.mean(temp_adp, axis=0)
# print(acc_adp)
result_plt(acc_adp, labels[0])
overall_avg = []
for k in range(1,num_exp+1):
overall_avg.extend(fedavg_data[k][0])
temp_avg = np.array([overall_avg[num_round*i:num_round*(i+1)] for i in range(num_exp)])
acc_avg = np.mean(temp_avg, axis=0)
# print(acc_avg)
result_plt(acc_avg, labels[-1])
ylabel = "Testing Accuracy"
elif args.matrice == "loss":
overall_avg = []
for k in range(1,num_exp+1):
# print(fedadp_data[k][0])
overall_avg.extend(fedadp_data[k][1])
temp_adp = np.array([overall_avg[num_round*i:num_round*(i+1)] for i in range(num_exp)])
acc_adp = np.mean(temp_adp, axis=0)
# print(acc_adp)
plt.plot(list(range(num_round)), acc_adp, color='#069AF3', linewidth = '1.5', label = labels[0])
overall_avg = []
for k in range(1,num_exp+1):
overall_avg.extend(fedavg_data[k][1])
temp_avg = np.array([overall_avg[num_round*i:num_round*(i+1)] for i in range(num_exp)])
acc_avg = np.mean(temp_avg, axis=0)
plt.plot(list(range(num_round)), acc_avg, '--', color='#F97306', linewidth = '1.5',label = labels[-1])
plt.xlabel("Communication Round", fontsize=13)
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.ylabel( "Training Loss", fontsize=13)
plt.legend(frameon=False, loc=7, prop={'size': 10})
elif args.matrice == "sts":
x = load_obj('observe node_exp10' )
# print(x[0])
overall_avg = []
for i in range(3):
temp = []
for j in range(50):
# print(x[0][j][i])
temp.append(x[0][j][i])
overall_avg.extend(temp)
data = np.array([overall_avg[num_round*i:num_round*(i+1)] for i in range(3)])
# print(data[0])
# plt.figure()
# plt.subplot()
# fig, ax = plt.subplots(nrows=2, ncols=1)
label = ['Selected', 'Labeled', 'Excluded']
index = np.arange(0, 25, 1)
index_2 = np.arange(25, 50, 1)
# plt.hist()
color_index= ['lightgray','lightsteelblue','springgreen']
plt.subplot(2,1,1)
for i in range(3):
j = i+1
            # index+2: starting coordinate of the bar; width: bar width
# print(data[i])
plt.bar(index, data[i][:25],width=0.6,color=color_index[i], label= label[i])
plt.xticks(index)
plt.xticks(fontsize=7)
plt.yticks(fontsize=8)
plt.subplot(2,1,2)
for i in range(3):
j = i+1
            # index+2: starting coordinate of the bar; width: bar width
plt.bar(index_2, data[i][25:],width=0.6,color=color_index[i], label= label[i])
plt.xticks(index_2)
plt.yticks([0,15, 5, 10])
plt.legend(loc='best', prop={'size': 7})
plt.xticks(fontsize=7)
plt.yticks(fontsize=8)
# plt.gca().spines['top'].set_visible(False)
# plt.hist(data[i], index, alpha = 0.5)
# plt.hist(data[0], index, alpha = 0.5)
# plt.hist(data[1], index, alpha = 0.5)
fig_path = ""
plt.savefig(fig_path + "%s_com_%siid_%s" %(args.matrice, str(iid_list[0]), model[0]) + ".eps", format='eps', dpi=1200)
if __name__ == "__main__":
main()
|
[
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pickle.load",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend"
] |
[((441, 471), 'matplotlib.pyplot.plot', 'plt.plot', (['results'], {'label': 'label'}), '(results, label=label)\n', (449, 471), True, 'import matplotlib.pyplot as plt\n'), ((1087, 1141), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run figure plot"""'}), "(description='Run figure plot')\n", (1110, 1141), False, 'import argparse\n'), ((200, 214), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (211, 214), False, 'import pickle\n'), ((326, 340), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (337, 340), False, 'import pickle\n'), ((2333, 2358), 'numpy.mean', 'np.mean', (['temp_adp'], {'axis': '(0)'}), '(temp_adp, axis=0)\n', (2340, 2358), True, 'import numpy as np\n'), ((2662, 2687), 'numpy.mean', 'np.mean', (['temp_avg'], {'axis': '(0)'}), '(temp_avg, axis=0)\n', (2669, 2687), True, 'import numpy as np\n'), ((3102, 3127), 'numpy.mean', 'np.mean', (['temp_adp'], {'axis': '(0)'}), '(temp_adp, axis=0)\n', (3109, 3127), True, 'import numpy as np\n'), ((3495, 3520), 'numpy.mean', 'np.mean', (['temp_avg'], {'axis': '(0)'}), '(temp_avg, axis=0)\n', (3502, 3520), True, 'import numpy as np\n'), ((3651, 3697), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Communication Round"""'], {'fontsize': '(13)'}), "('Communication Round', fontsize=13)\n", (3661, 3697), True, 'import matplotlib.pyplot as plt\n'), ((3811, 3851), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Training Loss"""'], {'fontsize': '(13)'}), "('Training Loss', fontsize=13)\n", (3821, 3851), True, 'import matplotlib.pyplot as plt\n'), ((3870, 3921), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)', 'loc': '(7)', 'prop': "{'size': 10}"}), "(frameon=False, loc=7, prop={'size': 10})\n", (3880, 3921), True, 'import matplotlib.pyplot as plt\n'), ((4564, 4583), 'numpy.arange', 'np.arange', (['(0)', '(25)', '(1)'], {}), '(0, 25, 1)\n', (4573, 4583), True, 'import numpy as np\n'), ((4602, 4622), 'numpy.arange', 'np.arange', (['(25)', '(50)', '(1)'], {}), '(25, 50, 1)\n', (4611, 4622), True, 'import numpy as np\n'), ((4726, 4746), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (4737, 4746), True, 'import matplotlib.pyplot as plt\n'), ((4963, 4980), 'matplotlib.pyplot.xticks', 'plt.xticks', (['index'], {}), '(index)\n', (4973, 4980), True, 'import matplotlib.pyplot as plt\n'), ((4989, 5011), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(7)'}), '(fontsize=7)\n', (4999, 5011), True, 'import matplotlib.pyplot as plt\n'), ((5020, 5042), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(8)'}), '(fontsize=8)\n', (5030, 5042), True, 'import matplotlib.pyplot as plt\n'), ((5052, 5072), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (5063, 5072), True, 'import matplotlib.pyplot as plt\n'), ((5262, 5281), 'matplotlib.pyplot.xticks', 'plt.xticks', (['index_2'], {}), '(index_2)\n', (5272, 5281), True, 'import matplotlib.pyplot as plt\n'), ((5290, 5316), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 15, 5, 10]'], {}), '([0, 15, 5, 10])\n', (5300, 5316), True, 'import matplotlib.pyplot as plt\n'), ((5324, 5364), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'prop': "{'size': 7}"}), "(loc='best', prop={'size': 7})\n", (5334, 5364), True, 'import matplotlib.pyplot as plt\n'), ((5373, 5395), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(7)'}), '(fontsize=7)\n', (5383, 5395), True, 'import matplotlib.pyplot as plt\n'), ((5404, 5426), 
'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(8)'}), '(fontsize=8)\n', (5414, 5426), True, 'import matplotlib.pyplot as plt\n'), ((4878, 4955), 'matplotlib.pyplot.bar', 'plt.bar', (['index', 'data[i][:25]'], {'width': '(0.6)', 'color': 'color_index[i]', 'label': 'label[i]'}), '(index, data[i][:25], width=0.6, color=color_index[i], label=label[i])\n', (4885, 4955), True, 'import matplotlib.pyplot as plt\n'), ((5175, 5254), 'matplotlib.pyplot.bar', 'plt.bar', (['index_2', 'data[i][25:]'], {'width': '(0.6)', 'color': 'color_index[i]', 'label': 'label[i]'}), '(index_2, data[i][25:], width=0.6, color=color_index[i], label=label[i])\n', (5182, 5254), True, 'import matplotlib.pyplot as plt\n'), ((3706, 3715), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3713, 3715), True, 'import matplotlib.pyplot as plt\n'), ((3759, 3768), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3766, 3768), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import cv2
import proper
from cats.cats_simus import *
def vortex(wfo, CAL, charge, f_lens, path, Debug_print):
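    # Apply a vortex phase ramp exp(1j*charge*theta) to the wavefront: CAL=1 builds and writes the theoretical vortex and reference fields for a circular pupil, CAL=0 reads them back and corrects the actual pupil.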
n = int(proper.prop_get_gridsize(wfo))
ofst = 0 # no offset
ramp_sign = 1 #sign of charge is positive
#sampling = n
ramp_oversamp = 11. # vortex is oversampled for a better discretization
if charge!=0:
if CAL==1: # create the vortex for a perfectly circular pupil
if (Debug_print == True):
print ("CAL:1, charge ", charge)
writefield(path,'zz_psf', wfo.wfarr) # write the pre-vortex field
nramp = int(n*ramp_oversamp) #oversamp
            # create the vortex by building a matrix (theta) representing the azimuthal ramp (atan2 of the gradually varying x and y matrices)
y1 = np.ones((nramp,), dtype=np.int)
y2 = np.arange(0, nramp, 1.) - (nramp/2) - int(ramp_oversamp)/2
y = np.outer(y2, y1)
x = np.transpose(y)
theta = np.arctan2(y,x)
x = 0
y = 0
#vvc_tmp_complex = np.array(np.zeros((nramp,nramp)), dtype=complex)
#vvc_tmp_complex.imag = ofst + ramp_sign*charge*theta
#vvc_tmp = np.exp(vvc_tmp_complex)
vvc_tmp = np.exp(1j*(ofst + ramp_sign*charge*theta))
theta = 0
            vvc_real_resampled = cv2.resize(vvc_tmp.real, (0,0), fx=1/ramp_oversamp, fy=1/ramp_oversamp, interpolation=cv2.INTER_LINEAR) # scale the pupil to the pupil size of the simulations
            vvc_imag_resampled = cv2.resize(vvc_tmp.imag, (0,0), fx=1/ramp_oversamp, fy=1/ramp_oversamp, interpolation=cv2.INTER_LINEAR) # scale the pupil to the pupil size of the simulations
vvc = np.array(vvc_real_resampled, dtype=complex)
vvc.imag = vvc_imag_resampled
vvcphase = np.arctan2(vvc.imag, vvc.real) # create the vortex phase
vvc_complex = np.array(np.zeros((n,n)), dtype=complex)
vvc_complex.imag = vvcphase
vvc = np.exp(vvc_complex)
vvc_tmp = 0.
writefield(path,'zz_vvc', vvc) # write the theoretical vortex field
wfo0 = wfo
proper.prop_multiply(wfo, vvc)
proper.prop_propagate(wfo, f_lens, 'OAP2')
proper.prop_lens(wfo, f_lens)
proper.prop_propagate(wfo, f_lens, 'forward to Lyot Stop')
            proper.prop_circular_obscuration(wfo, 1., NORM=True) # null the amplitude inside the Lyot Stop
proper.prop_propagate(wfo, -f_lens) # back-propagation
proper.prop_lens(wfo, -f_lens)
proper.prop_propagate(wfo, -f_lens)
writefield(path,'zz_perf', wfo.wfarr) # write the perfect-result vortex field
wfo = wfo0
else:
if (Debug_print == True):
print ("CAL:0, charge ", charge)
vvc = readfield(path,'zz_vvc') # read the theoretical vortex field
vvc = proper.prop_shift_center(vvc)
scale_psf = wfo._wfarr[0,0]
psf_num = readfield(path,'zz_psf') # read the pre-vortex field
psf0 = psf_num[0,0]
psf_num = psf_num/psf0*scale_psf
perf_num = readfield(path,'zz_perf') # read the perfect-result vortex field
perf_num = perf_num/psf0*scale_psf
wfo._wfarr = (wfo._wfarr - psf_num)*vvc + perf_num # the wavefront takes into account the real pupil with the perfect-result vortex field
return
|
[
"proper.prop_circular_obscuration",
"numpy.ones",
"proper.prop_lens",
"proper.prop_get_gridsize",
"proper.prop_multiply",
"proper.prop_shift_center",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.outer",
"numpy.arctan2",
"proper.prop_propagate",
"cv2.resize",
"numpy.transpose",
"numpy.arange"
] |
[((146, 175), 'proper.prop_get_gridsize', 'proper.prop_get_gridsize', (['wfo'], {}), '(wfo)\n', (170, 175), False, 'import proper\n'), ((801, 832), 'numpy.ones', 'np.ones', (['(nramp,)'], {'dtype': 'np.int'}), '((nramp,), dtype=np.int)\n', (808, 832), True, 'import numpy as np\n'), ((925, 941), 'numpy.outer', 'np.outer', (['y2', 'y1'], {}), '(y2, y1)\n', (933, 941), True, 'import numpy as np\n'), ((958, 973), 'numpy.transpose', 'np.transpose', (['y'], {}), '(y)\n', (970, 973), True, 'import numpy as np\n'), ((994, 1010), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (1004, 1010), True, 'import numpy as np\n'), ((1261, 1311), 'numpy.exp', 'np.exp', (['(1.0j * (ofst + ramp_sign * charge * theta))'], {}), '(1.0j * (ofst + ramp_sign * charge * theta))\n', (1267, 1311), True, 'import numpy as np\n'), ((1359, 1471), 'cv2.resize', 'cv2.resize', (['vvc_tmp.real', '(0, 0)'], {'fx': '(1 / ramp_oversamp)', 'fy': '(1 / ramp_oversamp)', 'interpolation': 'cv2.INTER_LINEAR'}), '(vvc_tmp.real, (0, 0), fx=1 / ramp_oversamp, fy=1 / ramp_oversamp,\n interpolation=cv2.INTER_LINEAR)\n', (1369, 1471), False, 'import cv2\n'), ((1551, 1663), 'cv2.resize', 'cv2.resize', (['vvc_tmp.imag', '(0, 0)'], {'fx': '(1 / ramp_oversamp)', 'fy': '(1 / ramp_oversamp)', 'interpolation': 'cv2.INTER_LINEAR'}), '(vvc_tmp.imag, (0, 0), fx=1 / ramp_oversamp, fy=1 / ramp_oversamp,\n interpolation=cv2.INTER_LINEAR)\n', (1561, 1663), False, 'import cv2\n'), ((1728, 1771), 'numpy.array', 'np.array', (['vvc_real_resampled'], {'dtype': 'complex'}), '(vvc_real_resampled, dtype=complex)\n', (1736, 1771), True, 'import numpy as np\n'), ((1837, 1867), 'numpy.arctan2', 'np.arctan2', (['vvc.imag', 'vvc.real'], {}), '(vvc.imag, vvc.real)\n', (1847, 1867), True, 'import numpy as np\n'), ((2019, 2038), 'numpy.exp', 'np.exp', (['vvc_complex'], {}), '(vvc_complex)\n', (2025, 2038), True, 'import numpy as np\n'), ((2179, 2209), 'proper.prop_multiply', 'proper.prop_multiply', (['wfo', 'vvc'], {}), '(wfo, vvc)\n', (2199, 2209), False, 'import proper\n'), ((2222, 2264), 'proper.prop_propagate', 'proper.prop_propagate', (['wfo', 'f_lens', '"""OAP2"""'], {}), "(wfo, f_lens, 'OAP2')\n", (2243, 2264), False, 'import proper\n'), ((2277, 2306), 'proper.prop_lens', 'proper.prop_lens', (['wfo', 'f_lens'], {}), '(wfo, f_lens)\n', (2293, 2306), False, 'import proper\n'), ((2319, 2377), 'proper.prop_propagate', 'proper.prop_propagate', (['wfo', 'f_lens', '"""forward to Lyot Stop"""'], {}), "(wfo, f_lens, 'forward to Lyot Stop')\n", (2340, 2377), False, 'import proper\n'), ((2390, 2443), 'proper.prop_circular_obscuration', 'proper.prop_circular_obscuration', (['wfo', '(1.0)'], {'NORM': '(True)'}), '(wfo, 1.0, NORM=True)\n', (2422, 2443), False, 'import proper\n'), ((2496, 2531), 'proper.prop_propagate', 'proper.prop_propagate', (['wfo', '(-f_lens)'], {}), '(wfo, -f_lens)\n', (2517, 2531), False, 'import proper\n'), ((2563, 2593), 'proper.prop_lens', 'proper.prop_lens', (['wfo', '(-f_lens)'], {}), '(wfo, -f_lens)\n', (2579, 2593), False, 'import proper\n'), ((2606, 2641), 'proper.prop_propagate', 'proper.prop_propagate', (['wfo', '(-f_lens)'], {}), '(wfo, -f_lens)\n', (2627, 2641), False, 'import proper\n'), ((2953, 2982), 'proper.prop_shift_center', 'proper.prop_shift_center', (['vvc'], {}), '(vvc)\n', (2977, 2982), False, 'import proper\n'), ((1929, 1945), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (1937, 1945), True, 'import numpy as np\n'), ((850, 874), 'numpy.arange', 'np.arange', (['(0)', 'nramp', '(1.0)'], {}), '(0, nramp, 
1.0)\n', (859, 874), True, 'import numpy as np\n')]
|
bl_info = {
"name": "Import Planar Code",
"author": "<NAME>",
"version": (1, 0),
"blender": (2, 80, 0),
"location": "File > Import > Planar Code",
"description": "Import planar code and construct mesh by assigning vertex positions.",
"warning": "",
"support": "TESTING",
"wiki_url": "",
"tracker_url": "",
"category": "Import-Export",
}
import bpy
import bmesh
import numpy as np
import mathutils as mu
from bpy.props import StringProperty, IntProperty, BoolProperty
import struct
import collections
import os
import random
class PlanarCodeReader:
def __init__(self, filename, index, embed2d, embed3d):
self.faceCounters = []
verts_loc, faces = self.read(filename, index)
if (not verts_loc):
return
if (len(verts_loc) <= 0):
return
# create new mesh
name = os.path.basename(filename) + "_" + str(index)
mesh = bpy.data.meshes.new(name)
mesh.from_pydata(verts_loc,[],faces)
mesh.update(calc_edges=True)
# create new bmesh
bm = bmesh.new()
bm.from_mesh(mesh)
# enable lookup
bm.verts.ensure_lookup_table()
bm.edges.ensure_lookup_table()
bm.faces.ensure_lookup_table()
if (embed2d):
pv = self.embed(bm)
print(pv)
if (embed3d):
self.liftup(bm, pv)
bm.to_mesh(mesh)
# create new object
obj = bpy.data.objects.new(name, mesh)
# set object location
obj.location = bpy.context.scene.cursor.location
# link the object to collection
bpy.context.scene.collection.objects.link(obj)
def read(self, filename, index):
self.f = open(filename, "rb")
verts = []
faces = []
try:
DEFAULT_HEADER = b">>planar_code<<"
header = self.f.read(len(DEFAULT_HEADER))
if (header == DEFAULT_HEADER):
print(index)
self.skip(index)
# create verts
num_vert = struct.unpack('b', self.f.read(1))
i = 0
while i < num_vert[0]:
# create vertex
verts.append((0, 0, 0))
                    # read adjacent vertices
adj = []
while True:
tmp = struct.unpack('b', self.f.read(1))
if (tmp[0] <= 0): # 0 means separator
break
adj.append(tmp[0])
# add face counter
lastIndex = len(adj)-1
for j in range(lastIndex):
self.addIfAbsent(collections.Counter([i, adj[j]-1, adj[j+1]-1]))
self.addIfAbsent(collections.Counter([i, adj[0]-1, adj[lastIndex]-1]))
i += 1
for counter in self.faceCounters:
faces.append(tuple(counter))
except:
print(f"Error in reading {filename}")
self.f.close()
return
self.f.close()
del self.f
return verts, faces
def skip(self, index):
# skip to target index
for i in range(index):
num_vert = struct.unpack('b', self.f.read(1))
n = num_vert[0]
while n > 0:
d = struct.unpack('b', self.f.read(1))
if (d[0] == 0):
n -= 1
def addIfAbsent(self, fc):
for counter in self.faceCounters:
if (counter == fc):
break
else:
self.faceCounters.append(fc)
def embed(self, bm):
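        # Force-directed 2D embedding: fix one randomly chosen face on a circle, then relax the remaining vertices with edge forces cubic in the coordinate difference, limited by a cooling schedule.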
# randomly pick up a face
outerFace = bm.faces[random.randint(0, len(bm.faces)-1)]
# embed an outer face to form a regular polygon inscribed into a circle
n = len(outerFace.verts)
inv_sqrt = 1.0 / np.sqrt(n)
angle = 360.0 / n
for i, v in enumerate(outerFace.verts):
rad = (i * angle / 180.0) * np.pi
x = inv_sqrt * np.cos(rad)
y = inv_sqrt * np.sin(rad)
v.co.x = x
v.co.y = y
rests = []
for v in bm.verts:
if (not v in outerFace.verts):
rests.append(v)
# variables for the force F_uv on a Edge(u,v)
fuv = np.zeros((len(bm.edges), 3))
# force F_v on a Vertex(v)
fv = np.zeros((len(bm.verts), 3))
# Constant value
n_pi = np.sqrt(len(bm.verts) / np.pi)
# final double A = 2.5;
avg_area = np.pi / len(bm.verts)
loop = 0
# iterations
while (loop < 500):
# Set F_v to zero
fv[:] = 0
# Calculate F_uv for Edges
for j, e in enumerate(bm.edges):
v = e.verts[0]
u = e.verts[1]
C = n_pi
x = C * np.power(v.co.x - u.co.x, 3)
y = C * np.power(v.co.y - u.co.y, 3)
if (np.isfinite(x) and np.isfinite(y)):
fuv[j] = [x, y, 0]
# Update the forces on v and u
fv[v.index] -= fuv[j]
fv[u.index] += fuv[j]
# Move Vertices
cool = np.sqrt(avg_area) / (1.0 + np.power(np.sqrt(avg_area * loop), 3))
for v in rests:
f = np.linalg.norm(fv[v.index])
size = min(f, cool)
if f != 0:
fv[v.index] /= f
fv[v.index] *= size
v.co.x += fv[v.index, 0]
v.co.y += fv[v.index, 1]
loop += 1
return self.periphericity(bm, outerFace)
def periphericity(self, bm, outer):
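        # Breadth-first layering from the outer face: each vertex gets its layer distance ("periphericity") from the outer face.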
stack0 = []
stack1 = []
per = 0
pv = np.full(len(bm.verts), -1)
for v in outer.verts:
stack0.append(v)
while (stack0):
for v in stack0:
pv[v.index] = per
# Search adjoining verts
for vi in stack0:
links = vi.link_edges
for e in links:
vo = e.verts[1] if vi.index == e.verts[0].index else e.verts[0]
if (pv[vo.index] < 0):
if (not vo in stack1):
stack1.append(vo)
stack0.clear()
stack0.extend(stack1)
stack1.clear()
per += 1
return pv
def liftup(self, bm, pv):
H = 0.3
for v in bm.verts:
z = H * pv[v.index]
v.co.z = z
class IMPORT_OT_pcode(bpy.types.Operator):
"""Import Planar Code Operator"""
bl_idname = "import_planarcode.pcode"
bl_label = "Import planar code"
bl_description = "Embed a graph written in planar code (binary file)"
bl_options = {"REGISTER", "UNDO"}
bpy.types.Scene.ch = None
bpy.types.Scene.poly = None
filepath: StringProperty(
name="File Path",
description="Filepath used for importing the Planar code file",
maxlen=1024,
default="",
)
CODE_INDEX: IntProperty(
name="Planer code index",
description="An index follows generated order",
default=0,
min=0,
)
EMBED_2D: BoolProperty(
name="Embedding in 2D",
description="Embed a graph in the plane",
default=True,
)
EMBED_3D: BoolProperty(
name="Realizing a graph",
description="Make a polyhedron by giving the heights to the vertices",
default=True,
)
def invoke(self, context, event):
wm = context.window_manager
wm.fileselect_add(self)
return {"RUNNING_MODAL"}
def execute(self, context):
PlanarCodeReader(self.filepath, self.CODE_INDEX, self.EMBED_2D, self.EMBED_3D)
return {"FINISHED"}
def menu_func(self, context):
self.layout.operator(IMPORT_OT_pcode.bl_idname, text="Planar code (.*)")
classes = (
IMPORT_OT_pcode,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.TOPBAR_MT_file_import.append(menu_func)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
bpy.types.TOPBAR_MT_file_import.remove(menu_func)
if __name__ == "__main__":
register()
|
[
"bpy.props.StringProperty",
"numpy.sqrt",
"bpy.data.objects.new",
"bmesh.new",
"bpy.types.TOPBAR_MT_file_import.remove",
"numpy.isfinite",
"numpy.linalg.norm",
"numpy.sin",
"bpy.context.scene.collection.objects.link",
"bpy.utils.unregister_class",
"bpy.props.BoolProperty",
"bpy.types.TOPBAR_MT_file_import.append",
"bpy.props.IntProperty",
"numpy.cos",
"numpy.power",
"bpy.data.meshes.new",
"collections.Counter",
"os.path.basename",
"bpy.utils.register_class"
] |
[((7250, 7381), 'bpy.props.StringProperty', 'StringProperty', ([], {'name': '"""File Path"""', 'description': '"""Filepath used for importing the Planar code file"""', 'maxlen': '(1024)', 'default': '""""""'}), "(name='File Path', description=\n 'Filepath used for importing the Planar code file', maxlen=1024, default=''\n )\n", (7264, 7381), False, 'from bpy.props import StringProperty, IntProperty, BoolProperty\n'), ((7433, 7541), 'bpy.props.IntProperty', 'IntProperty', ([], {'name': '"""Planer code index"""', 'description': '"""An index follows generated order"""', 'default': '(0)', 'min': '(0)'}), "(name='Planer code index', description=\n 'An index follows generated order', default=0, min=0)\n", (7444, 7541), False, 'from bpy.props import StringProperty, IntProperty, BoolProperty\n'), ((7596, 7693), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Embedding in 2D"""', 'description': '"""Embed a graph in the plane"""', 'default': '(True)'}), "(name='Embedding in 2D', description=\n 'Embed a graph in the plane', default=True)\n", (7608, 7693), False, 'from bpy.props import StringProperty, IntProperty, BoolProperty\n'), ((7739, 7867), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Realizing a graph"""', 'description': '"""Make a polyhedron by giving the heights to the vertices"""', 'default': '(True)'}), "(name='Realizing a graph', description=\n 'Make a polyhedron by giving the heights to the vertices', default=True)\n", (7751, 7867), False, 'from bpy.props import StringProperty, IntProperty, BoolProperty\n'), ((8438, 8487), 'bpy.types.TOPBAR_MT_file_import.append', 'bpy.types.TOPBAR_MT_file_import.append', (['menu_func'], {}), '(menu_func)\n', (8476, 8487), False, 'import bpy\n'), ((8580, 8629), 'bpy.types.TOPBAR_MT_file_import.remove', 'bpy.types.TOPBAR_MT_file_import.remove', (['menu_func'], {}), '(menu_func)\n', (8618, 8629), False, 'import bpy\n'), ((974, 999), 'bpy.data.meshes.new', 'bpy.data.meshes.new', (['name'], {}), '(name)\n', (993, 999), False, 'import bpy\n'), ((1136, 1147), 'bmesh.new', 'bmesh.new', ([], {}), '()\n', (1145, 1147), False, 'import bmesh\n'), ((1536, 1568), 'bpy.data.objects.new', 'bpy.data.objects.new', (['name', 'mesh'], {}), '(name, mesh)\n', (1556, 1568), False, 'import bpy\n'), ((1708, 1754), 'bpy.context.scene.collection.objects.link', 'bpy.context.scene.collection.objects.link', (['obj'], {}), '(obj)\n', (1749, 1754), False, 'import bpy\n'), ((8401, 8430), 'bpy.utils.register_class', 'bpy.utils.register_class', (['cls'], {}), '(cls)\n', (8425, 8430), False, 'import bpy\n'), ((8543, 8574), 'bpy.utils.unregister_class', 'bpy.utils.unregister_class', (['cls'], {}), '(cls)\n', (8569, 8574), False, 'import bpy\n'), ((4087, 4097), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (4094, 4097), True, 'import numpy as np\n'), ((912, 938), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (928, 938), False, 'import os\n'), ((4249, 4260), 'numpy.cos', 'np.cos', (['rad'], {}), '(rad)\n', (4255, 4260), True, 'import numpy as np\n'), ((4289, 4300), 'numpy.sin', 'np.sin', (['rad'], {}), '(rad)\n', (4295, 4300), True, 'import numpy as np\n'), ((5488, 5505), 'numpy.sqrt', 'np.sqrt', (['avg_area'], {}), '(avg_area)\n', (5495, 5505), True, 'import numpy as np\n'), ((5604, 5631), 'numpy.linalg.norm', 'np.linalg.norm', (['fv[v.index]'], {}), '(fv[v.index])\n', (5618, 5631), True, 'import numpy as np\n'), ((5128, 5156), 'numpy.power', 'np.power', (['(v.co.x - u.co.x)', '(3)'], {}), '(v.co.x - u.co.x, 3)\n', (5136, 5156), True, 
'import numpy as np\n'), ((5182, 5210), 'numpy.power', 'np.power', (['(v.co.y - u.co.y)', '(3)'], {}), '(v.co.y - u.co.y, 3)\n', (5190, 5210), True, 'import numpy as np\n'), ((5232, 5246), 'numpy.isfinite', 'np.isfinite', (['x'], {}), '(x)\n', (5243, 5246), True, 'import numpy as np\n'), ((5251, 5265), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (5262, 5265), True, 'import numpy as np\n'), ((2922, 2978), 'collections.Counter', 'collections.Counter', (['[i, adj[0] - 1, adj[lastIndex] - 1]'], {}), '([i, adj[0] - 1, adj[lastIndex] - 1])\n', (2941, 2978), False, 'import collections\n'), ((5524, 5548), 'numpy.sqrt', 'np.sqrt', (['(avg_area * loop)'], {}), '(avg_area * loop)\n', (5531, 5548), True, 'import numpy as np\n'), ((2836, 2888), 'collections.Counter', 'collections.Counter', (['[i, adj[j] - 1, adj[j + 1] - 1]'], {}), '([i, adj[j] - 1, adj[j + 1] - 1])\n', (2855, 2888), False, 'import collections\n')]
|
import numpy as np
import pandas as pd
import scipy.stats as scs
import itertools
from collections import defaultdict
import textwrap
import pingouin as pg
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
from sklearn.cluster import DBSCAN, KMeans, OPTICS
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.preprocessing import StandardScaler
from sklearn.metrics.pairwise import cosine_distances, cosine_similarity
from sklearn.preprocessing import StandardScaler, MinMaxScaler
pd.options.display.max_columns = 150
from umap import UMAP
from statannot import add_stat_annotation
import plotly
import plotly.graph_objs as go
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.colors as mcolors
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
from mpl_toolkits import mplot3d
import matplotlib.cm as cm
from adjustText import adjust_text
import matplotlib.patheffects as PathEffects
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
warnings.simplefilter('ignore')
###
def simple_axis(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
def run_by_group(orig_df,
**kwargs):
g = orig_df.groupby(kwargs['groupby'])
base_name = kwargs['save_name']
for group, data in g:
kwargs['title'] = group
kwargs['save_name'] = base_name+'_'+group
run_graph(data, **kwargs)
return
def run_graph(df,
**kwargs):
fig, ax = plt.subplots(figsize=kwargs['figsize'])
if 'violin' in kwargs['save_name']:
ax = run_violin(df, ax, **kwargs)
elif 'scatter' in kwargs['save_name']:
if '5' in kwargs['save_name']:
ax = run_scatter_5(df, ax, **kwargs)
else:
ax = run_scatter(df, ax, **kwargs)
if 'comp_df' in kwargs:
ax = run_loadings(df, ax, **kwargs)
elif 'reg' in kwargs['save_name']:
ax = run_scatter(df, ax, **kwargs)
elif 'hist' in kwargs['save_name']:
ax = run_hist(df, ax, **kwargs)
elif 'bar' in kwargs['save_name']:
ax = run_bar(df, ax, **kwargs)
elif 'stacked' in kwargs['save_name']:
ax = run_stacked_bar(df, ax, **kwargs)
elif 'box' in kwargs['save_name']:
ax = run_box(df, ax, **kwargs)
elif 'sil' in kwargs['save_name']:
ax = run_sil(df, ax, **kwargs)
elif 'kde' in kwargs['save_name']:
ax = run_kde(df, ax, **kwargs)
elif 'line' in kwargs['save_name']:
ax = run_line(df, ax, **kwargs)
if 'log' in kwargs:
# ax.set_xscale('symlog', linthreshx=1e-1)
# ax.set_yscale('symlog', linthreshy=1e-1)
ax.set_xscale('log')
ax.set_yscale('log')
if 'xlims' in kwargs:
if len(kwargs['xlims']) == 1:
xlims = ax.get_xlim()
kwargs['xlims'] = (kwargs['xlims'][0], xlims[1])
ax.set_xlim(kwargs['xlims'])
if 'ylims' in kwargs:
if len(kwargs['ylims']) == 1:
ylims = ax.get_ylim()
kwargs['ylims'] = (kwargs['ylims'][0], ylims[1])
ax.set_ylim(kwargs['ylims'])
ax.set_xlabel(kwargs['x_label'], fontweight='bold', fontsize=11)
ax.set_ylabel(kwargs['y_label'], fontweight='bold', fontsize=11)
# if 'comp_df' in kwargs:
# ax2 = ax.twiny()
# ax2.set_xticks( ax.get_xticks() )
# ax2.set_xbound(ax.get_xbound())
# ax2.set_xticklabels([x/ax.get_xticks().max() for x in ax.get_xticks()])
# ax2.set_xlabel('Loadings on PC'+str(kwargs['x']+1), fontweight='bold', fontsize=11)
# ax3 = ax.twinx()
# ax3.set_yticks(ax.get_yticks())
# ax3.set_ybound(ax.get_ybound())
# ax3.set_yticklabels([y/ax.get_yticks().max() for y in ax.get_yticks()])
# ax3.set_ylabel('Loadings on PC'+str(kwargs['y']+1), fontweight='bold', fontsize=11)
ax.set_title(kwargs['title'], fontweight='bold', fontsize=12)
simple_axis(ax)
plt.tight_layout()
fig.savefig('viz/'+kwargs['save_name']+'.png')
return
def sample_df(df, x):
if x==1:
return df
elif x<1:
return df.sample(frac=x, random_state=56)
else:
return df.sample(n=x, random_state=56)
def run_violin(data, ax, **kwargs):
sub_df = sample_df(data, kwargs['sample_frac'])
# get ns from full dataset
if kwargs['x'] == 'region':
df = pd.melt(data.loc[:, kwargs['cols']],
id_vars='region', value_vars='tot_thc').drop(columns=['variable'])
sub_df = pd.melt(sub_data.loc[:, kwargs['cols']],
id_vars='region', value_vars='tot_thc').drop(columns=['variable'])
order, n_dict = violin_order(df, group_by='region')
else:
if 'cols' in kwargs:
df = pd.melt(data.loc[:, kwargs['cols']], var_name=kwargs['x'])
sub_df = pd.melt(sub_df.loc[:, kwargs['cols']], var_name=kwargs['x'])
order, n_dict = violin_order(df, group_by=kwargs['x'])
else:
df = data
sub_df = sub_df
order, n_dict = violin_order(df, group_by=kwargs['x'])
# if pre-set order, use that
if 'order' in kwargs:
order = kwargs['order']
# plot with sampled data
if 'palette' in kwargs:
sns.violinplot(x=kwargs['x'], y=kwargs['y'],
data=sub_df,
scale='width', order=order,
palette=kwargs['palette'], linewidth=0, ax=ax)
else:
sns.violinplot(x=kwargs['x'], y=kwargs['y'],
data=sub_df,
scale='width', order=order,
color='lightslategray', linewidth=0, ax=ax)
PROPS = {
'boxprops':{'facecolor':'black', 'edgecolor':'black', 'linewidth':3},
'medianprops':{'color':'white', 'linewidth':2},
'whiskerprops':{'color':'black', 'linewidth':2}
}
boxplot = sns.boxplot(x=kwargs['x'], y=kwargs['y'],
data=df, order=order,
showcaps=False, width=0.06,
fliersize=0.5, ax=ax, **PROPS)
if kwargs['avg']:
avg_avgs = df.groupby(kwargs['x'])[kwargs['y']].mean().mean()
ax.axhline(avg_avgs, color='black', linestyle='--')
if 'axhline' in kwargs:
ax.axhline(kwargs['axhline'], color='black', linestyle='--')
if 'sil-scores' in kwargs['save_name']:
ax.axhline(0, color='black', linestyle='--')
if kwargs['sig_comp']:
box_pairs = list(itertools.combinations(order,r=2))
test_results = add_stat_annotation(ax, data=df,
x=kwargs['x'], y=kwargs['y'],
order=order,
box_pairs=box_pairs,
text_annot_custom=[get_stats(df, pair, kwargs['x']) for pair in box_pairs],
perform_stat_test=False, pvalues=[0, 0, 0],
loc='outside', verbose=0)
# ttest_df = pd.DataFrame(index=order, columns=['y_val','p_val','cohens_d'])
# ttest_df[['y_val','p_val','cohens_d']] = ttest_df.apply(run_cohens, args=(df, ), axis=1, result_type='expand')
# p_val_adj = 0.05/ttest_df.shape[0]
# ttest_df['reject'] = ttest_df['p_val'] <= p_val_adj
# bins = [0, 0.2, 0.5, 0.8, np.inf]
# names = ['', '*', '**', '***']
# ttest_df['star'] = pd.cut(np.abs(ttest_df['cohens_d']), bins, labels=names)
# for i, region in enumerate(order):
# if ttest_df.loc[region, 'reject']:
# y = ttest_df.loc[region, 'y_val']
# ax.text(i, y+2, ttest_df.loc[region, 'star'], ha='center', size=20)
if 'v_xticklabels' in kwargs:
xtick_labels = ax.get_xticklabels()
labels = [textwrap.fill(x.get_text(),10) for x in xtick_labels]
_ = ax.set_xticklabels(labels, rotation=90, ha='center')
else:
xtick_labels = ax.get_xticklabels()
labels = [x.get_text()+'\nn='+str(n_dict[x.get_text()]['value']) for x in xtick_labels]
_ = ax.set_xticklabels(labels)
return ax
def violin_order(df, group_by='Cannab'):
order = df.groupby(group_by).median().sort_values(by='value', ascending=False).index
n_dict = df.groupby(group_by).count().T.to_dict(orient='dict')
return order.values, n_dict
def run_scatter(df, ax, **kwargs):
no_nan = df.dropna(subset=[kwargs['x'], kwargs['y']], how='any')
sub_df = sample_df(no_nan, kwargs['sample_frac'])
if 'size' in kwargs:
s = kwargs['size']
else:
s = mpl.rcParams['lines.markersize']**2
if 'edgecolor' in kwargs:
ec = kwargs['edgecolor']
else:
ec = 'white'
if 'hue' in kwargs:
if 'sort_list' in kwargs:
hue_order = kwargs['sort_list']
sub_df = sub_df.sort_values(kwargs['hue'], key=make_sorter(kwargs['sort_list']))
else:
hue_order = sub_df[kwargs['hue']].value_counts().index
sns.scatterplot(x=kwargs['x'], y=kwargs['y'],
hue=kwargs['hue'],
data=sub_df,
s=s,
edgecolor=ec,
alpha=0.5,
hue_order=hue_order,
palette=kwargs['palette'],
ax=ax)
# include full ns
handles, labels = ax.get_legend_handles_labels()
if 'n_display' in kwargs:
labels_n = [(cat, df.loc[df[kwargs['hue']]==cat].shape[0]) for cat in hue_order]
labels = [cat+'\nn='+str(n) for cat, n in labels_n]
ax.legend(handles=handles[:kwargs['n_display']], labels=labels[:kwargs['n_display']], title=kwargs['hue'].title(), handlelength=4)
else:
labels_n = [(cat, df.loc[df[kwargs['hue']]==cat].shape[0]) for cat in hue_order]
labels = [cat+'\nn='+str(n) for cat, n in labels_n]
ax.legend(handles=handles, labels=labels, title=kwargs['hue'].title(), handlelength=4)
else:
sns.regplot(x=kwargs['x'], y=kwargs['y'],
data=sub_df,
scatter_kws={'alpha':0.1, 'color':'lightslategray', 'rasterized':True},
line_kws={'color':'orange'},
ax=ax)
r, p = scs.spearmanr(no_nan[kwargs['x']], no_nan[kwargs['y']])
labels = ['rho = {:.2f}'.format(r)]
if p < 1e-300:
labels.append('p < 1e-300')
else:
labels.append('p = {:.1e}'.format(p))
ax.legend(labels=['\n'.join(labels)])
if 'prod_strains' in kwargs:
s_colors = ['black', 'gray', 'white']
s_markers = ['^', 'D', 'o']
n_strains = len(kwargs['prod_strains'])
for strain, color, marker in zip(kwargs['prod_strains'], s_colors[:n_strains], s_markers[:n_strains]):
sns.scatterplot(x=kwargs['x'], y=kwargs['y'],
data=sub_df.loc[sub_df['strain_slug']==strain],
s=s+35,
edgecolor='black',
linewidth=1.5,
color=color,
label=strain,
marker=marker,
ax=ax)
return ax
def get_log(df, cannab_1='tot_thc', cannab_2='tot_cbd'):
    # get the cannab_1/cannab_2 ratio only where the denominator (cannab_2) is nonzero, to avoid dividing by 0
df['ratio'] = 0
df.loc[df[cannab_2] != 0, 'ratio'] = (df.loc[df[cannab_2] != 0, cannab_1]) / (df.loc[df[cannab_2] != 0, cannab_2])
# get log_THC_CBD vals
df['log_ratio'] = 0
df.loc[df['ratio'] != 0, 'log_ratio'] = np.log10(df.loc[df['ratio'] != 0, 'ratio'])
# set the 0 tot_cbd batches to an extraneous high bin
df.loc[df[cannab_2] == 0, 'log_ratio'] = 4
df.loc[df[cannab_1] == 0, 'log_ratio'] = -2
log_ratio = df['log_ratio']
return log_ratio
def run_hist(df, ax, **kwargs):
sub_df = sample_df(df, kwargs['sample_frac'])
# some cut-offs
ct_thresh_high = 5
ct_thresh_low = 0.25
max_log = 4
min_log = -2.0
# get log data
log_cannab = get_log(sub_df, cannab_1='tot_'+kwargs['x'], cannab_2='tot_'+kwargs['y'])
# get histogram
hist, bins = np.histogram(log_cannab, bins=np.arange(min_log-0.1, max_log+0.1, 0.05))
# get colors
colors = []
for low, high in zip(bins,bins[1:]):
avg = np.mean([low, high])
if avg >= np.log10(ct_thresh_high):
colors.append('darkblue')
elif avg <= np.log10(ct_thresh_low):
colors.append('black')
else:
colors.append('steelblue')
# plot histogram, thresholds
ax.bar(bins[:-1], hist.astype(np.float32) / hist.sum(),
width=(bins[1]-bins[0]), color=colors)
ax.plot([np.log10(ct_thresh_high), np.log10(ct_thresh_high)], [0, kwargs['ylims'][1]-0.02],
linestyle='--', color='k', linewidth=1)
ax.plot([np.log10(ct_thresh_low), np.log10(ct_thresh_low)], [0, kwargs['ylims'][1]-0.02],
linestyle='--', color='k', linewidth=1)
ax.set_xticklabels(['',float("-inf"), -1, 0, 1, 2, 3, float("inf")])
# draw legend
chemotypes = ['THC-Dom', 'Bal THC/CBD', 'CBD-Dom']
ct_1 = mpatches.Patch(color='darkblue', label='THC-Dom')
ct_2 = mpatches.Patch(color='steelblue', label='Bal THC/CBD')
ct_3 = mpatches.Patch(color='black', label='CBD-Dom')
ct_handles, ct_labels = ax.get_legend_handles_labels()
ct_labels_n = [(x, df.loc[df['chemotype']==x].shape[0]) for x in chemotypes]
ct_labels = [x+'\nn='+str(n) for x, n in ct_labels_n]
ax.legend(handles=[ct_1,ct_2,ct_3], labels=ct_labels,title='Chemotype',handlelength=4)
return ax
def normalize(df, cols):
df.loc[:, cols] = (df.loc[:, cols]
.div(df.loc[:, cols].sum(axis=1), axis=0)
.multiply(100))
return df
def max_min(arr):
return arr/(arr.max()-arr.min())
def make_sorter(sort_list):
"""
    Create a dict from sort_list mapping each item to 0..len(sort_list)
Returns a mapper to map a series to this custom sort order
"""
sort_order = {k:v for k,v in zip(sort_list, range(len(sort_list)))}
return lambda s: s.map(lambda x: sort_order[x])
def run_bar(df, ax, **kwargs):
if 'hue' in kwargs:
sns.barplot(x=kwargs['x'], y=kwargs['y'],
hue=kwargs['hue'],
data=df,
palette=kwargs['palette'],
order=kwargs['order'])
elif 'palette' in kwargs:
sns.barplot(x=kwargs['x'], y=kwargs['y'],
data=df,
palette=kwargs['palette'],
order=kwargs['order'])
else:
sns.barplot(x=kwargs['x'], y=kwargs['y'],
color='lightslategray')
return ax
def run_box(df, ax, **kwargs):
if 'palette' in kwargs:
sns.boxplot(x=kwargs['x'], y=kwargs['y'],
data=df,
palette=kwargs['palette'],
order=kwargs['order'])
else:
sns.boxplot(x=kwargs['x'], y=kwargs['y'],
color='lightslategray')
return ax
def run_pca(df, cols, norm=True, n_components=2, max_min_arr=False):
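    # Drop all-zero rows, optionally normalize rows to percentages, fit PCA on the selected columns, and append the first three component scores as columns 0, 1, 2.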
df[cols] = df[cols].fillna(0)
# get rid of rows that are all 0 for specified columns
zero_bool = (df[cols]==0).sum(axis=1)==len(cols)
df = df[~zero_bool].copy()
if norm:
X = normalize(df, cols).copy()
else:
X = df.copy()
model = PCA(n_components=n_components)
model.fit(X.loc[:, cols])
arr = model.fit_transform(X.loc[:, cols])
if max_min_arr:
arr = np.apply_along_axis(max_min, arr=arr, axis=0)
# add first three component scores to df
X[0] = arr[:,0]
X[1] = arr[:,1]
X[2] = arr[:,2]
return X, arr, model
def run_loadings(df, ax, **kwargs):
comp_df = kwargs['comp_df']
comp_df['combo_score'] = np.abs(comp_df[[kwargs['x'],kwargs['y']]]).sum(axis=1)
comp_df = comp_df.sort_values(by='combo_score', ascending=False).iloc[:kwargs['n_display']]
max_x = df[kwargs['x']].max()
max_y = df[kwargs['y']].max()
texts = []
for x in comp_df.iterrows():
texts.append(ax.text(x[1][kwargs['x']]*max_x,
x[1][kwargs['y']]*max_y,
x[0],
fontweight='bold',
bbox=dict(facecolor='white', edgecolor='blue', pad=2, alpha=0.75)))
ax.arrow(0, 0,
x[1][kwargs['x']]*max_x,
x[1][kwargs['y']]*max_y,
color='black', alpha=1,
lw=2, head_width=1)
adjust_text(texts)
return ax
def run_sil(df, ax, **kwargs):
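    # Silhouette plot: one filled band of sorted per-sample silhouette values per cluster label, with the overall average marked by a dashed vertical line.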
sub_df = sample_df(df, kwargs['sample_frac'])
labels = df[kwargs['hue']]
sub_labels = sub_df[kwargs['hue']]
label_list = labels.value_counts().index
silhouette_avg = silhouette_score(df[kwargs['cols']], labels)
sample_sil_val = silhouette_samples(sub_df[kwargs['cols']], sub_labels)
y_lower=0
for i, label in enumerate(label_list[:kwargs['n_display']]):
ith_cluster_sil_val = sample_sil_val[sub_labels==label]
ith_cluster_sil_val.sort()
size_cluster_i = ith_cluster_sil_val.shape[0]
y_upper = y_lower+size_cluster_i
color = kwargs['palette'][label]
ax.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_sil_val,
facecolor=color, edgecolor=color, alpha=0.7)
ax.text(-0.05, y_lower+0.5*size_cluster_i, label)
y_lower = y_upper+1
ax.axvline(silhouette_avg, color='lightslategray', linestyle='--')
ax.legend(labels=['Avg Silhouette Score {:.2f}'.format(silhouette_avg)])
ax.set_ylim(0, y_upper+10)
return ax
def score2loading(x):
return x / x.max()
def loading2score(x):
return x * x.max()
def get_ct(df):
# determine THC/CBD ratio
df['chemotype_ratio'] = df['tot_thc'].div(df['tot_cbd'], fill_value=0)
df.loc[(df['tot_thc']==0)&(df['tot_cbd']!=0), 'chemotype_ratio'] = -np.inf
df.loc[(df['tot_thc']!=0)&(df['tot_cbd']==0), 'chemotype_ratio'] = np.inf
# bin chemotypes by ratio
df['chemotype'] = pd.cut(df['chemotype_ratio'],
[-np.inf, 0.2, 5, np.inf],
labels=['CBD-Dom','Bal THC/CBD', 'THC-Dom'],
include_lowest=True)
return df
def run_stacked_bar(df, ax, **kwargs):
if 'order' in kwargs:
df[kwargs['order']].plot(kind='bar', stacked=True, color=kwargs['palette'], ax=ax)
else:
df.plot(kind='bar', stacked=True, color=kwargs['palette'], ax=ax)
# .patches is everything inside of the chart
for rect in ax.patches:
# Find where everything is located
height = rect.get_height()
width = rect.get_width()
x = rect.get_x()
y = rect.get_y()
# The height of the bar is the data value and can be used as the label
label_text = f'{height:.1f}%' # f'{height:.2f}' to format decimal values
# ax.text(x, y, text)
label_x = x + width / 2
label_y = y + height / 2
# plot only when height is greater than specified value
if height > 5:
txt = ax.text(label_x, label_y, label_text, ha='center', va='center', fontsize=10, fontweight='bold', color='black')
txt.set_path_effects([PathEffects.withStroke(linewidth=4, foreground='w')])
ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
return ax
# def run_polar_plot(df, ax, **kwargs):
# # print(df.loc[:,kwargs['cols']])
# mean_cols = list(df.loc[:,kwargs['cols']].mean(axis=0))
# mean_cols2 = mean_cols + mean_cols[:1]
# angles = [n / len(mean_cols) * 2 * np.pi for n in range(len(mean_cols))]
# angles = angles + angles[:1]
# # get color
# order = np.argsort(mean_cols)[::-1]
# top_val = np.array(kwargs['cols'])[order][0]
# if 'colors' in kwargs:
# colors = kwargs['colors']
# else:
# colors = kwargs['palette'][top_val]
# # # error bars
# # err_cols = list(df.loc[:,kwargs['cols']].std(axis=0))
# # err_cols2 = err_cols + err_cols[:1]
# # ax.errorbar(angles, mean_cols2, yerr=err_cols2, capsize=0, color=colors, linestyle='solid', ecolor='lightslategray')
# # plot
# if kwargs['avg']:
# ax.plot(angles, mean_cols2, color=colors, lw=1, linestyle='solid')
# ax.fill(angles, mean_cols2, colors, alpha=0.1)
# # y limits
# ax.set_ylim(0, np.max(mean_cols2))
# else:
# for row_idx, row in df[kwargs['cols']].iterrows():
# row_list = list(row)
# row_list2 = row_list + row_list[:1]
# if type(colors)==str:
# ax.plot(angles, row_list2, color=colors, lw=0.5)
# else:
# ax.plot(angles, row_list2, color=colors[row_idx], lw=0.5)
# ax.set_ylim(0, np.max(df[kwargs['cols']].max()))
# # tick labels
# tick_labs = kwargs['cols']
# ax.set_xticks(angles[:-1])
# ax.set_xticklabels(tick_labs, color='black', size=10)
# ax.set_yticks([])
# return ax
def run_polar_plot(df, ax, **kwargs):
# print(df.loc[:,kwargs['cols']])
mean_cols = list(df.loc[:,kwargs['cols']].mean(axis=0))
mean_cols2 = mean_cols + mean_cols[:1]
angles = [n / len(mean_cols) * 2 * np.pi for n in range(len(mean_cols))]
angles = angles + angles[:1]
# plot samples
if 'sub_n' in kwargs:
sub_data = df.sort_values('n_samps', ascending=False)[:kwargs['sub_n']]
else:
sub_data = df
for row_idx, row in sub_data[kwargs['cols']].iterrows():
row_list = list(row)
row_list2 = row_list + row_list[:1]
if type(kwargs['colors'])==str:
ax.plot(angles, row_list2, color=kwargs['colors'], lw=0.5, alpha=0.5)
else:
ax.plot(angles, row_list2, color=kwargs['colors'][row_idx], lw=0.5)
if kwargs['avg']:
# get for average color
order = np.argsort(mean_cols)[::-1]
top_val = np.array(kwargs['cols'])[order][0]
avg_color = kwargs['palette'][top_val]
ax.plot(angles, mean_cols2, color=avg_color, lw=1, linestyle='solid', zorder=11)
ax.fill(angles, mean_cols2, avg_color, alpha=0.5, zorder=10)
ax.set_ylim(0, np.max(mean_cols2))
else:
ax.set_ylim(0, np.max(sub_data[kwargs['cols']].max()))
# tick labels
tick_labs = kwargs['cols']
ax.set_xticks(angles[:-1])
ax.set_xticklabels(tick_labs, color='black', size=10)
ax.set_yticks([])
return ax
def run_pairwise(df,
cann_cols, terp_cols):
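    # Per-sample mean cosine similarity to all other samples (self-similarity masked) for cannabinoid, terpene, and combined (min-max scaled) profiles.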
df_sim = pd.DataFrame(columns=['cann','terp','all'])
for idx, cols in enumerate([cann_cols, terp_cols, cann_cols+terp_cols]):
if idx==2:
df[cols] = MinMaxScaler().fit_transform(df[cols].fillna(0))
sim_scores = cosine_similarity(df[cols].fillna(0))
sim_scores[sim_scores > 0.9999999999999] = np.nan
df_sim.iloc[:, idx] = np.nanmean(sim_scores, axis=0)
return df_sim
def get_scaled_dfs(df, cann, terps):
X_cann = df[cann].fillna(0)
X_cann_standard = MinMaxScaler().fit_transform(X_cann)
X_terps = df[terps].fillna(0)
X_terps_standard = MinMaxScaler().fit_transform(X_terps)
X_all = df[cann+terps].fillna(0)
X_all_standard = MinMaxScaler().fit_transform(X_all)
return X_cann, X_cann_standard, X_terps, X_terps_standard, X_all, X_all_standard
def avg_sd(a,b):
num = np.var(a)+np.var(b)
return np.sqrt(num/2)
def get_stats(df, pair, col):
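    # Welch's t-test between the two groups plus Cohen's d computed from the average of the two variances.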
x = df.loc[df[col]==pair[0], 'value']
y = df.loc[df[col]==pair[1], 'value']
ttest = scs.ttest_ind(x, y, equal_var=False)
d_prime = (x.mean()-y.mean())/avg_sd(x,y)
labels = []
if ttest[1] < 1e-300:
labels.append('p < 1e-300')
else:
labels.append('p = {:.1e}'.format(ttest[1]))
labels.append("d'="+str(np.round(np.abs(d_prime),2)))
return ', '.join(labels)
def get_prod_df(df, n_samp_min, n_prod_min,
common_cannabs,
common_terps):
n_samp_df = df.groupby(['anon_producer','strain_slug'])['u_id'].count()
df = df.merge(n_samp_df.rename('n_samp'), left_on=['anon_producer','strain_slug'], right_index=True)
# create producer df
prod_df = df.loc[(df['n_samp']>=n_samp_min)].groupby(['anon_producer','strain_slug'])[common_cannabs+common_terps].mean()
prod_df = get_ct(prod_df)
prod_df = prod_df.reset_index(drop=False)
# get n_prod counts
n_prod_df = prod_df.groupby('strain_slug')['anon_producer'].count()
prod_df = prod_df.merge(n_prod_df.rename('n_prod'), left_on='strain_slug', right_index=True)
prod_df = prod_df.merge(n_samp_df.rename('n_samps'), left_on=['anon_producer','strain_slug'], right_index=True)
# subset to n_prod_min and thc-dom
fin_prod_df = prod_df.loc[(prod_df['n_prod']>=n_prod_min)].sort_values(['n_prod','strain_slug','anon_producer'], ascending=[False, False, True]).copy()
fin_prod_df['strain_slug'] = fin_prod_df['strain_slug'].astype(str)
return fin_prod_df
def get_pal_dict(df, common_terps, terp_dict):
pal_dict = {}
for label in set(df['kmeans_label']):
terp_order = df.loc[df['kmeans_label']==label, common_terps].mean().sort_values(ascending=False)
pal_dict[label] = terp_dict[terp_order[:1].index[0]]
return pal_dict
def get_kmeans(df, common_terps,
k=3):
df_norm, arr, model = run_pca(df, common_terps, norm=True, n_components=3)
# set up kmeans
clust = KMeans(3, random_state=56)
# get cluster labels
df_norm['kmeans_label'] = clust.fit_predict(df_norm[common_terps])
clust_dict = {x:y for x,y in zip(df_norm['kmeans_label'].value_counts().index, ['A','B','C'])}
df_norm['kmeans_label'] = df_norm['kmeans_label'].replace(clust_dict)
return df_norm
def get_umap(df, common_terps,
n_neighbors=6,
random_state=56):
umap_ = UMAP(n_components=2, n_neighbors=n_neighbors,
random_state=random_state)
X_terps_umap = umap_.fit_transform(df[common_terps])
df['umap_0'] = X_terps_umap[:,0]
df['umap_1'] = X_terps_umap[:,1]
return df
def get_round(arr, sig_fig=1):
return np.round(arr*100,sig_fig)
def get_cos_sim(df, add_nan=False):
sim_scores = cosine_similarity(df)
if add_nan:
sim_scores[sim_scores > 0.9999999999999] = np.nan
else:
sim_scores[sim_scores > 0.9999999999999] = 1
return sim_scores
def group_cos_sim(df, group_level=False):
if df.shape[0]==1:
# if only one product, do not return cos sim
return np.nan
else:
sim_scores = get_cos_sim(df, add_nan=True)
if group_level:
return np.mean(np.nanmean(sim_scores, axis=0))
else:
return list(np.nanmean(sim_scores, axis=0))
def format_df(df):
return df.explode().to_frame().rename(columns={0:'bw_prod_sim'}).reset_index(drop=False)
def weighted_avg(avgs, weights):
return np.average(avgs, weights=weights)
def run_all_cos_sims(df, cols,
groupby='strain_slug'):
groups = df.groupby(groupby)[cols]
bw_prod_df = format_df(groups.apply(lambda x: group_cos_sim(x)))
avgs = groups.apply(lambda x: group_cos_sim(x, group_level=True))
weights = groups.size()[groups.size()>1]
return bw_prod_df, avgs, weights
def run_kde(df, ax, **kwargs):
no_nan = df.dropna(subset=[kwargs['x'], kwargs['y']], how='any')
sub_df = sample_df(no_nan, kwargs['sample_frac'])
_ = sns.kdeplot(x=kwargs['x'],
y=kwargs['y'],
data=sub_df,
fill=True,
cmap='RdBu_r',
cbar=True,
vmin=0,
levels=75,
ax=ax)
return ax
def run_line(df, ax, **kwargs):
_ = ax.plot(kwargs['x'], kwargs['y'])
return ax
def run_scatter_5(df, ax, **kwargs):
no_nan = df.dropna(subset=[kwargs['x'], kwargs['y']], how='any')
sub_df = sample_df(no_nan, kwargs['sample_frac'])
if 'size' in kwargs:
s = kwargs['size']
else:
s = mpl.rcParams['lines.markersize']**2
if 'edgecolor' in kwargs:
ec = kwargs['edgecolor']
else:
ec = 'white'
hue_order = ['THC-Dom', 'Bal THC/CBD', 'CBD-Dom']
s_colors = ['darkblue', 'steelblue', 'black']
s_markers = ['D', '^', 'o']
for ct, color, marker in zip(hue_order, s_colors, s_markers):
if ct=='THC-Dom':
sns.scatterplot(x=kwargs['x'], y=kwargs['y'],
data=sub_df.loc[df['chemotype']==ct], alpha=.5,
color=color,
marker=marker,
s=25,
edgecolor='white',
linewidth=0.5,
label=ct,
ax=ax)
else:
sns.scatterplot(x=kwargs['x'], y=kwargs['y'],
data=sub_df.loc[df['chemotype']==ct], alpha=1,
color=color,
marker=marker,
s=25,
edgecolor='white',
linewidth=0.5,
label=ct,
ax=ax)
# include full ns
handles, labels = ax.get_legend_handles_labels()
if 'n_display' in kwargs:
labels_n = [(cat, df.loc[df[kwargs['hue']]==cat].shape[0]) for cat in hue_order]
labels = [cat+'\nn='+str(n) for cat, n in labels_n]
ax.legend(handles=handles[:kwargs['n_display']], labels=labels[:kwargs['n_display']], title=kwargs['hue'].title(), handlelength=4)
else:
labels_n = [(cat, df.loc[df[kwargs['hue']]==cat].shape[0]) for cat in hue_order]
labels = [cat+'\nn='+str(n) for cat, n in labels_n]
ax.legend(handles=handles, labels=labels, title=kwargs['hue'].title(), handlelength=4)
return ax
|
[
"numpy.log10",
"numpy.sqrt",
"numpy.argsort",
"numpy.nanmean",
"sklearn.metrics.silhouette_samples",
"numpy.array",
"scipy.stats.ttest_ind",
"umap.UMAP",
"seaborn.violinplot",
"seaborn.scatterplot",
"matplotlib.patheffects.withStroke",
"numpy.arange",
"numpy.mean",
"seaborn.regplot",
"sklearn.metrics.pairwise.cosine_similarity",
"sklearn.decomposition.PCA",
"numpy.max",
"pandas.DataFrame",
"warnings.simplefilter",
"scipy.stats.spearmanr",
"pandas.melt",
"sklearn.preprocessing.MinMaxScaler",
"numpy.round",
"numpy.abs",
"numpy.average",
"matplotlib.patches.Patch",
"adjustText.adjust_text",
"warnings.filterwarnings",
"sklearn.cluster.KMeans",
"pandas.cut",
"seaborn.boxplot",
"itertools.combinations",
"seaborn.kdeplot",
"numpy.apply_along_axis",
"matplotlib.pyplot.tight_layout",
"seaborn.barplot",
"sklearn.metrics.silhouette_score",
"matplotlib.pyplot.subplots",
"numpy.var"
] |
[((1130, 1163), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1153, 1163), False, 'import warnings\n'), ((1164, 1195), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1185, 1195), False, 'import warnings\n'), ((1725, 1764), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': "kwargs['figsize']"}), "(figsize=kwargs['figsize'])\n", (1737, 1764), True, 'import matplotlib.pyplot as plt\n'), ((4215, 4233), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4231, 4233), True, 'import matplotlib.pyplot as plt\n'), ((6183, 6310), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'data': 'df', 'order': 'order', 'showcaps': '(False)', 'width': '(0.06)', 'fliersize': '(0.5)', 'ax': 'ax'}), "(x=kwargs['x'], y=kwargs['y'], data=df, order=order, showcaps=\n False, width=0.06, fliersize=0.5, ax=ax, **PROPS)\n", (6194, 6310), True, 'import seaborn as sns\n'), ((12079, 12122), 'numpy.log10', 'np.log10', (["df.loc[df['ratio'] != 0, 'ratio']"], {}), "(df.loc[df['ratio'] != 0, 'ratio'])\n", (12087, 12122), True, 'import numpy as np\n'), ((13719, 13768), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""darkblue"""', 'label': '"""THC-Dom"""'}), "(color='darkblue', label='THC-Dom')\n", (13733, 13768), True, 'import matplotlib.patches as mpatches\n'), ((13780, 13834), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""steelblue"""', 'label': '"""Bal THC/CBD"""'}), "(color='steelblue', label='Bal THC/CBD')\n", (13794, 13834), True, 'import matplotlib.patches as mpatches\n'), ((13846, 13892), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""black"""', 'label': '"""CBD-Dom"""'}), "(color='black', label='CBD-Dom')\n", (13860, 13892), True, 'import matplotlib.patches as mpatches\n'), ((16050, 16080), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (16053, 16080), False, 'from sklearn.decomposition import PCA\n'), ((17234, 17252), 'adjustText.adjust_text', 'adjust_text', (['texts'], {}), '(texts)\n', (17245, 17252), False, 'from adjustText import adjust_text\n'), ((17501, 17545), 'sklearn.metrics.silhouette_score', 'silhouette_score', (["df[kwargs['cols']]", 'labels'], {}), "(df[kwargs['cols']], labels)\n", (17517, 17545), False, 'from sklearn.metrics import silhouette_samples, silhouette_score\n'), ((17567, 17621), 'sklearn.metrics.silhouette_samples', 'silhouette_samples', (["sub_df[kwargs['cols']]", 'sub_labels'], {}), "(sub_df[kwargs['cols']], sub_labels)\n", (17585, 17621), False, 'from sklearn.metrics import silhouette_samples, silhouette_score\n'), ((18838, 18965), 'pandas.cut', 'pd.cut', (["df['chemotype_ratio']", '[-np.inf, 0.2, 5, np.inf]'], {'labels': "['CBD-Dom', 'Bal THC/CBD', 'THC-Dom']", 'include_lowest': '(True)'}), "(df['chemotype_ratio'], [-np.inf, 0.2, 5, np.inf], labels=['CBD-Dom',\n 'Bal THC/CBD', 'THC-Dom'], include_lowest=True)\n", (18844, 18965), True, 'import pandas as pd\n'), ((23450, 23495), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['cann', 'terp', 'all']"}), "(columns=['cann', 'terp', 'all'])\n", (23462, 23495), True, 'import pandas as pd\n'), ((24329, 24345), 'numpy.sqrt', 'np.sqrt', (['(num / 2)'], {}), '(num / 2)\n', (24336, 24345), True, 'import numpy as np\n'), ((24473, 24509), 'scipy.stats.ttest_ind', 'scs.ttest_ind', (['x', 'y'], {'equal_var': '(False)'}), '(x, y, equal_var=False)\n', (24486, 24509), True, 'import 
scipy.stats as scs\n'), ((26400, 26426), 'sklearn.cluster.KMeans', 'KMeans', (['(3)'], {'random_state': '(56)'}), '(3, random_state=56)\n', (26406, 26426), False, 'from sklearn.cluster import DBSCAN, KMeans, OPTICS\n'), ((26823, 26895), 'umap.UMAP', 'UMAP', ([], {'n_components': '(2)', 'n_neighbors': 'n_neighbors', 'random_state': 'random_state'}), '(n_components=2, n_neighbors=n_neighbors, random_state=random_state)\n', (26827, 26895), False, 'from umap import UMAP\n'), ((27109, 27137), 'numpy.round', 'np.round', (['(arr * 100)', 'sig_fig'], {}), '(arr * 100, sig_fig)\n', (27117, 27137), True, 'import numpy as np\n'), ((27190, 27211), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['df'], {}), '(df)\n', (27207, 27211), False, 'from sklearn.metrics.pairwise import cosine_distances, cosine_similarity\n'), ((27903, 27936), 'numpy.average', 'np.average', (['avgs'], {'weights': 'weights'}), '(avgs, weights=weights)\n', (27913, 27936), True, 'import numpy as np\n'), ((28454, 28576), 'seaborn.kdeplot', 'sns.kdeplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'data': 'sub_df', 'fill': '(True)', 'cmap': '"""RdBu_r"""', 'cbar': '(True)', 'vmin': '(0)', 'levels': '(75)', 'ax': 'ax'}), "(x=kwargs['x'], y=kwargs['y'], data=sub_df, fill=True, cmap=\n 'RdBu_r', cbar=True, vmin=0, levels=75, ax=ax)\n", (28465, 28576), True, 'import seaborn as sns\n'), ((5534, 5670), 'seaborn.violinplot', 'sns.violinplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'data': 'sub_df', 'scale': '"""width"""', 'order': 'order', 'palette': "kwargs['palette']", 'linewidth': '(0)', 'ax': 'ax'}), "(x=kwargs['x'], y=kwargs['y'], data=sub_df, scale='width',\n order=order, palette=kwargs['palette'], linewidth=0, ax=ax)\n", (5548, 5670), True, 'import seaborn as sns\n'), ((5754, 5887), 'seaborn.violinplot', 'sns.violinplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'data': 'sub_df', 'scale': '"""width"""', 'order': 'order', 'color': '"""lightslategray"""', 'linewidth': '(0)', 'ax': 'ax'}), "(x=kwargs['x'], y=kwargs['y'], data=sub_df, scale='width',\n order=order, color='lightslategray', linewidth=0, ax=ax)\n", (5768, 5887), True, 'import seaborn as sns\n'), ((9469, 9641), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'hue': "kwargs['hue']", 'data': 'sub_df', 's': 's', 'edgecolor': 'ec', 'alpha': '(0.5)', 'hue_order': 'hue_order', 'palette': "kwargs['palette']", 'ax': 'ax'}), "(x=kwargs['x'], y=kwargs['y'], hue=kwargs['hue'], data=\n sub_df, s=s, edgecolor=ec, alpha=0.5, hue_order=hue_order, palette=\n kwargs['palette'], ax=ax)\n", (9484, 9641), True, 'import seaborn as sns\n'), ((10533, 10707), 'seaborn.regplot', 'sns.regplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'data': 'sub_df', 'scatter_kws': "{'alpha': 0.1, 'color': 'lightslategray', 'rasterized': True}", 'line_kws': "{'color': 'orange'}", 'ax': 'ax'}), "(x=kwargs['x'], y=kwargs['y'], data=sub_df, scatter_kws={'alpha':\n 0.1, 'color': 'lightslategray', 'rasterized': True}, line_kws={'color':\n 'orange'}, ax=ax)\n", (10544, 10707), True, 'import seaborn as sns\n'), ((10800, 10855), 'scipy.stats.spearmanr', 'scs.spearmanr', (["no_nan[kwargs['x']]", "no_nan[kwargs['y']]"], {}), "(no_nan[kwargs['x']], no_nan[kwargs['y']])\n", (10813, 10855), True, 'import scipy.stats as scs\n'), ((12855, 12875), 'numpy.mean', 'np.mean', (['[low, high]'], {}), '([low, high])\n', (12862, 12875), True, 'import numpy as np\n'), ((14821, 14944), 'seaborn.barplot', 'sns.barplot', ([], {'x': "kwargs['x']", 'y': 
"kwargs['y']", 'hue': "kwargs['hue']", 'data': 'df', 'palette': "kwargs['palette']", 'order': "kwargs['order']"}), "(x=kwargs['x'], y=kwargs['y'], hue=kwargs['hue'], data=df,\n palette=kwargs['palette'], order=kwargs['order'])\n", (14832, 14944), True, 'import seaborn as sns\n'), ((15407, 15512), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'data': 'df', 'palette': "kwargs['palette']", 'order': "kwargs['order']"}), "(x=kwargs['x'], y=kwargs['y'], data=df, palette=kwargs['palette'\n ], order=kwargs['order'])\n", (15418, 15512), True, 'import seaborn as sns\n'), ((15586, 15651), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'color': '"""lightslategray"""'}), "(x=kwargs['x'], y=kwargs['y'], color='lightslategray')\n", (15597, 15651), True, 'import seaborn as sns\n'), ((16196, 16241), 'numpy.apply_along_axis', 'np.apply_along_axis', (['max_min'], {'arr': 'arr', 'axis': '(0)'}), '(max_min, arr=arr, axis=0)\n', (16215, 16241), True, 'import numpy as np\n'), ((23809, 23839), 'numpy.nanmean', 'np.nanmean', (['sim_scores'], {'axis': '(0)'}), '(sim_scores, axis=0)\n', (23819, 23839), True, 'import numpy as np\n'), ((24298, 24307), 'numpy.var', 'np.var', (['a'], {}), '(a)\n', (24304, 24307), True, 'import numpy as np\n'), ((24308, 24317), 'numpy.var', 'np.var', (['b'], {}), '(b)\n', (24314, 24317), True, 'import numpy as np\n'), ((5037, 5095), 'pandas.melt', 'pd.melt', (["data.loc[:, kwargs['cols']]"], {'var_name': "kwargs['x']"}), "(data.loc[:, kwargs['cols']], var_name=kwargs['x'])\n", (5044, 5095), True, 'import pandas as pd\n'), ((5117, 5177), 'pandas.melt', 'pd.melt', (["sub_df.loc[:, kwargs['cols']]"], {'var_name': "kwargs['x']"}), "(sub_df.loc[:, kwargs['cols']], var_name=kwargs['x'])\n", (5124, 5177), True, 'import pandas as pd\n'), ((6810, 6844), 'itertools.combinations', 'itertools.combinations', (['order'], {'r': '(2)'}), '(order, r=2)\n', (6832, 6844), False, 'import itertools\n'), ((11373, 11570), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'data': "sub_df.loc[sub_df['strain_slug'] == strain]", 's': '(s + 35)', 'edgecolor': '"""black"""', 'linewidth': '(1.5)', 'color': 'color', 'label': 'strain', 'marker': 'marker', 'ax': 'ax'}), "(x=kwargs['x'], y=kwargs['y'], data=sub_df.loc[sub_df[\n 'strain_slug'] == strain], s=s + 35, edgecolor='black', linewidth=1.5,\n color=color, label=strain, marker=marker, ax=ax)\n", (11388, 11570), True, 'import seaborn as sns\n'), ((12723, 12768), 'numpy.arange', 'np.arange', (['(min_log - 0.1)', '(max_log + 0.1)', '(0.05)'], {}), '(min_log - 0.1, max_log + 0.1, 0.05)\n', (12732, 12768), True, 'import numpy as np\n'), ((12894, 12918), 'numpy.log10', 'np.log10', (['ct_thresh_high'], {}), '(ct_thresh_high)\n', (12902, 12918), True, 'import numpy as np\n'), ((13257, 13281), 'numpy.log10', 'np.log10', (['ct_thresh_high'], {}), '(ct_thresh_high)\n', (13265, 13281), True, 'import numpy as np\n'), ((13283, 13307), 'numpy.log10', 'np.log10', (['ct_thresh_high'], {}), '(ct_thresh_high)\n', (13291, 13307), True, 'import numpy as np\n'), ((13406, 13429), 'numpy.log10', 'np.log10', (['ct_thresh_low'], {}), '(ct_thresh_low)\n', (13414, 13429), True, 'import numpy as np\n'), ((13431, 13454), 'numpy.log10', 'np.log10', (['ct_thresh_low'], {}), '(ct_thresh_low)\n', (13439, 13454), True, 'import numpy as np\n'), ((15059, 15164), 'seaborn.barplot', 'sns.barplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'data': 'df', 'palette': "kwargs['palette']", 'order': 
"kwargs['order']"}), "(x=kwargs['x'], y=kwargs['y'], data=df, palette=kwargs['palette'\n ], order=kwargs['order'])\n", (15070, 15164), True, 'import seaborn as sns\n'), ((15238, 15303), 'seaborn.barplot', 'sns.barplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'color': '"""lightslategray"""'}), "(x=kwargs['x'], y=kwargs['y'], color='lightslategray')\n", (15249, 15303), True, 'import seaborn as sns\n'), ((16481, 16524), 'numpy.abs', 'np.abs', (["comp_df[[kwargs['x'], kwargs['y']]]"], {}), "(comp_df[[kwargs['x'], kwargs['y']]])\n", (16487, 16524), True, 'import numpy as np\n'), ((17968, 17995), 'numpy.arange', 'np.arange', (['y_lower', 'y_upper'], {}), '(y_lower, y_upper)\n', (17977, 17995), True, 'import numpy as np\n'), ((22773, 22794), 'numpy.argsort', 'np.argsort', (['mean_cols'], {}), '(mean_cols)\n', (22783, 22794), True, 'import numpy as np\n'), ((23092, 23110), 'numpy.max', 'np.max', (['mean_cols2'], {}), '(mean_cols2)\n', (23098, 23110), True, 'import numpy as np\n'), ((23951, 23965), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (23963, 23965), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((24046, 24060), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (24058, 24060), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((24143, 24157), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (24155, 24157), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((29433, 29623), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'data': "sub_df.loc[df['chemotype'] == ct]", 'alpha': '(0.5)', 'color': 'color', 'marker': 'marker', 's': '(25)', 'edgecolor': '"""white"""', 'linewidth': '(0.5)', 'label': 'ct', 'ax': 'ax'}), "(x=kwargs['x'], y=kwargs['y'], data=sub_df.loc[df[\n 'chemotype'] == ct], alpha=0.5, color=color, marker=marker, s=25,\n edgecolor='white', linewidth=0.5, label=ct, ax=ax)\n", (29448, 29623), True, 'import seaborn as sns\n'), ((29862, 30050), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': "kwargs['x']", 'y': "kwargs['y']", 'data': "sub_df.loc[df['chemotype'] == ct]", 'alpha': '(1)', 'color': 'color', 'marker': 'marker', 's': '(25)', 'edgecolor': '"""white"""', 'linewidth': '(0.5)', 'label': 'ct', 'ax': 'ax'}), "(x=kwargs['x'], y=kwargs['y'], data=sub_df.loc[df[\n 'chemotype'] == ct], alpha=1, color=color, marker=marker, s=25,\n edgecolor='white', linewidth=0.5, label=ct, ax=ax)\n", (29877, 30050), True, 'import seaborn as sns\n'), ((4652, 4728), 'pandas.melt', 'pd.melt', (["data.loc[:, kwargs['cols']]"], {'id_vars': '"""region"""', 'value_vars': '"""tot_thc"""'}), "(data.loc[:, kwargs['cols']], id_vars='region', value_vars='tot_thc')\n", (4659, 4728), True, 'import pandas as pd\n'), ((4793, 4878), 'pandas.melt', 'pd.melt', (["sub_data.loc[:, kwargs['cols']]"], {'id_vars': '"""region"""', 'value_vars': '"""tot_thc"""'}), "(sub_data.loc[:, kwargs['cols']], id_vars='region', value_vars='tot_thc'\n )\n", (4800, 4878), True, 'import pandas as pd\n'), ((12978, 13001), 'numpy.log10', 'np.log10', (['ct_thresh_low'], {}), '(ct_thresh_low)\n', (12986, 13001), True, 'import numpy as np\n'), ((22819, 22843), 'numpy.array', 'np.array', (["kwargs['cols']"], {}), "(kwargs['cols'])\n", (22827, 22843), True, 'import numpy as np\n'), ((27625, 27655), 'numpy.nanmean', 'np.nanmean', (['sim_scores'], {'axis': '(0)'}), '(sim_scores, axis=0)\n', (27635, 27655), True, 'import numpy as np\n'), ((27695, 
27725), 'numpy.nanmean', 'np.nanmean', (['sim_scores'], {'axis': '(0)'}), '(sim_scores, axis=0)\n', (27705, 27725), True, 'import numpy as np\n'), ((20062, 20113), 'matplotlib.patheffects.withStroke', 'PathEffects.withStroke', ([], {'linewidth': '(4)', 'foreground': '"""w"""'}), "(linewidth=4, foreground='w')\n", (20084, 20113), True, 'import matplotlib.patheffects as PathEffects\n'), ((23613, 23627), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (23625, 23627), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((24744, 24759), 'numpy.abs', 'np.abs', (['d_prime'], {}), '(d_prime)\n', (24750, 24759), True, 'import numpy as np\n')]
|
import random
import numpy as np
import torch
from torch import nn
from torchbenchmark.tasks import OTHER
from ...util.model import BenchmarkModel
torch.manual_seed(1337)
random.seed(1337)
np.random.seed(1337)
# pretend we are using MLP to predict CIFAR images
class MLP(nn.Module):
def __init__(self):
super().__init__()
self.main = nn.Sequential(
nn.Flatten(),
nn.Linear(3 * 32 * 32, 1024),
nn.ReLU(),
nn.Linear(1024, 1024),
nn.ReLU(),
nn.Linear(1024, 1024),
nn.ReLU(),
nn.Linear(1024, 1024),
nn.ReLU(),
nn.Linear(1024, 10),
nn.Softmax(dim=-1),
)
def forward(self, x):
return self.main(x)
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
def __init__(self, device='cpu', jit=False, lr=1e-4, weight_decay=1e-4):
super().__init__()
self.device = device
self.jit = jit
batch_size = 4096
# mimic a normalized image
self.sample_inputs = torch.randn(batch_size, 3, 32,
32).clamp_(-1, 1).to(device)
self.sample_targets = torch.randint(0, 10, (batch_size, )).to(device)
self.model = MLP().to(device)
self.optimizer = torch.optim.Adam(self.model.parameters(),
lr=lr,
weight_decay=weight_decay)
self.criterion = nn.CrossEntropyLoss()
def train(self, niter=1):
if self.jit:
raise NotImplementedError()
self.model.train()
for _ in range(niter):
out = self.model(self.sample_inputs)
self.optimizer.zero_grad()
loss = self.criterion(out, self.sample_targets)
loss.backward()
self.optimizer.step()
def eval(self, niter=1):
if self.jit:
raise NotImplementedError()
self.model.eval()
with torch.no_grad():
for _ in range(niter):
out = self.model(self.sample_inputs)
def get_module(self):
if self.jit:
raise NotImplementedError()
return self.model, self.sample_inputs
if __name__ == '__main__':
for device in ['cpu', 'cuda']:
print("Testing device {}, JIT {}".format(device, False))
m = Model(device=device, jit=False)
m.train()
m.eval()
|
[
"torch.manual_seed",
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"torch.nn.Softmax",
"torch.nn.Flatten",
"random.seed",
"torch.randint",
"numpy.random.seed",
"torch.nn.Linear",
"torch.no_grad",
"torch.randn"
] |
[((150, 173), 'torch.manual_seed', 'torch.manual_seed', (['(1337)'], {}), '(1337)\n', (167, 173), False, 'import torch\n'), ((174, 191), 'random.seed', 'random.seed', (['(1337)'], {}), '(1337)\n', (185, 191), False, 'import random\n'), ((192, 212), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (206, 212), True, 'import numpy as np\n'), ((1507, 1528), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1526, 1528), False, 'from torch import nn\n'), ((388, 400), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (398, 400), False, 'from torch import nn\n'), ((414, 442), 'torch.nn.Linear', 'nn.Linear', (['(3 * 32 * 32)', '(1024)'], {}), '(3 * 32 * 32, 1024)\n', (423, 442), False, 'from torch import nn\n'), ((456, 465), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (463, 465), False, 'from torch import nn\n'), ((479, 500), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (488, 500), False, 'from torch import nn\n'), ((514, 523), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (521, 523), False, 'from torch import nn\n'), ((537, 558), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (546, 558), False, 'from torch import nn\n'), ((572, 581), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (579, 581), False, 'from torch import nn\n'), ((595, 616), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (604, 616), False, 'from torch import nn\n'), ((630, 639), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (637, 639), False, 'from torch import nn\n'), ((653, 672), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(10)'], {}), '(1024, 10)\n', (662, 672), False, 'from torch import nn\n'), ((686, 704), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (696, 704), False, 'from torch import nn\n'), ((2021, 2036), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2034, 2036), False, 'import torch\n'), ((1210, 1245), 'torch.randint', 'torch.randint', (['(0)', '(10)', '(batch_size,)'], {}), '(0, 10, (batch_size,))\n', (1223, 1245), False, 'import torch\n'), ((1079, 1113), 'torch.randn', 'torch.randn', (['batch_size', '(3)', '(32)', '(32)'], {}), '(batch_size, 3, 32, 32)\n', (1090, 1113), False, 'import torch\n')]
|
# Importing libraries
from numpy import array
# Lists and arrays
a = array(['h', 101, 'l', 'l', 'o'])
x = ['h', 101, 'l', 'l', 'o']
print(a)
print(x)
print("Size: ", len(x))
# Conditionals
if isinstance(x[1], int):
x[1] = chr(x[1])
elif isinstance(x[1], str):
pass
else:
    raise TypeError("Unsupported type! Don't overdo it! >:c")
print(' uwu '.join(x))
# Loops
for item in x:
print(item)
for i in range(len(x)):
print(x[i])
for i in range(1, 10, 2):
print(i)
while len(x):
print(x.pop(0))
while len(x):
print(x.pop(0))
else:
    print('F for x :C')
# List operations
x.append('H')
x.append('o')
x.append('l')
x.append('a')
x.insert(1, 'o')
# Data input
print(x)
respuesta = input("Hello?")
print(respuesta)
# Arithmetic and boolean operators
print(x)
print(10.1)
print(1 + 2 - 4 * 5 / 8 % 2)
print(2 ** 5)
print(True and True)
print(False and True)
print(False or True)
print(not False)
# List comprehensions
print([i for i in range(1, 11) if i % 2 == 0])
print([j for j in range(2, 101) if all(j % i != 0 for i in range(2, j))])
print([j for j in range(2, 101) if not(j % 2 or j % 3 or j % 5)])
|
[
"numpy.array"
] |
[((72, 104), 'numpy.array', 'array', (["['h', 101, 'l', 'l', 'o']"], {}), "(['h', 101, 'l', 'l', 'o'])\n", (77, 104), False, 'from numpy import array\n')]
|
import argparse
import collections
import os
import cv2
import numpy as np
import pandas as pd
import pretrainedmodels
import torch
import torch.optim as optim
import torchsummary
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torchvision import datasets, models, transforms
from tqdm import tqdm
import skimage.io
from sklearn.metrics import f1_score
import torch.nn as nn
import torch.nn.functional as F
import config
import utils
import classification_dataset
from triplet_dataset import TripletDataset, TripletDatasetUpdate, TripletDatasetPredict
from logger import Logger
from experiments import MODELS
class EmbeddingsModel(nn.Module):
def __init__(self, nb_embeddings=config.NB_EMBEDDINGS):
super().__init__()
self.base_model = pretrainedmodels.resnet18()
self.fc = nn.Linear(2048, nb_embeddings)
def forward(self, x):
x = self.base_model.conv1(x)
x = self.base_model.bn1(x)
x = self.base_model.relu(x)
x = self.base_model.maxpool(x)
x = self.base_model.layer1(x)
x = self.base_model.layer2(x)
x = self.base_model.layer3(x)
x = self.base_model.layer4(x)
x = self.base_model.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class TripletLoss(nn.Module):
def __init__(self, margin):
super().__init__()
self.margin = margin
def forward(self, anchor, positive, negative):
distance_positive = (anchor - positive).pow(2).sum(1)
distance_negative = (anchor - negative).pow(2).sum(1)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.mean()
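# Editor's sketch (illustrative addition, not part of the original script): this loss uses
# *squared* Euclidean distances, so it is similar in spirit to, but not identical to,
# torch.nn.TripletMarginLoss, which defaults to the unsquared L2 distance. A quick
# smoke test with random embeddings (the helper name below is hypothetical):
def _triplet_loss_smoke_test(batch=8, dim=64):
    loss_fn = TripletLoss(margin=1.0)
    anchor, pos, neg = (torch.randn(batch, dim) for _ in range(3))
    return loss_fn(anchor, pos, neg)  # non-negative scalar tensor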
def train(model_name, run=None):
run_str = '' if run is None or run == '' else f'_{run}'
checkpoints_dir = f'../output/checkpoints_3/{model_name}{run_str}'
tensorboard_dir = f'../output/tensorboard_3/{model_name}{run_str}'
os.makedirs(checkpoints_dir, exist_ok=True)
os.makedirs(tensorboard_dir, exist_ok=True)
print('\n', model_name, '\n')
logger = Logger(tensorboard_dir)
model = EmbeddingsModel()
model = model.cuda()
dataset_train = TripletDataset(
is_train=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
crop_size=256
)
dataset_valid = TripletDataset(
is_train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
crop_size=256
)
dataset_update_train = TripletDatasetUpdate(dataset_train)
dataset_update_valid = TripletDatasetUpdate(dataset_valid)
model.training = True
print('using sgd optimiser')
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-5)
scheduler = utils.CosineAnnealingLRWithRestarts(optimizer, T_max=8, T_mult=1.2)
print('Num training images: {} valid images: {}'.format(len(dataset_train), len(dataset_valid)))
data_loader_train = DataLoader(
dataset_train,
shuffle=True,
num_workers=8,
batch_size=64)
data_loader_valid = DataLoader(
dataset_valid,
shuffle=False,
num_workers=8,
batch_size=64)
data_loader_update_train = DataLoader(
dataset_update_train,
shuffle=False,
num_workers=8,
batch_size=64)
data_loader_update_valid = DataLoader(
dataset_update_valid,
shuffle=False,
num_workers=8,
batch_size=64)
criterium = TripletLoss(margin=1.0)
for epoch_num in range(512):
model.eval()
with torch.set_grad_enabled(False):
for iter_num, data in tqdm(enumerate(data_loader_update_train), total=len(data_loader_update_train)):
img = data['img'].cuda()
samples_idx = data['idx']
vectors = model(img).detach().cpu().numpy()
dataset_train.embeddings[samples_idx] = vectors
print(np.mean(dataset_train.embeddings, axis=0), np.std(dataset_train.embeddings, axis=0))
for iter_num, data in tqdm(enumerate(data_loader_update_valid), total=len(data_loader_update_valid)):
img = data['img'].cuda()
samples_idx = data['idx']
vectors = model(img).detach().cpu().numpy()
dataset_valid.embeddings[samples_idx] = vectors
print(np.mean(dataset_train.embeddings, axis=0), np.std(dataset_valid.embeddings, axis=0))
model.train()
epoch_loss = []
with torch.set_grad_enabled(True):
data_iter = tqdm(enumerate(data_loader_train), total=len(data_loader_train))
for iter_num, data in data_iter:
img = data['img'].cuda()
img_pos = data['img_pos'].cuda()
img_neg = data['img_neg'].cuda()
optimizer.zero_grad()
output = model(img)
output_pos = model(img_pos)
output_neg = model(img_neg)
loss = criterium(output, output_pos, output_neg)
epoch_loss.append(float(loss.detach().cpu()))
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
optimizer.step()
data_iter.set_description(
f'{epoch_num} Loss: {np.mean(epoch_loss):1.4f}')
logger.scalar_summary(f'loss_train', np.mean(epoch_loss), epoch_num)
epoch_loss = []
with torch.set_grad_enabled(False):
data_iter = tqdm(enumerate(data_loader_valid), total=len(data_loader_valid))
for iter_num, data in data_iter:
img = data['img'].cuda()
img_pos = data['img_pos'].cuda()
img_neg = data['img_neg'].cuda()
output = model(img)
output_pos = model(img_pos)
output_neg = model(img_neg)
loss = criterium(output, output_pos, output_neg)
epoch_loss.append(float(loss))
data_iter.set_description(
f'{epoch_num} Loss: {np.mean(epoch_loss):1.4f}')
logger.scalar_summary(f'loss_valid', np.mean(epoch_loss), epoch_num)
logger.scalar_summary('lr', optimizer.param_groups[0]['lr'], epoch_num)
scheduler.step(metrics=np.mean(epoch_loss), epoch=epoch_num)
model.eval()
torch.save(
{
'epoch': epoch_num,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
},
f'{checkpoints_dir}/{epoch_num:03}.pt'
)
def predict(model_name, epoch_num, img_dir, sample_ids, run=None):
model = EmbeddingsModel()
model = model.cuda()
run_str = '' if run is None or run == '' else f'_{run}'
checkpoints_dir = f'../output/checkpoints_3/{model_name}{run_str}'
checkpoint = torch.load(f'{checkpoints_dir}/{epoch_num:03}.pt')
model.load_state_dict(checkpoint['model_state_dict'])
dataset = TripletDatasetPredict(sample_ids=sample_ids,
img_dir=img_dir,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
crop_size=256)
data_loader_update_train = DataLoader(
dataset,
shuffle=False,
num_workers=8,
batch_size=64)
results = []
results_idx = []
with torch.set_grad_enabled(False):
for data in tqdm(data_loader_update_train):
img = data['img'].cuda()
samples_idx = data['idx']
embeddings = model(img).detach().cpu().numpy()
results.append(embeddings)
results_idx.append(samples_idx)
# for image_id in tqdm(sample_ids):
# images = []
# for color in ['red', 'green', 'blue']:
# try:
# img = cv2.imread(f'{img_dir}/{image_id}_{color}.png', cv2.IMREAD_UNCHANGED)
# img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_AREA).astype("uint8")
# images.append(img)
# except:
# print(f'failed to open {img_dir}/{image_id}_{color}.png')
# raise
#
# images = np.stack(images, axis=0).astype(np.float32) / 255.0
# images = torch.from_numpy(images).cuda()
# images = normalize(images)
# images = torch.unsqueeze(images, 0)
# embeddings = model(images)
# embeddings = embeddings.detach().cpu().numpy()
# # print(image_id, embeddings.flatten())
# results.append(embeddings)
results = np.concatenate(results, axis=0)
results_idx = np.concatenate(results_idx, axis=0)
utils.print_stats('results_idx diff', np.diff(results_idx))
return results
def predict_extra(model_name, epoch_num, run=None):
data = pd.read_csv('../input/folds_4_extra.csv')
embeddings = predict(model_name=model_name, epoch_num=epoch_num,
img_dir=config.TRAIN_DIR_EXTRA,
sample_ids=data.Id.values,
run=run)
torch.save(embeddings, '../output/embeddings_extra.pt')
for i in range(embeddings.shape[1]):
data[f'emb_{i}'] = embeddings[:, i]
print(np.mean(embeddings, axis=0), np.std(embeddings, axis=0))
data.to_csv('../input/emb_extra.csv', index=False)
def predict_train(model_name, epoch_num, run=None):
data = pd.read_csv('../input/train.csv')
embeddings = predict(model_name=model_name, epoch_num=epoch_num,
img_dir=config.TRAIN_DIR,
sample_ids=data.Id.values,
run=run)
torch.save(embeddings, '../output/embeddings_train.pt')
for i in range(embeddings.shape[1]):
data[f'emb_{i}'] = embeddings[:, i]
print(np.mean(embeddings, axis=0), np.std(embeddings, axis=0))
data.to_csv('../input/emb_train.csv', index=False)
def predict_test(model_name, epoch_num, run=None):
data = pd.read_csv('../input/sample_submission.csv')
embeddings = predict(model_name=model_name, epoch_num=epoch_num,
img_dir=config.TEST_DIR,
sample_ids=data.Id.values,
run=run)
torch.save(embeddings, '../output/embeddings_test.pt')
for i in range(embeddings.shape[1]):
data[f'emb_{i}'] = embeddings[:, i]
print(np.mean(embeddings, axis=0), np.std(embeddings, axis=0))
data.to_csv('../input/emb_test.csv', index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('action', type=str, default='train')
parser.add_argument('--model', type=str, default='tr_resnet18_now_8')
parser.add_argument('--epoch', type=int, default=0)
args = parser.parse_args()
action = args.action
model = args.model
np.set_printoptions(precision=3, linewidth=200)
if action == 'train':
try:
train(model_name=model)
except KeyboardInterrupt:
pass
if action == 'predict_extra':
predict_extra(model_name=model, epoch_num=args.epoch, run=None)
if action == 'predict_train':
predict_train(model_name=model, epoch_num=args.epoch, run=None)
if action == 'predict_test':
predict_test(model_name=model, epoch_num=args.epoch, run=None)
|
[
"pandas.read_csv",
"utils.CosineAnnealingLRWithRestarts",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.diff",
"numpy.concatenate",
"triplet_dataset.TripletDatasetUpdate",
"torchvision.transforms.ToTensor",
"logger.Logger",
"torch.save",
"numpy.std",
"torch.nn.functional.relu",
"torchvision.transforms.Normalize",
"numpy.set_printoptions",
"os.makedirs",
"torch.load",
"tqdm.tqdm",
"pretrainedmodels.resnet18",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.set_grad_enabled"
] |
[((1957, 2000), 'os.makedirs', 'os.makedirs', (['checkpoints_dir'], {'exist_ok': '(True)'}), '(checkpoints_dir, exist_ok=True)\n', (1968, 2000), False, 'import os\n'), ((2005, 2048), 'os.makedirs', 'os.makedirs', (['tensorboard_dir'], {'exist_ok': '(True)'}), '(tensorboard_dir, exist_ok=True)\n', (2016, 2048), False, 'import os\n'), ((2097, 2120), 'logger.Logger', 'Logger', (['tensorboard_dir'], {}), '(tensorboard_dir)\n', (2103, 2120), False, 'from logger import Logger\n'), ((2712, 2747), 'triplet_dataset.TripletDatasetUpdate', 'TripletDatasetUpdate', (['dataset_train'], {}), '(dataset_train)\n', (2732, 2747), False, 'from triplet_dataset import TripletDataset, TripletDatasetUpdate, TripletDatasetPredict\n'), ((2775, 2810), 'triplet_dataset.TripletDatasetUpdate', 'TripletDatasetUpdate', (['dataset_valid'], {}), '(dataset_valid)\n', (2795, 2810), False, 'from triplet_dataset import TripletDataset, TripletDatasetUpdate, TripletDatasetPredict\n'), ((2975, 3042), 'utils.CosineAnnealingLRWithRestarts', 'utils.CosineAnnealingLRWithRestarts', (['optimizer'], {'T_max': '(8)', 'T_mult': '(1.2)'}), '(optimizer, T_max=8, T_mult=1.2)\n', (3010, 3042), False, 'import utils\n'), ((3170, 3239), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {'shuffle': '(True)', 'num_workers': '(8)', 'batch_size': '(64)'}), '(dataset_train, shuffle=True, num_workers=8, batch_size=64)\n', (3180, 3239), False, 'from torch.utils.data import DataLoader\n'), ((3298, 3368), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_valid'], {'shuffle': '(False)', 'num_workers': '(8)', 'batch_size': '(64)'}), '(dataset_valid, shuffle=False, num_workers=8, batch_size=64)\n', (3308, 3368), False, 'from torch.utils.data import DataLoader\n'), ((3434, 3511), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_update_train'], {'shuffle': '(False)', 'num_workers': '(8)', 'batch_size': '(64)'}), '(dataset_update_train, shuffle=False, num_workers=8, batch_size=64)\n', (3444, 3511), False, 'from torch.utils.data import DataLoader\n'), ((3577, 3654), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_update_valid'], {'shuffle': '(False)', 'num_workers': '(8)', 'batch_size': '(64)'}), '(dataset_update_valid, shuffle=False, num_workers=8, batch_size=64)\n', (3587, 3654), False, 'from torch.utils.data import DataLoader\n'), ((7147, 7197), 'torch.load', 'torch.load', (['f"""{checkpoints_dir}/{epoch_num:03}.pt"""'], {}), "(f'{checkpoints_dir}/{epoch_num:03}.pt')\n", (7157, 7197), False, 'import torch\n'), ((7729, 7793), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'shuffle': '(False)', 'num_workers': '(8)', 'batch_size': '(64)'}), '(dataset, shuffle=False, num_workers=8, batch_size=64)\n', (7739, 7793), False, 'from torch.utils.data import DataLoader\n'), ((9140, 9171), 'numpy.concatenate', 'np.concatenate', (['results'], {'axis': '(0)'}), '(results, axis=0)\n', (9154, 9171), True, 'import numpy as np\n'), ((9190, 9225), 'numpy.concatenate', 'np.concatenate', (['results_idx'], {'axis': '(0)'}), '(results_idx, axis=0)\n', (9204, 9225), True, 'import numpy as np\n'), ((9374, 9415), 'pandas.read_csv', 'pd.read_csv', (['"""../input/folds_4_extra.csv"""'], {}), "('../input/folds_4_extra.csv')\n", (9385, 9415), True, 'import pandas as pd\n'), ((9632, 9687), 'torch.save', 'torch.save', (['embeddings', '"""../output/embeddings_extra.pt"""'], {}), "(embeddings, '../output/embeddings_extra.pt')\n", (9642, 9687), False, 'import torch\n'), ((9962, 9995), 'pandas.read_csv', 'pd.read_csv', 
(['"""../input/train.csv"""'], {}), "('../input/train.csv')\n", (9973, 9995), True, 'import pandas as pd\n'), ((10206, 10261), 'torch.save', 'torch.save', (['embeddings', '"""../output/embeddings_train.pt"""'], {}), "(embeddings, '../output/embeddings_train.pt')\n", (10216, 10261), False, 'import torch\n'), ((10535, 10580), 'pandas.read_csv', 'pd.read_csv', (['"""../input/sample_submission.csv"""'], {}), "('../input/sample_submission.csv')\n", (10546, 10580), True, 'import pandas as pd\n'), ((10790, 10844), 'torch.save', 'torch.save', (['embeddings', '"""../output/embeddings_test.pt"""'], {}), "(embeddings, '../output/embeddings_test.pt')\n", (10800, 10844), False, 'import torch\n'), ((11095, 11120), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11118, 11120), False, 'import argparse\n'), ((11397, 11444), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'linewidth': '(200)'}), '(precision=3, linewidth=200)\n', (11416, 11444), True, 'import numpy as np\n'), ((793, 820), 'pretrainedmodels.resnet18', 'pretrainedmodels.resnet18', ([], {}), '()\n', (818, 820), False, 'import pretrainedmodels\n'), ((839, 869), 'torch.nn.Linear', 'nn.Linear', (['(2048)', 'nb_embeddings'], {}), '(2048, nb_embeddings)\n', (848, 869), True, 'import torch.nn as nn\n'), ((1626, 1685), 'torch.nn.functional.relu', 'F.relu', (['(distance_positive - distance_negative + self.margin)'], {}), '(distance_positive - distance_negative + self.margin)\n', (1632, 1685), True, 'import torch.nn.functional as F\n'), ((7875, 7904), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (7897, 7904), False, 'import torch\n'), ((7926, 7956), 'tqdm.tqdm', 'tqdm', (['data_loader_update_train'], {}), '(data_loader_update_train)\n', (7930, 7956), False, 'from tqdm import tqdm\n'), ((9268, 9288), 'numpy.diff', 'np.diff', (['results_idx'], {}), '(results_idx)\n', (9275, 9288), True, 'import numpy as np\n'), ((9784, 9811), 'numpy.mean', 'np.mean', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (9791, 9811), True, 'import numpy as np\n'), ((9813, 9839), 'numpy.std', 'np.std', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (9819, 9839), True, 'import numpy as np\n'), ((10358, 10385), 'numpy.mean', 'np.mean', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (10365, 10385), True, 'import numpy as np\n'), ((10387, 10413), 'numpy.std', 'np.std', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (10393, 10413), True, 'import numpy as np\n'), ((10941, 10968), 'numpy.mean', 'np.mean', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (10948, 10968), True, 'import numpy as np\n'), ((10970, 10996), 'numpy.std', 'np.std', (['embeddings'], {'axis': '(0)'}), '(embeddings, axis=0)\n', (10976, 10996), True, 'import numpy as np\n'), ((3797, 3826), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (3819, 3826), False, 'import torch\n'), ((4739, 4767), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (4761, 4767), False, 'import torch\n'), ((5631, 5650), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (5638, 5650), True, 'import numpy as np\n'), ((5701, 5730), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (5723, 5730), False, 'import torch\n'), ((6402, 6421), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (6409, 6421), True, 'import numpy as np\n'), ((4168, 4209), 'numpy.mean', 
'np.mean', (['dataset_train.embeddings'], {'axis': '(0)'}), '(dataset_train.embeddings, axis=0)\n', (4175, 4209), True, 'import numpy as np\n'), ((4211, 4251), 'numpy.std', 'np.std', (['dataset_train.embeddings'], {'axis': '(0)'}), '(dataset_train.embeddings, axis=0)\n', (4217, 4251), True, 'import numpy as np\n'), ((4594, 4635), 'numpy.mean', 'np.mean', (['dataset_train.embeddings'], {'axis': '(0)'}), '(dataset_train.embeddings, axis=0)\n', (4601, 4635), True, 'import numpy as np\n'), ((4637, 4677), 'numpy.std', 'np.std', (['dataset_valid.embeddings'], {'axis': '(0)'}), '(dataset_valid.embeddings, axis=0)\n', (4643, 4677), True, 'import numpy as np\n'), ((6547, 6566), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (6554, 6566), True, 'import numpy as np\n'), ((2288, 2309), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2307, 2309), False, 'from torchvision import datasets, models, transforms\n'), ((2323, 2389), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2343, 2389), False, 'from torchvision import datasets, models, transforms\n'), ((2542, 2563), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2561, 2563), False, 'from torchvision import datasets, models, transforms\n'), ((2577, 2643), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2597, 2643), False, 'from torchvision import datasets, models, transforms\n'), ((7476, 7497), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7495, 7497), False, 'from torchvision import datasets, models, transforms\n'), ((7539, 7605), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (7559, 7605), False, 'from torchvision import datasets, models, transforms\n'), ((5557, 5576), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (5564, 5576), True, 'import numpy as np\n'), ((6328, 6347), 'numpy.mean', 'np.mean', (['epoch_loss'], {}), '(epoch_loss)\n', (6335, 6347), True, 'import numpy as np\n')]
|
import functools
import numpy as np
def dft2(f, alpha, npix=None, shift=(0, 0), offset=(0, 0), unitary=True, out=None):
"""Compute the 2-dimensional discrete Fourier Transform.
This function allows independent control over input shape, output shape,
and output sampling by implementing the matrix triple product algorithm
described in [1].
Parameters
----------
f : array_like
2D array to Fourier Transform
alpha : float or array_like
Output plane sampling interval (frequency). If :attr:`alpha` is an
        array, ``alpha[0]`` represents row-wise sampling and ``alpha[1]``
represents column-wise sampling. If :attr:`alpha` is a scalar,
        ``alpha[0] = alpha[1] = alpha`` gives uniform sampling across the rows
and columns of the output plane.
npix : int or array_like, optional
Size of the output array :attr:`F`. If :attr:`npix` is an array,
        ``F.shape = (npix[0], npix[1])``. If :attr:`npix` is a scalar,
``F.shape = (npix, npix)``. Default is ``f.shape``.
shift : array_like, optional
Number of pixels in (r,c) to shift the DC pixel in the output plane
with the origin centrally located in the plane. Default is ``(0,0)``.
offset : array_like, optional
Number of pixels in (r,c) that the input plane is shifted relative to
the origin. Default is ``(0,0)``.
unitary : bool, optional
Normalization flag. If ``True``, a normalization is performed on the
output such that the DFT operation is unitary and energy is conserved
through the Fourier transform operation (Parseval's theorem). In this
        way, the energy in a limited-area DFT is a fraction of the total
energy corresponding to the limited area. Default is ``True``.
out : ndarray or None
A location into which the result is stored. If provided, it must have
shape = npix and dtype = np.complex. If not provided or None, a
freshly-allocated array is returned.
Returns
-------
F : complex ndarray
Notes
-----
* Setting ``alpha = 1/f.shape`` and ``npix = f.shape`` is equivalent to
::
F = np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(f)))
* ``dft2()`` is designed to place the DC pixel in the same location as a
well formed call to any standard FFT for both even and odd sized input
arrays. The DC pixel is located at ``np.floor(npix/2) + 1``, which is
consistent with calls to Numpy's FFT method where the input and output
are correctly shifted:
``np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(f)))``.
* If the y-axis shift behavior is not what you are expecting, you most
likely have your plotting axes flipped (matplotlib's default behavior is
to place [0,0] in the upper left corner of the axes). This may be resolved
by either flipping the sign of the y component of ``shift`` or by passing
``origin = 'lower'`` to ``imshow()``.
References
----------
    [1] Soummer, et al. Fast computation of Lyot-style coronagraph propagation (2007)
"""
alpha_row, alpha_col = _sanitize_alpha(alpha)
f = np.asarray(f)
m, n = f.shape
if npix is None:
npix = [m, n]
M, N = _sanitize_npix(npix)
shift_row, shift_col = _sanitize_shift(shift)
offset_row, offset_col = _sanitize_npix(offset)
if out is not None:
if not np.can_cast(complex, out.dtype):
raise TypeError(f"Cannot cast complex output to dtype('{out.dtype}')")
E1, E2 = _dft2_matrices(m, n, M, N, alpha_row, alpha_col, shift_row, shift_col,
offset_row, offset_col)
F = np.dot(E1.dot(f), E2, out=out)
# now calculate the answer, without reallocating memory
if unitary:
np.multiply(F, np.sqrt(np.abs(alpha_row * alpha_col)), out=F)
return F
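# Editor's sketch (illustrative addition, not part of the original module): the FFT
# equivalence stated in the Notes above can be checked numerically for a small square
# input; the helper name is hypothetical.
def _example_dft2_matches_fft(n=16):
    f = np.random.rand(n, n)
    F_mtp = dft2(f, alpha=1 / n, npix=n, unitary=False)
    F_fft = np.fft.ifftshift(np.fft.fft2(np.fft.fftshift(f)))
    return np.allclose(F_mtp, F_fft)  # expected: True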
@functools.lru_cache(maxsize=32)
def _dft2_matrices(m, n, M, N, alphar, alphac, shiftr, shiftc, offsetr, offsetc):
R, S, U, V = _dft2_coords(m, n, M, N)
E1 = np.exp(-2.0 * 1j * np.pi * alphar * np.outer(R-shiftr+offsetr, U-shiftr)).T
E2 = np.exp(-2.0 * 1j * np.pi * alphac * np.outer(S-shiftc+offsetc, V-shiftc))
return E1, E2
@functools.lru_cache(maxsize=32)
def _dft2_coords(m, n, M, N):
# R and S are (r,c) coordinates in the (m x n) input plane f
# V and U are (r,c) coordinates in the (M x N) output plane F
R = np.arange(m) - np.floor(m/2.0)
S = np.arange(n) - np.floor(n/2.0)
U = np.arange(M) - np.floor(M/2.0)
V = np.arange(N) - np.floor(N/2.0)
return R, S, U, V
def idft2(F, alpha, npix=None, shift=(0,0), unitary=True, out=None):
"""Compute the 2-dimensional inverse discrete Fourier Transform.
This function allows independent control over input shape, output shape,
and output sampling by implementing the matrix triple product algorithm
described in [1].
Parameters
----------
F : array_like
2D array to Fourier Transform
alpha : float or array_like
Input plane sampling interval (frequency). If :attr:`alpha` is an array,
        ``alpha[0]`` represents row-wise sampling and ``alpha[1]`` represents
column-wise sampling. If :attr:`alpha` is a scalar,
        ``alpha[0] = alpha[1] = alpha`` represents uniform sampling across the
rows and columns of the input plane.
npix : int or array_like, optional
        Size of the output array :attr:`f`. If :attr:`npix` is an array,
        ``f.shape = (npix[0], npix[1])``. If :attr:`npix` is a scalar,
        ``f.shape = (npix, npix)``. Default is ``F.shape``
shift : array_like, optional
Number of pixels in (x,y) to shift the DC pixel in the output plane with
the origin centrally located in the plane. Default is `[0,0]`.
unitary : bool, optional
Normalization flag. If ``True``, a normalization is performed on the
output such that the DFT operation is unitary and energy is conserved
through the Fourier transform operation (Parseval's theorem). In this
        way, the energy in a limited-area DFT is a fraction of the total
energy corresponding to the limited area. Default is ``True``.
Returns
-------
f : complex ndarray
Notes
-----
* Setting ``alpha = 1/F.shape`` and ``npix = F.shape`` is equivalent to
::
F = np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(F)))
* ``idft2()`` is designed to place the DC pixel in the same location as a
well formed call to any standard FFT for both even and odd sized input
arrays. The DC pixel is located at ``np.floor(npix/2) + 1``, which is
consistent with calls to Numpy's IFFT method where the input and output
are correctly shifted:
``np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(f)))``.
* If the y-axis shift behavior is not what you are expecting, you most
likely have your plotting axes flipped (matplotlib's default behavior is
to place [0,0] in the upper left corner of the axes). This may be resolved
by either flipping the sign of the y component of ``shift`` or by passing
``origin = 'lower'`` to ``imshow()``.
References
----------
    [1] Soummer, et al. Fast computation of Lyot-style coronagraph propagation (2007)
[2] `Expressing the inverse DFT in terms of the DFT <https://en.wikipedia.org/wiki/Discrete_Fourier_transform#Expressing_the_inverse_DFT_in_terms_of_the_DFT>`_.
"""
F = np.asarray(F)
N = F.size
# will allocate memory for F if out == None
F = dft2(np.conj(F), alpha, npix, shift, unitary=unitary, out=out)
np.conj(F, out=F)
return np.divide(F, N, out=F)
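# Editor's sketch (illustrative addition, not part of the original module): with
# unitary=False, a forward dft2 followed by idft2 at the same sampling recovers the
# input to floating-point precision, mirroring an fft2/ifft2 round trip; the helper
# name is hypothetical.
def _example_idft2_round_trip(n=16):
    f = np.random.rand(n, n)
    F = dft2(f, alpha=1 / n, npix=n, unitary=False)
    f_back = idft2(F, alpha=1 / n, npix=n, unitary=False)
    return np.allclose(f, f_back)  # expected: True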
def _sanitize_alpha(x):
"""Return consistent representation of alpha as ar, ac"""
x = np.asarray(x)
if x.size == 1:
ar, ac = float(x), float(x)
else:
ar, ac = float(x[0]), float(x[1])
return ar, ac
def _sanitize_npix(x):
"""Return consistent representation of npix as M, N"""
x = np.asarray(x)
if x.size == 1:
M, N = int(x), int(x)
else:
M, N = int(x[0]), int(x[1])
return M, N
def _sanitize_shift(x):
"""Return consistent representation of shift as sr, sc"""
if isinstance(x, np.ndarray):
sr, sc = float(x[0]), float(x[1])
else:
sr, sc = x
return sr, sc
|
[
"numpy.abs",
"numpy.arange",
"numpy.conj",
"numpy.asarray",
"numpy.floor",
"numpy.can_cast",
"numpy.outer",
"functools.lru_cache",
"numpy.divide"
] |
[((3906, 3937), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (3925, 3937), False, 'import functools\n'), ((4251, 4282), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (4270, 4282), False, 'import functools\n'), ((3198, 3211), 'numpy.asarray', 'np.asarray', (['f'], {}), '(f)\n', (3208, 3211), True, 'import numpy as np\n'), ((7523, 7536), 'numpy.asarray', 'np.asarray', (['F'], {}), '(F)\n', (7533, 7536), True, 'import numpy as np\n'), ((7675, 7692), 'numpy.conj', 'np.conj', (['F'], {'out': 'F'}), '(F, out=F)\n', (7682, 7692), True, 'import numpy as np\n'), ((7704, 7726), 'numpy.divide', 'np.divide', (['F', 'N'], {'out': 'F'}), '(F, N, out=F)\n', (7713, 7726), True, 'import numpy as np\n'), ((7823, 7836), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (7833, 7836), True, 'import numpy as np\n'), ((8055, 8068), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (8065, 8068), True, 'import numpy as np\n'), ((4453, 4465), 'numpy.arange', 'np.arange', (['m'], {}), '(m)\n', (4462, 4465), True, 'import numpy as np\n'), ((4468, 4485), 'numpy.floor', 'np.floor', (['(m / 2.0)'], {}), '(m / 2.0)\n', (4476, 4485), True, 'import numpy as np\n'), ((4492, 4504), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (4501, 4504), True, 'import numpy as np\n'), ((4507, 4524), 'numpy.floor', 'np.floor', (['(n / 2.0)'], {}), '(n / 2.0)\n', (4515, 4524), True, 'import numpy as np\n'), ((4531, 4543), 'numpy.arange', 'np.arange', (['M'], {}), '(M)\n', (4540, 4543), True, 'import numpy as np\n'), ((4546, 4563), 'numpy.floor', 'np.floor', (['(M / 2.0)'], {}), '(M / 2.0)\n', (4554, 4563), True, 'import numpy as np\n'), ((4570, 4582), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4579, 4582), True, 'import numpy as np\n'), ((4585, 4602), 'numpy.floor', 'np.floor', (['(N / 2.0)'], {}), '(N / 2.0)\n', (4593, 4602), True, 'import numpy as np\n'), ((7613, 7623), 'numpy.conj', 'np.conj', (['F'], {}), '(F)\n', (7620, 7623), True, 'import numpy as np\n'), ((3450, 3481), 'numpy.can_cast', 'np.can_cast', (['complex', 'out.dtype'], {}), '(complex, out.dtype)\n', (3461, 3481), True, 'import numpy as np\n'), ((4192, 4234), 'numpy.outer', 'np.outer', (['(S - shiftc + offsetc)', '(V - shiftc)'], {}), '(S - shiftc + offsetc, V - shiftc)\n', (4200, 4234), True, 'import numpy as np\n'), ((3850, 3879), 'numpy.abs', 'np.abs', (['(alpha_row * alpha_col)'], {}), '(alpha_row * alpha_col)\n', (3856, 3879), True, 'import numpy as np\n'), ((4107, 4149), 'numpy.outer', 'np.outer', (['(R - shiftr + offsetr)', '(U - shiftr)'], {}), '(R - shiftr + offsetr, U - shiftr)\n', (4115, 4149), True, 'import numpy as np\n')]
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import math
import random
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import N_A
from numba import jit
import copy
__author__ = "<NAME>, <NAME>, <NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2020, The Materials Project"
__version__ = "0.1"
"""
Kinetic Monte Carlo (kMC) simulation for a reaction network, assuming spatial homogeneity. Simulation can be performed
with and without ReactionNetwork objects. The version without ReactionNetwork objects is computationally cheaper.
The algorithm is described by Gillespie (1976).
"""
def initialize_simulation(reaction_network, initial_cond, volume=10 ** -24):
"""
Initial loop through reactions to create lists, mappings, and initial states needed for simulation without
reaction network objects.
Args:
reaction_network: Fully generated ReactionNetwork
initial_cond: dict mapping mol_index to initial concentration [M]. mol_index is entry position in
reaction_network.entries_list
volume: float of system volume
:return:
initial_state: array of initial molecule amounts, indexed corresponding to reaction_network.entries_list
initial_state_dict: dict mapping molecule index to initial molecule amounts
species_rxn_mapping: 2d array; each row i contains reactions which molecule_i takes part in
molid_index_mapping: mapping between species entry id and its molecule index
reactants_array: (n_rxns x 2) array, each row containing reactant mol_index of forward reaction
products_array: (n_rxns x 2) array, each row containing product mol_index of forward reaction
coord_array: (2*n_rxns x 1) array, with coordination number of each for and rev rxn: [c1_f, c1_r, c2_f, c2_r...]
rate_constants: (2*n_rxns x 1) array, with rate constant of each for and rev rxn: [k1_f, k1_r, k2_f, k2_r ...]
propensities: (2*n_rxns x 1) array of reaction propensities, defined as coord_num*rate_constant
"""
num_rxns = len(reaction_network.reactions)
num_species = len(reaction_network.entries_list)
molid_index_mapping = dict()
initial_state = [0 for i in range(num_species)]
initial_state_dict = dict()
for ind, mol in enumerate(reaction_network.entries_list):
molid_index_mapping[mol.entry_id] = ind
this_c = initial_cond.get(mol.entry_id, 0)
this_mol_amt = int(volume * N_A * 1000 * this_c)
initial_state[ind] = this_mol_amt
if mol.entry_id in initial_cond:
initial_state_dict[ind] = this_mol_amt
# Initially compile each species' reactions in lists, later convert to a 2d array
species_rxn_mapping_list = [[] for j in range(num_species)]
reactant_array = -1 * np.ones((num_rxns, 2), dtype=int)
product_array = -1 * np.ones((num_rxns, 2), dtype=int)
coord_array = np.zeros(2 * num_rxns)
rate_constants = np.zeros(2 * num_rxns)
for id, reaction in enumerate(reaction_network.reactions):
# Keep track of reactant amounts, for later calculating coordination number
num_reactants_for = list()
num_reactants_rev = list()
rate_constants[2 * id] = reaction.k_A
rate_constants[2 * id + 1] = reaction.k_B
for idx, react in enumerate(reaction.reactants):
# for each reactant, need to find the corresponding mol_id with the index
mol_ind = molid_index_mapping[react.entry_id]
reactant_array[id, idx] = mol_ind
species_rxn_mapping_list[mol_ind].append(2 * id)
num_reactants_for.append(initial_state[mol_ind])
for idx, prod in enumerate(reaction.products):
mol_ind = molid_index_mapping[prod.entry_id]
product_array[id, idx] = mol_ind
species_rxn_mapping_list[mol_ind].append(2 * id + 1)
num_reactants_rev.append(initial_state[mol_ind])
if len(reaction.reactants) == 1:
coord_array[2 * id] = num_reactants_for[0]
elif (len(reaction.reactants) == 2) and (
reaction.reactants[0] == reaction.reactants[1]
):
coord_array[2 * id] = num_reactants_for[0] * (num_reactants_for[0] - 1)
elif (len(reaction.reactants) == 2) and (
reaction.reactants[0] != reaction.reactants[1]
):
coord_array[2 * id] = num_reactants_for[0] * num_reactants_for[1]
else:
raise RuntimeError(
"Only single and bimolecular reactions supported by this simulation"
)
# For reverse reaction
if len(reaction.products) == 1:
coord_array[2 * id + 1] = num_reactants_rev[0]
elif (len(reaction.products) == 2) and (
reaction.products[0] == reaction.products[1]
):
coord_array[2 * id + 1] = num_reactants_rev[0] * (num_reactants_rev[0] - 1)
elif (len(reaction.products) == 2) and (
reaction.products[0] != reaction.products[1]
):
coord_array[2 * id + 1] = num_reactants_rev[0] * num_reactants_rev[1]
else:
raise RuntimeError(
"Only single and bimolecular reactions supported by this simulation"
)
rxn_mapping_lengths = [len(rxn_list) for rxn_list in species_rxn_mapping_list]
max_mapping_length = max(rxn_mapping_lengths)
species_rxn_mapping = -1 * np.ones((num_species, max_mapping_length), dtype=int)
for index, rxn_list in enumerate(species_rxn_mapping_list):
this_map_length = rxn_mapping_lengths[index]
if this_map_length == max_mapping_length:
species_rxn_mapping[index, :] = rxn_list
else:
species_rxn_mapping[
index, : this_map_length - max_mapping_length
] = rxn_list
propensities = np.multiply(coord_array, rate_constants)
return [
np.array(initial_state, dtype=int),
initial_state_dict,
species_rxn_mapping,
reactant_array,
product_array,
coord_array,
rate_constants,
propensities,
molid_index_mapping,
]
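# Reading aid for the arrays built above: reaction i of reaction_network.reactions
# occupies slot 2*i (forward) and slot 2*i + 1 (reverse) in coord_array,
# rate_constants and propensities; e.g. for i = 3, propensities[6] is the forward
# propensity and propensities[7] is the reverse propensity.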
@jit(nopython=True, parallel=True)
def kmc_simulate(
time_steps,
coord_array,
rate_constants,
propensity_array,
species_rxn_mapping,
reactants,
products,
state,
):
"""
KMC Simulation of reaction network and specified initial conditions. Args are all Numpy arrays, to allow
computational speed up with Numba.
Args:
time_steps: int number of time steps desired to run
        coord_array: array containing coordination numbers of forward and reverse rxns.
        rate_constants: array containing rate constants of forward and reverse rxns
        propensity_array: array containing propensities of forward and reverse rxns
species_rxn_mapping: 2d array; each row i contains reactions which molecule_i takes part in
reactants: (n_rxns x 2) array, each row containing reactant mol_index of forward reaction
products: (n_rxns x 2) array, each row containing product mol_index of forward reaction
state: array of initial molecule amounts, indexed corresponding to reaction_network.entries_list
    :return: A (2 x time_steps) Numpy array. The first row contains the indices of the reactions that occurred.
            The second row contains the time steps (tau) generated at each iteration.
"""
total_propensity = np.sum(propensity_array)
reaction_history = [0 for step in range(time_steps)]
times = [0.0 for step in range(time_steps)]
relevant_ind = np.where(propensity_array > 0)[
0
] # Take advantage of sparsity - many propensities will be 0.
for step_counter in range(time_steps):
r1 = random.random()
r2 = random.random()
tau = -np.log(r1) / total_propensity
random_propensity = r2 * total_propensity
abrgd_reaction_choice_ind = np.where(
np.cumsum(propensity_array[relevant_ind]) >= random_propensity
)[0][0]
reaction_choice_ind = relevant_ind[abrgd_reaction_choice_ind]
converted_rxn_ind = math.floor(reaction_choice_ind / 2)
if reaction_choice_ind % 2:
reverse = True
else:
reverse = False
state = update_state(reactants, products, state, converted_rxn_ind, reverse)
# Log the reactions that need to be altered after reaction is performed, for the coordination array
reactions_to_change = list()
for reactant_id in reactants[converted_rxn_ind, :]:
if reactant_id == -1:
continue
else:
reactions_to_change.extend(list(species_rxn_mapping[reactant_id, :]))
for product_id in products[converted_rxn_ind, :]:
if product_id == -1:
continue
else:
reactions_to_change.extend(list(species_rxn_mapping[product_id, :]))
rxns_change = set(reactions_to_change)
for rxn_ind in rxns_change:
if rxn_ind == -1:
continue
elif rxn_ind % 2:
this_reverse = True
else:
this_reverse = False
this_h = get_coordination(
reactants, products, state, math.floor(rxn_ind / 2), this_reverse
)
coord_array[rxn_ind] = this_h
propensity_array = np.multiply(rate_constants, coord_array)
relevant_ind = np.where(propensity_array > 0)[0]
total_propensity = np.sum(propensity_array[relevant_ind])
reaction_history[step_counter] = int(reaction_choice_ind)
times[step_counter] = tau
return np.vstack((np.array(reaction_history), np.array(times)))
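# Hypothetical end-to-end usage sketch (`rn` and `initial_cond` are placeholders,
# not defined in this module):
#
#     (state, state_dict, spec_rxn_map, reactants, products,
#      coords, rates, props, mol_map) = initialize_simulation(rn, initial_cond)
#     history = kmc_simulate(100000, coords, rates, props,
#                            spec_rxn_map, reactants, products, state)
#     rxn_inds, taus = history[0, :], history[1, :]  # fired reactions, waiting times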
@jit(nopython=True)
def update_state(reactants, products, state, rxn_ind, reverse):
"""
Updating the system state based on chosen reaction, during kMC simulation.
Args:
reactants: (n_rxns x 2) array, each row containing reactant mol_index of forward reaction
products: (n_rxns x 2) array, each row containing product mol_index of forward reaction
state: array of initial molecule amounts, indexed corresponding to reaction_network.entries_list
rxn_ind: int of reaction index, corresponding to position in reaction_network.reactions list
reverse: bool of whether this is the reverse reaction or not
:return: updated state array, after performing the specified reaction
"""
if rxn_ind == -1:
raise RuntimeError("Incorrect reaction index when updating state")
if reverse:
for reactant_id in products[rxn_ind, :]:
if reactant_id == -1:
continue
else:
state[reactant_id] -= 1
if state[reactant_id] < 0:
raise ValueError("State invalid! Negative specie encountered")
for product_id in reactants[rxn_ind, :]:
if product_id == -1:
continue
else:
state[product_id] += 1
else:
for reactant_id in reactants[rxn_ind, :]:
if reactant_id == -1:
continue
else:
state[reactant_id] -= 1
if state[reactant_id] < 0:
raise ValueError("State invalid! Negative specie encountered")
for product_id in products[rxn_ind, :]:
if product_id == -1:
continue
else:
state[product_id] += 1
return state
@jit(nopython=True)
def get_coordination(reactants, products, state, rxn_id, reverse):
"""
    Calculate the coordination number of a reaction, for reactions involving two reactants or fewer.
They are defined as follows:
A -> B; coord = n(A)
A + A --> B; coord = n(A) * (n(A) - 1)
A + B --> C; coord = n(A) * n(B)
Args:
reactants: (n_rxns x 2) array, each row containing reactant mol_index of forward reaction
products: (n_rxns x 2) array, each row containing product mol_index of forward reaction
state: array of initial molecule amounts, indexed corresponding to reaction_network.entries_list
        rxn_id: int of reaction index, corresponding to position in reaction_network.reactions list
reverse: bool of whether this is the reverse reaction or not
:return: float of reaction coordination number
"""
if reverse:
reactant_array = products[rxn_id, :]
num_reactants = len(np.where(reactant_array != -1)[0])
else:
reactant_array = reactants[rxn_id, :]
num_reactants = len(np.where(reactant_array != -1)[0])
num_mols_list = list()
for reactant_id in reactant_array:
num_mols_list.append(state[reactant_id])
if num_reactants == 1:
h_prop = num_mols_list[0]
elif (num_reactants == 2) and (reactant_array[0] == reactant_array[1]):
h_prop = num_mols_list[0] * (num_mols_list[0] - 1) / 2
elif (num_reactants == 2) and (reactant_array[0] != reactant_array[1]):
h_prop = num_mols_list[0] * num_mols_list[1]
else:
raise RuntimeError(
"Only single and bimolecular reactions supported by this simulation"
)
return h_prop
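# Worked example with illustrative numbers: for a forward reaction whose reactant
# indices are (0, 1) and a state of [4, 7, 0, ...], the two reactants are distinct,
# so h = n(A) * n(B) = 4 * 7 = 28 and the propensity used in kmc_simulate is
# k_forward * 28.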
class KmcDataAnalyzer:
"""
    Functions to analyze (function-based) KMC outputs from many simulation runs. The reaction history and
    time history data are expected to be lists of arrays, one array per simulation.
Args:
reaction_network: fully generated ReactionNetwork, used for kMC simulation
molid_ind_mapping: dict mapping each entry's id to its index; of form {entry_id: mol_index, ... }
species_rxn_mapping: 2d array; each row i contains reactions which molecule_i takes part in
initial_state_dict: dict mapping mol_id to its initial amount {mol1_id: amt_1, mol2_id: amt2 ... }
products: (n_rxns x 2) array, each row containing product mol_index of forward reaction
reactants: (n_rxns x 2) array, each row containing reactant mol_index of forward reaction
reaction_history: list of arrays of reaction histories of each simulation.
time_history: list of arrays of time histories of each simulation.
"""
def __init__(
self,
reaction_network,
molid_ind_mapping,
species_rxn_mapping,
initial_state_dict,
products,
reactants,
reaction_history,
time_history,
):
self.reaction_network = reaction_network
self.molid_ind_mapping = molid_ind_mapping
self.species_rxn_mapping = species_rxn_mapping
self.initial_state_dict = initial_state_dict
self.products = products
self.reactants = reactants
self.reaction_history = reaction_history
self.time_history = time_history
self.num_sims = len(self.reaction_history)
if self.num_sims != len(self.time_history):
raise RuntimeError(
"Number of datasets for rxn history and time step history should be same!"
)
self.molind_id_mapping = [
mol.entry_id for mol in self.reaction_network.entries_list
]
def generate_time_dep_profiles(self):
"""
Generate plottable time-dependent profiles of species and rxns from raw KMC output, obtain final states.
:return dict containing species profiles, reaction profiles, and final states from each simulation.
{species_profiles: [ {mol_ind1: [(t0, n(t0)), (t1, n(t1)...], mol_ind2: [...] , ... }, {...}, ... ]
reaction_profiles: [ {rxn_ind1: [t0, t1, ...], rxn_ind2: ..., ...}, {...}, ...]
final_states: [ {mol_ind1: n1, mol_ind2: ..., ...}, {...}, ...] }
"""
species_profiles = list()
reaction_profiles = list()
final_states = list()
for n_sim in range(self.num_sims):
sim_time_history = self.time_history[n_sim]
sim_rxn_history = self.reaction_history[n_sim]
sim_species_profile = dict()
sim_rxn_profile = dict()
cumulative_time = list(np.cumsum(np.array(sim_time_history)))
state = copy.deepcopy(self.initial_state_dict)
for mol_ind in state:
sim_species_profile[mol_ind] = [(0.0, self.initial_state_dict[mol_ind])]
total_iterations = len(sim_rxn_history)
for iter in range(total_iterations):
rxn_ind = sim_rxn_history[iter]
t = cumulative_time[iter]
if rxn_ind not in sim_rxn_profile:
sim_rxn_profile[rxn_ind] = [t]
else:
sim_rxn_profile[rxn_ind].append(t)
converted_ind = math.floor(rxn_ind / 2)
if rxn_ind % 2:
reacts = self.products[converted_ind, :]
prods = self.reactants[converted_ind, :]
else:
reacts = self.reactants[converted_ind, :]
prods = self.products[converted_ind, :]
for r_ind in reacts:
if r_ind == -1:
continue
else:
try:
state[r_ind] -= 1
if state[r_ind] < 0:
raise ValueError(
"State invalid: negative specie: {}".format(r_ind)
)
sim_species_profile[r_ind].append((t, state[r_ind]))
except KeyError:
raise ValueError(
"Reactant specie {} given is not in state!".format(
r_ind
)
)
for p_ind in prods:
if p_ind == -1:
continue
else:
if (p_ind in state) and (p_ind in sim_species_profile):
state[p_ind] += 1
sim_species_profile[p_ind].append((t, state[p_ind]))
else:
state[p_ind] = 1
sim_species_profile[p_ind] = [(0.0, 0), (t, state[p_ind])]
# for plotting convenience, add data point at final time
for mol_ind in sim_species_profile:
sim_species_profile[mol_ind].append(
(cumulative_time[-1], state[mol_ind])
)
species_profiles.append(sim_species_profile)
reaction_profiles.append(sim_rxn_profile)
final_states.append(state)
return {
"species_profiles": species_profiles,
"reaction_profiles": reaction_profiles,
"final_states": final_states,
}
def final_state_analysis(self, final_states):
"""
Gather statistical analysis of the final states of simulation.
Args:
final_states: list of dicts of final states, as generated in generate_time_dep_profiles()
        :return: list of tuples containing statistical data for each species, sorted from highest to lowest average occurrence
"""
state_arrays = (
dict()
) # For each molecule, compile an array of its final amounts
for iter, final_state in enumerate(final_states):
for mol_ind, amt in final_state.items():
# Store the amount, and convert key from mol_ind to entry_id
if self.molind_id_mapping[mol_ind] not in state_arrays:
state_arrays[self.molind_id_mapping[mol_ind]] = np.zeros(
self.num_sims
)
state_arrays[self.molind_id_mapping[mol_ind]][iter] = amt
analyzed_states = dict() # will contain statistical results of final states
for mol_entry, state_array in state_arrays.items():
analyzed_states[mol_entry] = (np.mean(state_array), np.std(state_array))
# Sort from highest avg final amount to lowest
sorted_analyzed_states = sorted(
[(entry_id, data_tup) for entry_id, data_tup in analyzed_states.items()],
key=lambda x: x[1][0],
reverse=True,
)
return sorted_analyzed_states
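    # Shape of the returned data, with made-up numbers:
    #     [(entry_id_a, (152.3, 4.1)), (entry_id_b, (87.0, 9.6)), ...]
    # i.e. (mean final amount over all simulations, standard deviation), sorted
    # from the most to the least abundant species.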
def plot_species_profiles(
self,
species_profiles,
final_states,
num_label=12,
num_plots=None,
filename=None,
file_dir=None,
):
"""
        Sorting and plotting species profiles for a specified number of simulations. The profiles might be very similar,
        so it may not be necessary to plot every run to understand the results.
Args:
species_profiles: list of dicts of species as function of time, for each simulation
final_states: list of dicts of final states of each simulation
num_label: integer number of species in the legend
filename (str)
file_dir (str)
"""
if num_plots is None:
num_plots = self.num_sims
elif num_plots > self.num_sims:
num_plots = self.num_sims
for n_sim in range(num_plots):
# Sorting and plotting:
fig, ax = plt.subplots()
sorted_state = sorted(
[(k, v) for k, v in final_states[n_sim].items()],
key=lambda x: x[1],
reverse=True,
)
sorted_inds = [mol_tuple[0] for mol_tuple in sorted_state]
sorted_ind_id_mapping = dict()
iter_counter = 0
for id, ind in self.molid_ind_mapping.items():
if ind in sorted_inds[:num_label]:
sorted_ind_id_mapping[ind] = id
iter_counter += 1
if iter_counter == num_label:
break
colors = plt.cm.get_cmap("hsv", num_label)
this_id = 0
t_end = sum(self.time_history[n_sim])
for mol_ind in species_profiles[n_sim]:
# ts = np.append(np.array([e[0] for e in species_profiles[n_sim][mol_ind]]), t_end)
ts = np.array([e[0] for e in species_profiles[n_sim][mol_ind]])
nums = np.array([e[1] for e in species_profiles[n_sim][mol_ind]])
if mol_ind in sorted_inds[:num_label]:
mol_id = sorted_ind_id_mapping[mol_ind]
for entry in self.reaction_network.entries_list:
if mol_id == entry.entry_id:
this_composition = (
entry.molecule.composition.alphabetical_formula
)
this_charge = entry.molecule.charge
this_label = this_composition + " " + str(this_charge)
this_color = colors(this_id)
this_id += 1
break
ax.plot(ts, nums, label=this_label, color=this_color)
else:
ax.plot(ts, nums)
title = "KMC simulation, total time {}".format(t_end)
ax.set(title=title, xlabel="Time (s)", ylabel="# Molecules")
ax.legend(
loc="upper right", bbox_to_anchor=(1, 1), ncol=2, fontsize="small"
)
sim_filename = filename + "_run_" + str(n_sim + 1)
if file_dir is None:
plt.show()
else:
plt.savefig(file_dir + "/" + sim_filename)
def analyze_intermediates(self, species_profiles, cutoff=0.9):
"""
Identify intermediates from species vs time profiles. Species are intermediates if consumed nearly as much
as they are created.
Args:
            species_profiles: list of dicts (one per simulation) of species profiles, as generated in generate_time_dep_profiles()
cutoff: (float) fraction to adjust definition of intermediate
:return: Analyzed data in a dict, of the form:
        {mol1: {'frequency': (float), 'lifetime': (avg, std), 't_max': (avg, std), 'amt_produced': (avg, std)},
mol2: {...}, ... }
"""
intermediates = dict()
for n_sim in range(self.num_sims):
for mol_ind, prof in species_profiles[n_sim].items():
history = np.array([t[1] for t in prof])
diff_history = np.diff(history)
max_amt = max(history)
amt_produced = np.sum(diff_history == 1)
amt_consumed = np.sum(diff_history == -1)
# Identify the intermediate, accounting for fluctuations
if (amt_produced >= 3) and (amt_consumed > amt_produced * cutoff):
if mol_ind not in intermediates:
intermediates[mol_ind] = dict()
intermediates[mol_ind]["lifetime"] = list()
intermediates[mol_ind]["amt_produced"] = list()
intermediates[mol_ind]["t_max"] = list()
intermediates[mol_ind]["amt_consumed"] = list()
# Intermediate lifetime is approximately the time from its max amount to when nearly all consumed
max_ind = np.where(history == max_amt)[0][0]
t_max = prof[max_ind][0]
for state in prof[max_ind + 1 :]:
if state[1] < (1 - cutoff) * amt_produced + history[0]:
intermediates[mol_ind]["lifetime"].append(state[0] - t_max)
intermediates[mol_ind]["t_max"].append(t_max)
intermediates[mol_ind]["amt_produced"].append(amt_produced)
intermediates[mol_ind]["amt_consumed"].append(amt_consumed)
break
intermediates_analysis = dict()
for mol_ind in intermediates:
entry_id = self.molind_id_mapping[mol_ind]
intermediates_analysis[entry_id] = dict() # convert keys to entry id
if len(intermediates[mol_ind]["lifetime"]) != len(
intermediates[mol_ind]["t_max"]
):
raise RuntimeError("Intermediates data should be of the same length")
intermediates_analysis[entry_id]["frequency"] = (
len(intermediates[mol_ind]["lifetime"]) / self.num_sims
)
lifetime_array = np.array(intermediates[mol_ind]["lifetime"])
intermediates_analysis[entry_id]["lifetime"] = (
np.mean(lifetime_array),
np.std(lifetime_array),
)
t_max_array = np.array(intermediates[mol_ind]["t_max"])
intermediates_analysis[entry_id]["t_max"] = (
np.mean(t_max_array),
np.std(t_max_array),
)
amt_produced_array = np.array(intermediates[mol_ind]["amt_produced"])
intermediates_analysis[entry_id]["amt_produced"] = (
np.mean(amt_produced_array),
np.std(amt_produced_array),
)
amt_consumed_array = np.array(intermediates[mol_ind]["amt_consumed"])
intermediates_analysis[entry_id]["amt_consumed"] = (
np.mean(amt_consumed_array),
np.std(amt_produced_array),
)
# Sort by highest average amount produced
sorted_intermediates_analysis = sorted(
[
(entry_id, mol_data)
for entry_id, mol_data in intermediates_analysis.items()
],
key=lambda x: x[1]["amt_produced"][0],
reverse=True,
)
return sorted_intermediates_analysis
def correlate_reactions(self, reaction_inds):
"""
Correlate two reactions, by finding the average time and steps elapsed for rxn2 to fire after rxn1,
and vice-versa.
Args:
reaction_inds: list, array, or tuple of two reaction indexes
:return: dict containing analysis of how reactions are correlated {rxn1: {'time': (float), 'steps': (float),
'occurrences': float}, rxn2: {...} }
"""
correlation_data = dict()
correlation_analysis = dict()
for rxn_ind in reaction_inds:
correlation_data[rxn_ind] = dict()
correlation_data[rxn_ind]["time"] = list()
correlation_data[rxn_ind]["steps"] = list()
correlation_data[rxn_ind]["occurrences"] = list()
correlation_analysis[rxn_ind] = dict()
for n_sim in range(self.num_sims):
cum_time = np.cumsum(self.time_history[n_sim])
rxn_locations = dict()
# Find the step numbers when reactions fire in the simulation
for rxn_ind in reaction_inds:
rxn_locations[rxn_ind] = list(
np.where(self.reaction_history[n_sim] == rxn_ind)[0]
)
rxn_locations[rxn_ind].append(len(self.reaction_history[n_sim]))
# Correlate between each reaction
for (rxn_ind, location_list) in rxn_locations.items():
time_elapse = list()
step_elapse = list()
occurrences = 0
for (rxn_ind_j, location_list_j) in rxn_locations.items():
if rxn_ind == rxn_ind_j:
continue
for i in range(1, len(location_list)):
for loc_j in location_list_j:
# Find location where reaction j happens after reaction i, before reaction i fires again
if (loc_j > location_list[i - 1]) and (
loc_j < location_list[i]
):
time_elapse.append(
cum_time[loc_j] - cum_time[location_list[i - 1]]
)
step_elapse.append(loc_j - location_list[i - 1])
occurrences += 1
break
if len(time_elapse) == 0:
correlation_data[rxn_ind]["occurrences"].append(0)
else:
correlation_data[rxn_ind]["time"].append(
np.mean(np.array(time_elapse))
)
correlation_data[rxn_ind]["steps"].append(
np.mean(np.array(step_elapse))
)
correlation_data[rxn_ind]["occurrences"].append(occurrences)
for rxn_ind, data_dict in correlation_data.items():
if len(data_dict["time"]) != 0:
correlation_analysis[rxn_ind]["time"] = (
np.mean(np.array(data_dict["time"])),
np.std(np.array(data_dict["time"])),
)
correlation_analysis[rxn_ind]["steps"] = (
np.mean(np.array(data_dict["steps"])),
np.std(np.array(data_dict["steps"])),
)
correlation_analysis[rxn_ind]["occurrences"] = (
np.mean(np.array(data_dict["occurrences"])),
np.std(np.array(data_dict["occurrences"])),
)
else:
print(
"Reaction ",
rxn_ind,
"does not lead to the other reaction in simulation ",
n_sim,
)
return correlation_analysis
def quantify_specific_reaction(self, reaction_history, reaction_index):
"""
Quantify a reaction from one simulation reaction history
Args:
            reaction_history: dict mapping each fired reaction index to its firing times in one simulation (an entry of reaction_profiles)
reaction_index: integer of reaction index of interest
:return: integer number of times reaction is fired
"""
if reaction_index not in reaction_history:
reaction_count = 0
else:
reaction_count = len(reaction_history[reaction_index])
return reaction_count
def quantify_rank_reactions(self, reaction_type=None, num_rxns=None):
"""
Given reaction histories, identify the most commonly occurring reactions, on average.
Can rank generally, or by reactions of a certain type.
Args:
reaction_profiles (list of dicts): reactions fired as a function of time
reaction_type (string)
num_rxns (int): the amount of reactions interested in collecting data on. If None, record for all.
Returns:
reaction_data: list of reactions and their avg, std of times fired. Sorted by the average times fired.
[(rxn1, (avg, std)), (rxn2, (avg, std)) ... ]
"""
allowed_rxn_types = [
"One electron reduction",
"One electron oxidation",
"Intramolecular single bond breakage",
"Intramolecular single bond formation",
"Coordination bond breaking AM -> A+M",
"Coordination bond forming A+M -> AM",
"Molecular decomposition breaking one bond A -> B+C",
"Molecular formation from one new bond A+B -> C",
"Concerted",
]
if reaction_type is not None:
rxns_of_type = list()
if reaction_type not in allowed_rxn_types:
raise RuntimeError(
"This reaction type does not (yet) exist in our reaction networks."
)
for ind, rxn in enumerate(self.reaction_network.reactions):
if rxn.reaction_type()["rxn_type_A"] == reaction_type:
rxns_of_type.append(2 * ind)
elif rxn.reaction_type()["rxn_type_B"] == reaction_type:
rxns_of_type.append(2 * ind + 1)
reaction_data = dict() # keeping record of each iteration
# Loop to count all reactions fired
for n_sim in range(self.num_sims):
rxns_fired = set(self.reaction_history[n_sim])
if reaction_type is not None:
relevant_rxns = [r for r in rxns_fired if r in rxns_of_type]
else:
relevant_rxns = rxns_fired
for rxn_ind in relevant_rxns:
if rxn_ind not in reaction_data:
reaction_data[rxn_ind] = list()
reaction_data[rxn_ind].append(
np.sum(self.reaction_history[n_sim] == rxn_ind)
)
reaction_analysis = dict()
for rxn_ind, counts in reaction_data.items():
reaction_analysis[rxn_ind] = (
np.mean(np.array(counts)),
np.std(np.array(counts)),
)
# Sort reactions by the average amount fired
sorted_reaction_analysis = sorted(
[(i, c) for i, c in reaction_analysis.items()],
key=lambda x: x[1][0],
reverse=True,
)
if num_rxns is None:
return sorted_reaction_analysis
else:
return sorted_reaction_analysis[:num_rxns]
def frequency_analysis(self, rxn_inds, spec_inds, partitions=100):
"""
Calculate the frequency of reaction and species formation as a function of time. Simulation data is
discretized into time intervals, and probabilities in each set are obtained.
Args:
            rxn_inds: list of indices of reactions of interest
            spec_inds: list of molecule indices of interest
partitions: number of intervals in which to discretize time
        :return: dict of dicts containing the statistics of reactions fired and products formed in each time interval.
{reaction_data: {rxn_ind1: [(t0, avg0, std0), (t1, avg1, std1), ...], rxn_ind2: [...], ... rxn_ind_n: [...]}
{species_data: {spec1: [(t0, avg0, std0), (t1, avg1, std1), ...], spec2: [...], ... specn: [...]}}
"""
reaction_frequency_data = dict()
reaction_frequency_array = (
dict()
) # Growing arrays of reaction frequencies as fxn of time
species_frequency_data = dict()
species_frequency_array = dict()
new_species_counters = dict()
for ind in rxn_inds:
reaction_frequency_data[ind] = [0 for j in range(partitions)]
for ind in spec_inds:
species_frequency_data[ind] = [0 for j in range(partitions)]
new_species_counters[ind] = 0
for n_sim in range(self.num_sims):
delta_t = np.sum(self.time_history[n_sim]) / partitions
ind_0 = 0
t = 0
n = 0 # for tracking which time interval we are in
species_counters = copy.deepcopy(
new_species_counters
) # for counting species as they appear
rxn_freq_data = copy.deepcopy(reaction_frequency_data)
spec_freq_data = copy.deepcopy(species_frequency_data)
for step_num, tau in enumerate(self.time_history[n_sim]):
t += tau
this_rxn_ind = int(self.reaction_history[n_sim][step_num])
if this_rxn_ind % 2: # reverse reaction
prods = self.reactants[math.floor(this_rxn_ind / 2), :]
else:
prods = self.products[math.floor(this_rxn_ind / 2), :]
for spec_ind in spec_inds:
if spec_ind in prods:
species_counters[spec_ind] += 1
# When t reaches the next discretized time step, or end of the simulation
if (t >= (n + 1) * delta_t) or (
step_num == len(self.reaction_history[n_sim]) - 1
):
n_to_fill = n
if t >= (n + 2) * delta_t:
n += math.floor(t / delta_t - n)
else:
n += 1
steps = step_num - ind_0 + 1
for spec_ind in spec_inds:
spec_freq_data[spec_ind][n_to_fill] = (
species_counters[spec_ind] / steps
)
for rxn_ind in rxn_inds:
rxn_freq = (
np.count_nonzero(
self.reaction_history[n_sim][ind_0 : step_num + 1]
== rxn_ind
)
/ steps
)
# t_mdpt = (self.time_history[n_sim][step_num] + self.time_history[n_sim][ind_0]) / 2
rxn_freq_data[rxn_ind][n_to_fill] = rxn_freq
# Reset and update counters
species_counters = copy.deepcopy(new_species_counters)
ind_0 = step_num + 1
for rxn_ind in rxn_inds:
if n_sim == 0:
reaction_frequency_array[rxn_ind] = np.array(rxn_freq_data[rxn_ind])
else:
reaction_frequency_array[rxn_ind] = np.vstack(
(reaction_frequency_array[rxn_ind], rxn_freq_data[rxn_ind])
)
# print('reaction freq array', reaction_frequency_array)
for spec_ind in spec_inds:
if n_sim == 0:
species_frequency_array[spec_ind] = np.array(
spec_freq_data[spec_ind]
)
else:
species_frequency_array[spec_ind] = np.vstack(
(species_frequency_array[spec_ind], spec_freq_data[spec_ind])
)
# Statistical analysis
statistical_rxn_data = dict()
statistical_spec_data = dict()
avg_delta_t = (
np.mean(np.array([sum(self.time_history[i]) for i in range(self.num_sims)]))
/ partitions
)
time_list = [i * avg_delta_t + avg_delta_t / 2 for i in range(partitions)]
# print('time_list: ', time_list)
for rxn_ind in rxn_inds:
if self.num_sims == 1:
avgs = reaction_frequency_array[rxn_ind]
stds = np.zeros(partitions)
else:
avgs = np.mean(reaction_frequency_array[rxn_ind], 0)
stds = np.std(reaction_frequency_array[rxn_ind], 0)
statistical_rxn_data[rxn_ind] = [
(time_list[n], avgs[n], stds[n]) for n in range(partitions)
]
for spec_ind in spec_inds:
if self.num_sims == 1:
spec_avgs = species_frequency_array[spec_ind]
spec_stds = np.zeros(partitions)
else:
spec_avgs = np.mean(species_frequency_array[spec_ind], 0)
spec_stds = np.std(species_frequency_array[spec_ind], 0)
statistical_spec_data[spec_ind] = [
(time_list[n], spec_avgs[n], spec_stds[n]) for n in range(partitions)
]
return {
"reaction_data": statistical_rxn_data,
"species_data": statistical_spec_data,
}
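    # Illustration with made-up numbers: for partitions=100 and a total simulated
    # time of 1e-6 s, each interval spans delta_t = 1e-8 s. An entry such as
    # (5e-9, 0.12, 0.03) for a reaction means that, averaged over simulations,
    # that reaction accounted for ~12% (+/- 3%) of the KMC steps in the first interval.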
def find_rxn_index(self, reaction, reverse):
"""
Find the reaction index of a given reaction object
Args:
reaction: Reaction object
reverse: bool to say whether reaction is reverse or forward
:return: integer reaction index
"""
for ind, rxn in enumerate(self.reaction_network.reactions):
if rxn == reaction:
if reverse is True:
rxn_ind = 2 * ind + 1
else:
rxn_ind = 2 * ind
break
return rxn_ind
|
[
"math.floor",
"numpy.log",
"numpy.count_nonzero",
"numpy.array",
"copy.deepcopy",
"numpy.mean",
"numpy.multiply",
"numpy.where",
"numpy.diff",
"numpy.vstack",
"matplotlib.pyplot.savefig",
"numpy.ones",
"numba.jit",
"numpy.std",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.zeros",
"numpy.cumsum",
"random.random",
"matplotlib.pyplot.subplots"
] |
[((6251, 6284), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'parallel': '(True)'}), '(nopython=True, parallel=True)\n', (6254, 6284), False, 'from numba import jit\n'), ((9817, 9835), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (9820, 9835), False, 'from numba import jit\n'), ((11607, 11625), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (11610, 11625), False, 'from numba import jit\n'), ((2996, 3018), 'numpy.zeros', 'np.zeros', (['(2 * num_rxns)'], {}), '(2 * num_rxns)\n', (3004, 3018), True, 'import numpy as np\n'), ((3040, 3062), 'numpy.zeros', 'np.zeros', (['(2 * num_rxns)'], {}), '(2 * num_rxns)\n', (3048, 3062), True, 'import numpy as np\n'), ((5944, 5984), 'numpy.multiply', 'np.multiply', (['coord_array', 'rate_constants'], {}), '(coord_array, rate_constants)\n', (5955, 5984), True, 'import numpy as np\n'), ((7514, 7538), 'numpy.sum', 'np.sum', (['propensity_array'], {}), '(propensity_array)\n', (7520, 7538), True, 'import numpy as np\n'), ((2885, 2918), 'numpy.ones', 'np.ones', (['(num_rxns, 2)'], {'dtype': 'int'}), '((num_rxns, 2), dtype=int)\n', (2892, 2918), True, 'import numpy as np\n'), ((2944, 2977), 'numpy.ones', 'np.ones', (['(num_rxns, 2)'], {'dtype': 'int'}), '((num_rxns, 2), dtype=int)\n', (2951, 2977), True, 'import numpy as np\n'), ((5516, 5569), 'numpy.ones', 'np.ones', (['(num_species, max_mapping_length)'], {'dtype': 'int'}), '((num_species, max_mapping_length), dtype=int)\n', (5523, 5569), True, 'import numpy as np\n'), ((6006, 6040), 'numpy.array', 'np.array', (['initial_state'], {'dtype': 'int'}), '(initial_state, dtype=int)\n', (6014, 6040), True, 'import numpy as np\n'), ((7663, 7693), 'numpy.where', 'np.where', (['(propensity_array > 0)'], {}), '(propensity_array > 0)\n', (7671, 7693), True, 'import numpy as np\n'), ((7828, 7843), 'random.random', 'random.random', ([], {}), '()\n', (7841, 7843), False, 'import random\n'), ((7857, 7872), 'random.random', 'random.random', ([], {}), '()\n', (7870, 7872), False, 'import random\n'), ((8203, 8238), 'math.floor', 'math.floor', (['(reaction_choice_ind / 2)'], {}), '(reaction_choice_ind / 2)\n', (8213, 8238), False, 'import math\n'), ((9481, 9521), 'numpy.multiply', 'np.multiply', (['rate_constants', 'coord_array'], {}), '(rate_constants, coord_array)\n', (9492, 9521), True, 'import numpy as np\n'), ((9606, 9644), 'numpy.sum', 'np.sum', (['propensity_array[relevant_ind]'], {}), '(propensity_array[relevant_ind])\n', (9612, 9644), True, 'import numpy as np\n'), ((9545, 9575), 'numpy.where', 'np.where', (['(propensity_array > 0)'], {}), '(propensity_array > 0)\n', (9553, 9575), True, 'import numpy as np\n'), ((9768, 9794), 'numpy.array', 'np.array', (['reaction_history'], {}), '(reaction_history)\n', (9776, 9794), True, 'import numpy as np\n'), ((9796, 9811), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (9804, 9811), True, 'import numpy as np\n'), ((16248, 16286), 'copy.deepcopy', 'copy.deepcopy', (['self.initial_state_dict'], {}), '(self.initial_state_dict)\n', (16261, 16286), False, 'import copy\n'), ((21452, 21466), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (21464, 21466), True, 'import matplotlib.pyplot as plt\n'), ((22085, 22118), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""hsv"""', 'num_label'], {}), "('hsv', num_label)\n", (22100, 22118), True, 'import matplotlib.pyplot as plt\n'), ((26683, 26727), 'numpy.array', 'np.array', (["intermediates[mol_ind]['lifetime']"], {}), "(intermediates[mol_ind]['lifetime'])\n", 
(26691, 26727), True, 'import numpy as np\n'), ((26910, 26951), 'numpy.array', 'np.array', (["intermediates[mol_ind]['t_max']"], {}), "(intermediates[mol_ind]['t_max'])\n", (26918, 26951), True, 'import numpy as np\n'), ((27132, 27180), 'numpy.array', 'np.array', (["intermediates[mol_ind]['amt_produced']"], {}), "(intermediates[mol_ind]['amt_produced'])\n", (27140, 27180), True, 'import numpy as np\n'), ((27382, 27430), 'numpy.array', 'np.array', (["intermediates[mol_ind]['amt_consumed']"], {}), "(intermediates[mol_ind]['amt_consumed'])\n", (27390, 27430), True, 'import numpy as np\n'), ((28884, 28919), 'numpy.cumsum', 'np.cumsum', (['self.time_history[n_sim]'], {}), '(self.time_history[n_sim])\n', (28893, 28919), True, 'import numpy as np\n'), ((37088, 37123), 'copy.deepcopy', 'copy.deepcopy', (['new_species_counters'], {}), '(new_species_counters)\n', (37101, 37123), False, 'import copy\n'), ((37221, 37259), 'copy.deepcopy', 'copy.deepcopy', (['reaction_frequency_data'], {}), '(reaction_frequency_data)\n', (37234, 37259), False, 'import copy\n'), ((37289, 37326), 'copy.deepcopy', 'copy.deepcopy', (['species_frequency_data'], {}), '(species_frequency_data)\n', (37302, 37326), False, 'import copy\n'), ((7888, 7898), 'numpy.log', 'np.log', (['r1'], {}), '(r1)\n', (7894, 7898), True, 'import numpy as np\n'), ((9359, 9382), 'math.floor', 'math.floor', (['(rxn_ind / 2)'], {}), '(rxn_ind / 2)\n', (9369, 9382), False, 'import math\n'), ((12570, 12600), 'numpy.where', 'np.where', (['(reactant_array != -1)'], {}), '(reactant_array != -1)\n', (12578, 12600), True, 'import numpy as np\n'), ((12689, 12719), 'numpy.where', 'np.where', (['(reactant_array != -1)'], {}), '(reactant_array != -1)\n', (12697, 12719), True, 'import numpy as np\n'), ((16813, 16836), 'math.floor', 'math.floor', (['(rxn_ind / 2)'], {}), '(rxn_ind / 2)\n', (16823, 16836), False, 'import math\n'), ((20149, 20169), 'numpy.mean', 'np.mean', (['state_array'], {}), '(state_array)\n', (20156, 20169), True, 'import numpy as np\n'), ((20171, 20190), 'numpy.std', 'np.std', (['state_array'], {}), '(state_array)\n', (20177, 20190), True, 'import numpy as np\n'), ((22366, 22424), 'numpy.array', 'np.array', (['[e[0] for e in species_profiles[n_sim][mol_ind]]'], {}), '([e[0] for e in species_profiles[n_sim][mol_ind]])\n', (22374, 22424), True, 'import numpy as np\n'), ((22448, 22506), 'numpy.array', 'np.array', (['[e[1] for e in species_profiles[n_sim][mol_ind]]'], {}), '([e[1] for e in species_profiles[n_sim][mol_ind]])\n', (22456, 22506), True, 'import numpy as np\n'), ((23690, 23700), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23698, 23700), True, 'import matplotlib.pyplot as plt\n'), ((23735, 23777), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(file_dir + '/' + sim_filename)"], {}), "(file_dir + '/' + sim_filename)\n", (23746, 23777), True, 'import matplotlib.pyplot as plt\n'), ((24569, 24599), 'numpy.array', 'np.array', (['[t[1] for t in prof]'], {}), '([t[1] for t in prof])\n', (24577, 24599), True, 'import numpy as np\n'), ((24631, 24647), 'numpy.diff', 'np.diff', (['history'], {}), '(history)\n', (24638, 24647), True, 'import numpy as np\n'), ((24718, 24743), 'numpy.sum', 'np.sum', (['(diff_history == 1)'], {}), '(diff_history == 1)\n', (24724, 24743), True, 'import numpy as np\n'), ((24775, 24801), 'numpy.sum', 'np.sum', (['(diff_history == -1)'], {}), '(diff_history == -1)\n', (24781, 24801), True, 'import numpy as np\n'), ((26805, 26828), 'numpy.mean', 'np.mean', (['lifetime_array'], {}), '(lifetime_array)\n', 
(26812, 26828), True, 'import numpy as np\n'), ((26846, 26868), 'numpy.std', 'np.std', (['lifetime_array'], {}), '(lifetime_array)\n', (26852, 26868), True, 'import numpy as np\n'), ((27026, 27046), 'numpy.mean', 'np.mean', (['t_max_array'], {}), '(t_max_array)\n', (27033, 27046), True, 'import numpy as np\n'), ((27064, 27083), 'numpy.std', 'np.std', (['t_max_array'], {}), '(t_max_array)\n', (27070, 27083), True, 'import numpy as np\n'), ((27262, 27289), 'numpy.mean', 'np.mean', (['amt_produced_array'], {}), '(amt_produced_array)\n', (27269, 27289), True, 'import numpy as np\n'), ((27307, 27333), 'numpy.std', 'np.std', (['amt_produced_array'], {}), '(amt_produced_array)\n', (27313, 27333), True, 'import numpy as np\n'), ((27512, 27539), 'numpy.mean', 'np.mean', (['amt_consumed_array'], {}), '(amt_consumed_array)\n', (27519, 27539), True, 'import numpy as np\n'), ((27557, 27583), 'numpy.std', 'np.std', (['amt_produced_array'], {}), '(amt_produced_array)\n', (27563, 27583), True, 'import numpy as np\n'), ((36907, 36939), 'numpy.sum', 'np.sum', (['self.time_history[n_sim]'], {}), '(self.time_history[n_sim])\n', (36913, 36939), True, 'import numpy as np\n'), ((40589, 40609), 'numpy.zeros', 'np.zeros', (['partitions'], {}), '(partitions)\n', (40597, 40609), True, 'import numpy as np\n'), ((40651, 40696), 'numpy.mean', 'np.mean', (['reaction_frequency_array[rxn_ind]', '(0)'], {}), '(reaction_frequency_array[rxn_ind], 0)\n', (40658, 40696), True, 'import numpy as np\n'), ((40720, 40764), 'numpy.std', 'np.std', (['reaction_frequency_array[rxn_ind]', '(0)'], {}), '(reaction_frequency_array[rxn_ind], 0)\n', (40726, 40764), True, 'import numpy as np\n'), ((41062, 41082), 'numpy.zeros', 'np.zeros', (['partitions'], {}), '(partitions)\n', (41070, 41082), True, 'import numpy as np\n'), ((41129, 41174), 'numpy.mean', 'np.mean', (['species_frequency_array[spec_ind]', '(0)'], {}), '(species_frequency_array[spec_ind], 0)\n', (41136, 41174), True, 'import numpy as np\n'), ((41203, 41247), 'numpy.std', 'np.std', (['species_frequency_array[spec_ind]', '(0)'], {}), '(species_frequency_array[spec_ind], 0)\n', (41209, 41247), True, 'import numpy as np\n'), ((16199, 16225), 'numpy.array', 'np.array', (['sim_time_history'], {}), '(sim_time_history)\n', (16207, 16225), True, 'import numpy as np\n'), ((19818, 19841), 'numpy.zeros', 'np.zeros', (['self.num_sims'], {}), '(self.num_sims)\n', (19826, 19841), True, 'import numpy as np\n'), ((34802, 34849), 'numpy.sum', 'np.sum', (['(self.reaction_history[n_sim] == rxn_ind)'], {}), '(self.reaction_history[n_sim] == rxn_ind)\n', (34808, 34849), True, 'import numpy as np\n'), ((35025, 35041), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (35033, 35041), True, 'import numpy as np\n'), ((35067, 35083), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (35075, 35083), True, 'import numpy as np\n'), ((39156, 39191), 'copy.deepcopy', 'copy.deepcopy', (['new_species_counters'], {}), '(new_species_counters)\n', (39169, 39191), False, 'import copy\n'), ((39358, 39390), 'numpy.array', 'np.array', (['rxn_freq_data[rxn_ind]'], {}), '(rxn_freq_data[rxn_ind])\n', (39366, 39390), True, 'import numpy as np\n'), ((39469, 39539), 'numpy.vstack', 'np.vstack', (['(reaction_frequency_array[rxn_ind], rxn_freq_data[rxn_ind])'], {}), '((reaction_frequency_array[rxn_ind], rxn_freq_data[rxn_ind]))\n', (39478, 39539), True, 'import numpy as np\n'), ((39782, 39816), 'numpy.array', 'np.array', (['spec_freq_data[spec_ind]'], {}), '(spec_freq_data[spec_ind])\n', (39790, 39816), 
True, 'import numpy as np\n'), ((39941, 40013), 'numpy.vstack', 'np.vstack', (['(species_frequency_array[spec_ind], spec_freq_data[spec_ind])'], {}), '((species_frequency_array[spec_ind], spec_freq_data[spec_ind]))\n', (39950, 40013), True, 'import numpy as np\n'), ((8026, 8067), 'numpy.cumsum', 'np.cumsum', (['propensity_array[relevant_ind]'], {}), '(propensity_array[relevant_ind])\n', (8035, 8067), True, 'import numpy as np\n'), ((29138, 29187), 'numpy.where', 'np.where', (['(self.reaction_history[n_sim] == rxn_ind)'], {}), '(self.reaction_history[n_sim] == rxn_ind)\n', (29146, 29187), True, 'import numpy as np\n'), ((31075, 31102), 'numpy.array', 'np.array', (["data_dict['time']"], {}), "(data_dict['time'])\n", (31083, 31102), True, 'import numpy as np\n'), ((31132, 31159), 'numpy.array', 'np.array', (["data_dict['time']"], {}), "(data_dict['time'])\n", (31140, 31159), True, 'import numpy as np\n'), ((31267, 31295), 'numpy.array', 'np.array', (["data_dict['steps']"], {}), "(data_dict['steps'])\n", (31275, 31295), True, 'import numpy as np\n'), ((31325, 31353), 'numpy.array', 'np.array', (["data_dict['steps']"], {}), "(data_dict['steps'])\n", (31333, 31353), True, 'import numpy as np\n'), ((31467, 31501), 'numpy.array', 'np.array', (["data_dict['occurrences']"], {}), "(data_dict['occurrences'])\n", (31475, 31501), True, 'import numpy as np\n'), ((31531, 31565), 'numpy.array', 'np.array', (["data_dict['occurrences']"], {}), "(data_dict['occurrences'])\n", (31539, 31565), True, 'import numpy as np\n'), ((38208, 38235), 'math.floor', 'math.floor', (['(t / delta_t - n)'], {}), '(t / delta_t - n)\n', (38218, 38235), False, 'import math\n'), ((25492, 25520), 'numpy.where', 'np.where', (['(history == max_amt)'], {}), '(history == max_amt)\n', (25500, 25520), True, 'import numpy as np\n'), ((30618, 30639), 'numpy.array', 'np.array', (['time_elapse'], {}), '(time_elapse)\n', (30626, 30639), True, 'import numpy as np\n'), ((30758, 30779), 'numpy.array', 'np.array', (['step_elapse'], {}), '(step_elapse)\n', (30766, 30779), True, 'import numpy as np\n'), ((37597, 37625), 'math.floor', 'math.floor', (['(this_rxn_ind / 2)'], {}), '(this_rxn_ind / 2)\n', (37607, 37625), False, 'import math\n'), ((37694, 37722), 'math.floor', 'math.floor', (['(this_rxn_ind / 2)'], {}), '(this_rxn_ind / 2)\n', (37704, 37722), False, 'import math\n'), ((38653, 38730), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.reaction_history[n_sim][ind_0:step_num + 1] == rxn_ind)'], {}), '(self.reaction_history[n_sim][ind_0:step_num + 1] == rxn_ind)\n', (38669, 38730), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 23 17:18:39 2021
@author: Koustav
"""
import os
import glob
import matplotlib.pyplot as plt
import seaborn as sea
import numpy as np
import pandas as pan
import math
import collections
import matplotlib.ticker as mtick
from mpl_toolkits import mplot3d
from matplotlib.collections import LineCollection
from scipy.optimize import curve_fit
import powerlaw
def pow_law(x, a, expo):
return a*(np.power(x, expo))
def trunc_pow_law(x, a, expo, trunc_expo): #Truncated Power Law
return a*(np.power(x, expo))*np.exp(trunc_expo*x)
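# Minimal fitting sketch for the forms above (synthetic data; numpy and curve_fit
# are already imported at the top of this script):
#
#     xs = np.array([1.0, 2.0, 4.0, 8.0, 16.0, 32.0])
#     ys = trunc_pow_law(xs, 1.0, -1.5, -0.01)
#     popt, pcov = curve_fit(trunc_pow_law, xs, ys, p0=[1, -1, -0.001], maxfev=5000)
#     # popt should recover roughly (1.0, -1.5, -0.01); np.sqrt(np.diag(pcov))
#     # gives the one-sigma uncertainties of the fitted parameters.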
def main_ind():
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
gaol={} #Stores truncated power law fit data.
gaol[0.60] =[]; gaol[0.70] =[]; gaol[0.75] =[];
gaol[0.80] =[]; gaol[0.90] =[]; gaol[0.95] =[];
L=0
for i in range(6,7):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
'''if(p == 0.728):
print("Skipped")
continue'''
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / (a) for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
#Sorting array in increasing order of del(s).
#DP_freqs = DP_freqs[DP_freqs[:,0].argsort()]
#Next, to convert PDF into 1 - CDF (P(S >= (DEL(S))))
print("Sorted del(s) PDF:")
print(DP_freqs)
'''DP_freqs[-2,1] += DP_freqs[-1,1]; #DP_freqs[-1,1] = 0
k= len(DP_freqs[:,1]) #Finding total number of del(s) elements
print("Total distinct del(s) samples:\t" +str(k))
for j in range(k-3, -1, -1):
#Iterate over the PDF function in reverse.
DP_freqs[j,1] += DP_freqs[j+1,1]
print("Sorted del(s) 1-CDF:")
print(DP_freqs)'''
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
'''if(os.path.isdir("1-CDF")==False):
os.mkdir("1-CDF")
os.chdir("1-CDF")'''
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
#hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$|\Delta s|$", r"$P (S \geq \Delta s)$"])
hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$|\Delta s|$", r"$P (S = \Delta s)$"])
fig = plt.figure(figsize=(6.4,4.8))
f = sea.scatterplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S = \Delta s)$")
f.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
#Overlaying two seaborn plots.
#ax = fig.add_subplot(111)
#sea.scatterplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.5, s=2, ax= ax)
#sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.2, ax= ax) #, s=1)
#ax.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
plt.yscale('log'); plt.xscale('log')
plt.xlim(1, 10**5)
plt.ylim(10**(-6.4), 10**(0.1))
plt.savefig("0P(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
'''x1 = np.transpose(DP_freqs[:,0])
x2 = np.transpose(DP_freqs[:,1])
popt, pcov = curve_fit(trunc_pow_law, x1, x2, p0= np.asarray([1, -0.75, -0.0005]), maxfev=5000 )
perr = np.sqrt(np.diag(pcov))
print("SD of exponent:\t" +str(perr[1]) + " for p:\t" +str(p))
tukan= (popt[0], popt[1], perr[1], popt[2], perr[2])
plt.plot(x1, trunc_pow_law(x1, *popt), 'm--', label=r'Fit: $ P (S \geq \Delta s) = %3.2f \times \Delta s^{(%4.3f \mp %4.3f)}\times e^{(%4.3f \mp %4.3f)\times \Delta s}$ ' % tukan )
plt.ylim(10**(-6.4), 10**(0.1)); plt.xlim(1, 10**5)
plt.legend()
plt.savefig("Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
#Saving best fit data.
gaol[float(round(CC,2))].append([L, p, -popt[1], perr[1], -popt[2], perr[2]])'''
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
#break;
#Saving as CSVs.
'''if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("%d" %(L))==False):
os.mkdir("%d" %(L))
os.chdir("%d" %(L))
K= [0.6, 0.7, 0.75, 0.8, 0.9, 0.95]
heado = 'L, p, alpha, SD(alpha), lambda, SD(lambda)'
for k in K:
np.savetxt("BestFitCDF_CC_%3.2F.csv" %(k), gaol[k], delimiter=',', header=heado, comments='#')
os.chdir(r"../../")'''
def main_ccdf_fit():
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
gaol={} #Stores truncated power law fit data.
gaol[0.60] =[]; gaol[0.70] =[]; gaol[0.75] =[];
gaol[0.80] =[]; gaol[0.90] =[]; gaol[0.95] =[];
L=0; crosc= 0.7
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\512" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( p == 0.678):
print(str(CC) + " " + str(p) + " shall be skipped.")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
fit = powerlaw.Fit(data_temp[:,5],discrete=True,estimate_discrete = False) #If you already know xmin pass it as an argument (xmin=value) for speed
print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
print('x_min: ',fit.xmin)
print('alpha: ',fit.truncated_power_law.parameter1)
print('1/lambda: ',1/fit.truncated_power_law.parameter2)
tukan = (-fit.truncated_power_law.parameter1, -fit.truncated_power_law.parameter2)
fig = fit.plot_ccdf(color ='cornflowerblue', ls='-', linewidth=1.1, alpha=0.2)
fit.plot_ccdf(color='darkcyan',marker='o', linestyle='', ms=1.2, alpha=0.35, ax=fig)
#ax = fig.add_subplot(111)
fit.truncated_power_law.plot_ccdf(color='darkslateblue', linestyle='--', label=r'Fit: $ P (S \geq \Delta s) \propto \Delta s^{(%4.3f)}\times e^{(%6.5f)\times \Delta s}$ ' % tukan, ax=fig)
fig.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
#x = fit.xmins
#y = fit.Ds
#plt.ylim(10**(-6.4), 10**(0.1));
plt.xlim(1, 10**5.3)
plt.xlabel(r"$|\Delta s|$")
plt.ylabel(r"$P (S \geq \Delta s)$")
plt.legend()
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
if(os.path.isdir("1-CDF")==False):
os.mkdir("1-CDF")
os.chdir("1-CDF")
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
plt.savefig("Better Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
os.chdir("../../")
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
print("Done with CDF Plots And Fits. Moving On To PDF Plots...")
fig = fit.plot_pdf(color='darkcyan',marker='o', linestyle='', ms=1.5, alpha=0.4)
#fit.plot_pdf(color='darkcyan',marker='o', linestyle='', ms=1.2, alpha=0.35, ax=fig)
#ax = fig.add_subplot(111)
fit.truncated_power_law.plot_pdf(color='darkslateblue', linestyle='--', label=r'Fit: $ P (S = \Delta s) \propto \Delta s^{(%4.3f)}\times e^{(%6.5f)\times \Delta s}$ ' % tukan, ax=fig)
fig.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
#x = fit.xmins
#y = fit.Ds
#plt.ylim(10**(-6.4), 10**(0.1));
plt.xlim(1, 10**5.3)
plt.xlabel(r"$|\Delta s|$")
plt.ylabel(r"$P (S = \Delta s)$")
plt.legend()
plt.savefig("Better Fit PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
comparison_tpl_exp = fit.distribution_compare('truncated_power_law','exponential',normalized_ratio=True)
comparison_tpl_streched_exp = fit.distribution_compare('truncated_power_law','stretched_exponential',normalized_ratio=True)
comparison_tpl_log_normal = fit.distribution_compare('truncated_power_law','lognormal',normalized_ratio=True)
comparison_tpl_pl = fit.distribution_compare('truncated_power_law','power_law',normalized_ratio=True)
f = open("Taupe.txt", "w+")
f.write("LR (Power Law): " + str(comparison_tpl_pl[0]) +" p-value: "+ str(comparison_tpl_pl[1]) +"\n")
f.write("LR (Exponential): " + str(comparison_tpl_exp[0]) +" p-value: "+ str(comparison_tpl_exp[1]) +"\n")
f.write("LR (Log-Normal): " + str(comparison_tpl_log_normal[0]) +" p-value: "+ str(comparison_tpl_log_normal[1]) +"\n")
f.write("LR (Stretched-Exponential): " + str(comparison_tpl_streched_exp[0]) +" p-value: "+ str(comparison_tpl_streched_exp[1]) +"\n")
f.close()
print("LR (Power Law): ",comparison_tpl_pl[0]," p-value: ",comparison_tpl_pl[1])
print("LR (Exponential): ",comparison_tpl_exp[0]," p-value: ",comparison_tpl_exp[1])
print("LR (Log-Normal): ",comparison_tpl_log_normal[0]," p-value: ",comparison_tpl_log_normal[1])
print("LR (Stretched-Exponential): ",comparison_tpl_streched_exp[0]," p-value: ",comparison_tpl_streched_exp[1])
gaol[float(round(CC,2))].append([L, p, fit.xmin, fit.truncated_power_law.parameter1, 1/fit.truncated_power_law.parameter2])
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("%d" %(L))==False):
os.mkdir("%d" %(L))
os.chdir("%d" %(L))
K= [0.6, 0.7, 0.75, 0.8, 0.9, 0.95]
heado = 'L, p, x_min, alpha, 1/lambda'
for k in K:
np.savetxt("Nu_Pow_0_6_BestFitCDF_CC_%3.2F.csv" %(k), gaol[k], delimiter=',', header=heado, comments='#')
os.chdir(r"../../")
def main_cumulative():
p_c = 0.725194
crosc = float(input("Enter a Cross-Correlation Value To Be Analysed (Choose Between 0.95, 0.9, 0.8, 0.75, 0.7 & 0.6):\t"))
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
binder=[]; L=0;
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\512" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
print('Gandu')
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( CC <= crosc - 0.01 or CC >= crosc + 0.01):
print(str(CC) + " shall be skipped.")
continue
if( p == 0.678):
print("Fuck You")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / a for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
a,b =DP_freqs.shape
#col_P= np.zeros((a,1)); col_P = p
DP_freqs = np.insert(DP_freqs, 0, p, axis=1)
'''DP_freqs looks like:
| p, del(s), P(del(s))|
'''
'''DP_freqs = list(DP_freqs.items()) #Converting dictionary to list.
for j in range(0,len(DP_freqs)):
DP_freqs[j].append(p)'''
print(DP_freqs)
if(len(binder)==0):
#First one in the bag.
binder = DP_freqs.tolist()
else:
binder.extend(DP_freqs.tolist())
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("3D")==False):
os.mkdir("3D")
os.chdir("3D")
if(os.path.isdir("%d" %(L))==False):
os.mkdir("%d" %(L))
os.chdir("%d" %(L))
binder= np.array(binder)
fig=plt.figure()
ax = plt.axes(projection='3d')
#surf1 =ax.plot_trisurf(np.log10(binder[:,1]), binder[:,0], np.log10(binder[:,2]), cmap='viridis', edgecolor='none')
'''for k in range(0,len(self.x1)):
#Plotting SD bars
ax.plot([self.x1[k], self.x1[k]], [self.y1[k], self.y1[k]], [self.z1[k] + self.sd_z1[k], self.z1[k] - self.sd_z1[k]], marker="_", markerfacecolor='k', color='k')
'''
surf1 =ax.scatter(np.log10(binder[:,1]), binder[:,0], np.log10(binder[:,2]), c= np.log10(binder[:,2]), cmap='viridis', linewidth=0.5)
cbar1=fig.colorbar(surf1, shrink=0.75)
cbar1.ax.get_yaxis().labelpad = 12
cbar1.ax.set_ylabel(r"$P (S=\Delta s)$", rotation=270)
ax.set_xlabel(r"$log_{10}|\Delta s|$")
ax.set_zlabel(r"$log_{10}|P (S=\Delta s)|$")
ax.set_ylabel("Occupancy rate (p)")
#plt.zscale('log'); plt.xscale('log')
ax.view_init(elev=36.0, azim=-52.0)
ax.legend()
ax.set_title(r"$P (S=\Delta s)$ vs $|\Delta s|$, L = %d, $R_{0,0}$ = %3.2f" %(L,crosc))
plt.savefig("Cumulative Scatter P(del(s)) vs del(s) --- Grid Size (G)_%d - CC_%3.2f.png" %(L,crosc), dpi=550)
plt.show()
plt.close()
'''Now for scatter plot'''
fig=plt.figure(figsize=(6.4,4.8))
#ax = plt.axes(projection='3d')
ax = fig.add_subplot(111,projection='3d')
surf1 =ax.scatter(np.log10(binder[:,1]), binder[:,0], np.log10(binder[:,2]), c= np.log10(binder[:,2]), cmap='viridis', linewidth=0.5)
'''for k in range(0,len(self.x1)):
#Plotting SD bars
ax.plot([self.x1[k], self.x1[k]], [self.y1[k], self.y1[k]], [self.z1[k] + self.sd_z1[k], self.z1[k] - self.sd_z1[k]], marker="_", markerfacecolor='k', color='k')
'''
cbar1=fig.colorbar(surf1, shrink=0.75)
cbar1.ax.get_yaxis().labelpad = 12
cbar1.ax.set_ylabel(r"$log|P (S=\Delta s)|$", rotation=270)
ax.set_xlabel(r"$log_{10}|\Delta s|$")
ax.set_xlim(-0.1, 5)
ax.set_zlabel(r"$log_{10}|P (S=\Delta s)|$")
ax.set_zlim(-6.1, 0)
ax.set_ylabel("Occupancy rate (p)")
#plt.zscale('log'); plt.xscale('log')
#Plotting p_c plane.
x = np.linspace(-1,5.5,10)
z = np.linspace(-7,1,10)
X,Z = np.meshgrid(x,z)
Y= 0*X +0*Z + p_c
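    # Y is a constant surface at the critical occupancy p_c, drawn translucently below so it marks the
    # transition without hiding the scatter points.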
#ax.hold(True) #Preserve pre-plotted elements.
ax.plot_surface(X,Y,Z, alpha= 0.3, color='k', antialiased=True)
ax.text(5, p_c, -1, "$p_{c}(q)$", color='0.5')
'''p_clin = np.array([[0,p_c], [5,p_c]])
lines = LineCollection([p_clin],zorder=1000,color='0.65',lw=2)
ax.add_collection3d(lines, zs=-90)'''
ax.view_init(elev=36.0, azim=-52.0)
ax.legend()
ax.set_title(r"$log|P (S=\Delta s)|$ vs $log|\Delta s|$, L = %d, $R_{0,0}$ = %3.2f" %(L,crosc))
plt.savefig("Cumulative Scatter Plane P(del(s)) vs del(s) --- Grid Size (G)_%d - CC_%3.2f.png" %(L,crosc), dpi=550)
ax.view_init(elev=62.0, azim=-3.0)
plt.savefig("Cumulative Scatter Plane P(del(s)) vs del(s) Top Down --- Grid Size (G)_%d - CC_%3.2f.png" %(L,crosc), dpi=550)
plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
def main_del_s_count():
p_c = 0.725194
crosc = float(input("Enter a Cross-Correlation Value To Be Analysed (Choose Between 0.95, 0.9, 0.8, 0.75, 0.7 & 0.6):\t"))
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
binder=[]; L=0;
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
                print('Skipping excluded dump file: ' + file)
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( CC <= crosc - 0.01 or CC >= crosc + 0.01):
print(str(CC) + " shall be skipped.")
continue
if( p == 0.678):
print("Fuck You")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / a for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
a,b =DP_freqs.shape
#col_P= np.zeros((a,1)); col_P = p
DP_freqs = np.insert(DP_freqs, 0, p, axis=1)
'''DP_freqs looks like:
| p, del(s), P(del(s))|
'''
'''DP_freqs = list(DP_freqs.items()) #Converting dictionary to list.
for j in range(0,len(DP_freqs)):
DP_freqs[j].append(p)'''
print(DP_freqs)
print("Number of del s counts: " + str(a))
binder.append([p, a])
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Bifurcation")==False):
os.mkdir("Bifurcation")
os.chdir("Bifurcation")
if(os.path.isdir("S Count")==False):
os.mkdir("S Count")
os.chdir("S Count")
binder= np.array(binder)
hurtlocker= pan.DataFrame(binder, columns= ["p", r"Number of unique $|\Delta s|$ observations"])
f = sea.scatterplot(data=hurtlocker, x="p" , y=r"Number of unique $|\Delta s|$ observations")#, marker="+")
#sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.2, ax= ax) #, s=1)
    f.set_title(r'Unique $|\Delta s|$ observations, Grid Size (G) = %d, Cross-Correlation = %3.2f' %( L, crosc))
#plt.yscale('log'); #plt.xscale('log')
#plt.ylim(1, 10**5)
plt.axvline(x= p_c, color='0.65')
plt.text(p_c+ 0.003,10**2,r'$p_{c}$',rotation=90, color ='0.65')
plt.savefig("S Count, Grid Size (G) = %d, CC = %3.2f.png" %(L, crosc), dpi=400)
plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
def main_del_s_symmetry():
p_mask=[0.658, 0.666, 0.678, 0.689, 0.701, 0.728, 0.739, 0.743, 0.755, 0.773 ]
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
MastBind=[]; L=0
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( p not in p_mask):
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
data_temp[:,5] -= data_temp[:,4]
#data_temp[:,5] = np.abs(data_temp[:,5])
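                # Note: unlike the other routines, the sign of del(s) is deliberately kept here (np.abs is
                # commented out) so that positive and negative changes can be compared for symmetry below.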
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / (a) for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
#Sorting array in increasing order of del(s).
DP_freqs = DP_freqs[DP_freqs[:,0].argsort()]
#Next, to convert PDF into 1 - CDF (P(S >= (DEL(S))))
print("Sorted del(s) PDF:")
print(DP_freqs)
#DP_freqs[-2,1] += DP_freqs[-1,1]; #DP_freqs[-1,1] = 0
k= len(DP_freqs[:,1]) #Finding total number of del(s) elements
print("Total distinct del(s) samples:\t" +str(k))
'''Performing a log-mod transform
https://blogs.sas.com/content/iml/2014/07/14/log-transformation-of-pos-neg.html
https://juluribk.com/dealing-with-plotting-negative-zero-and-positive-values-in-log-scale.html
'''
DP_freqs[:,0] = np.sign(DP_freqs[:,0])*(np.log10(np.abs(DP_freqs[:,0])+1))
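                # e.g. del(s) = +999 maps to sign(+1) * log10(999 + 1) = +3.0 and del(s) = -999 maps to -3.0,
                # so magnitudes are log-compressed while the sign is preserved and zero stays zero.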
DP_freqs = np.insert(DP_freqs, 2, float(round(CC,2)), axis=1)
DP_freqs = np.insert(DP_freqs, 3, p, axis=1)
'''DP_freqs looks like:
|del(s), P(del(s)), CC, p|
'''
print("Final del(s) PDF:")
print(DP_freqs)
if(len(MastBind)== 0):
#Empty
MastBind = DP_freqs
else:
MastBind = np.concatenate((MastBind, DP_freqs), axis=0)
'''for j in range(k-3, -1, -1):
#Iterate over the PDF function in reverse.
DP_freqs[j,1] += DP_freqs[j+1,1]
print("Sorted del(s) 1-CDF:")
print(DP_freqs)'''
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
if(os.path.isdir("Symmetry")==False):
os.mkdir("Symmetry")
os.chdir("Symmetry")
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$\Delta s$", r"$P (S = \Delta s)$", "Cross-Correlation", "p"])
fig = plt.figure(figsize=(6.4,4.8))
#Overlaying two seaborn plots.
#ax = fig.add_subplot(111)
f= sea.scatterplot(data=hurtlocker, x=r"$\Delta s$" , y=r"$P (S = \Delta s)$")#, alpha=0.5, s=2, ax= ax)
#sea.lineplot(data=hurtlocker, x=r"$\Delta s$" , y=r"$P (S = \Delta s)$", alpha=0.2, ax= ax) #, s=1)
f.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
plt.yscale('log'); #plt.xscale('log')
#plt.xlim(1, 10**5)
plt.ylim(10**(-6.4), 10**(0.1))
#plt.xlim(-5, 5)
'''x1 = np.transpose(DP_freqs[:,0])
x2 = np.transpose(DP_freqs[:,1])
popt, pcov = curve_fit(trunc_pow_law, x1, x2, p0= np.asarray([1, -0.75, -0.0005]), maxfev=5000 )
perr = np.sqrt(np.diag(pcov))
print("SD of exponent:\t" +str(perr[1]) + " for p:\t" +str(p))
tukan= (popt[0], popt[1], perr[1], popt[2], perr[2])
plt.plot(x1, trunc_pow_law(x1, *popt), 'm--', label=r'Fit: $ P (S \geq \Delta s) = %3.2f \times \Delta s^{(%4.3f \mp %4.3f)}\times e^{(%4.3f \mp %4.3f)\times \Delta s}$ ' % tukan )
plt.legend()'''
plt.savefig("Symmetry PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\..\analysis\Mass Action\DP")
#break;
#Plotting cumulative results.
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
if(os.path.isdir("Symmetry")==False):
os.mkdir("Symmetry")
os.chdir("Symmetry")
if(os.path.isdir("Cum")==False):
os.mkdir("Cum")
os.chdir("Cum")
hurtlocker= pan.DataFrame(MastBind, columns= [r"$\Delta s$", r"$P (S = \Delta s)$", "Cross-Correlation", "p"])
fig = plt.figure(figsize=(6.4,4.8))
#Overlaying two seaborn plots.
#ax = fig.add_subplot(111)
f= sea.scatterplot(data=hurtlocker, x=r"$\Delta s$" , y=r"$P (S = \Delta s)$", hue="Cross-Correlation")#, alpha=0.5, s=2, ax= ax)
#sea.lineplot(data=hurtlocker, x=r"$\Delta s$" , y=r"$P (S = \Delta s)$", alpha=0.2, ax= ax) #, s=1)
f.set_title('p = %f, Grid Size (G) = %d' %(MastBind[0,3], L))
plt.yscale('log'); #plt.xscale('log')
#plt.xlim(1, 10**5)
plt.ylim(10**(-6.4), 10**(0.1))
plt.xlim(-5, 5)
plt.savefig("Alt Cum Symmetry PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d.png" %(MastBind[0,3], L), dpi=400)
#plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\..\analysis\Mass Action\DP")
def main_bifurcation():
p_c = 0.725194
crosc = float(input("Enter a Cross-Correlation Value To Be Analysed (Choose Between 0.95, 0.9, 0.8, 0.75, 0.7 & 0.6):\t"))
#crosc =0.8
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
binder=[]; L=0;
for i in range(0,10):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
                print('Skipping excluded dump file: ' + file)
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( CC <= crosc - 0.01 or CC >= crosc + 0.01):
print(str(CC) + " shall be skipped.")
continue
if( p == 0.678):
print("Fuck You")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / a for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
a,b =DP_freqs.shape
split_data = DP_freqs[:,1] < 10**(-5.6)
DP_freqs = DP_freqs[split_data]
print("Half Done:")
print(DP_freqs)
split_data = DP_freqs[:,1] > 10**(-6)
                DP_freqs_band = DP_freqs[split_data] #Stores the band of del(s) values whose probability lies between 10^(-6) and 10^(-5.6)
#col_P= np.zeros((a,1)); col_P = p
DP_freqs_band = np.insert(DP_freqs_band, 0, p, axis=1)
DP_freqs_band = DP_freqs_band[DP_freqs_band[:,1].argsort()]
#Sorting in increasing values of del(s)
print("Total number of points in given gap for p:\t"+str(p) +" is: \t" +str(len(DP_freqs_band[:,2])) +"\n")
print(DP_freqs_band)
'''DP_freqs looks like:
| p, del(s), P(del(s))|
'''
flag=0
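                # Scan neighbouring del(s) values (sorted in increasing del(s) above): a jump larger than
                # ~411 (~10^3.3 - 10^3.2) between consecutive entries is treated as a separate branch, and
                # each such del(s) is recorded as a bifurcation point for this p; if no jump is found, only
                # the largest del(s) in the band is kept.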
for j in range(1, len(DP_freqs_band[:,2])-1):
                    if(abs(DP_freqs_band[j,1] -DP_freqs_band[j-1,1]) > 411 or abs(DP_freqs_band[j,1] -DP_freqs_band[j+1,1]) > 411):
# 10^(3.3) - 10^(3.2) = 410.369
binder.append([p,DP_freqs_band[j,1]])
flag=1
if(flag==0):
#No del(s) value satisfied the bandwidth demand.
#if()
binder.append([p,DP_freqs_band[-1,1]])
#Append the very last value
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Bifurcation")==False):
os.mkdir("Bifurcation")
os.chdir("Bifurcation")
if(os.path.isdir("%d" %(L))==False):
os.mkdir("%d" %(L))
os.chdir("%d" %(L))
binder= np.array(binder)
hurtlocker= pan.DataFrame(binder, columns= ["p", r"$|\Delta s|$ s.t. $P (\Delta s \geq 10^{-6})$"])
f = sea.scatterplot(data=hurtlocker, x="p" , y=r"$|\Delta s|$ s.t. $P (\Delta s \geq 10^{-6})$", marker="+")
#sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S \geq \Delta s)$", alpha=0.2, ax= ax) #, s=1)
f.set_title('Bifurcation Map, Grid Size (G) = %d, Cross-Correlation = %3.2f' %( L, crosc))
plt.yscale('log'); #plt.xscale('log')
plt.ylim(1, 10**5)
plt.axvline(x= p_c, color='0.65')
plt.text(p_c+ 0.003,10**1,r'$p_{c}$',rotation=90, color ='0.65')
plt.savefig("Bifurcation Map, Grid Size (G) = %d, CC = %3.2f.png" %(L, crosc), dpi=400)
plt.show()
plt.close()
os.chdir(r"..\..\..\..\..\analysis\Mass Action\DP")
def plot_fit_pdf():
twist =(-1.2912647288993737, -(1/37.72480211483688))
fandango = np.genfromtxt("PissingAbout15+16.csv", delimiter=",", comments='#', skip_header=1)
#Stores decay data of cross-correlation between frames as a function of p.
gaol={} #Stores truncated power law fit data.
gaol[0.60] =[]; gaol[0.70] =[]; gaol[0.75] =[];
gaol[0.80] =[]; gaol[0.90] =[]; gaol[0.95] =[];
L=0; crosc= 0.7
for i in range(0,1):
base_path = r"22Apret\Apres 256+512\256" + "\\" + str(i)
files = glob.glob(base_path + "**/**/*.csv", recursive=True)
for file in files:
if (file == base_path + r"\dump\15_16_KungF---U.csv"):
continue
if (os.path.getsize(file) > 4096):
#Keeping unwanted files out.
print(file)
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1, max_rows=3)
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
if( CC <= crosc - 0.01 or CC >= crosc + 0.01 or p != 0.66):
print(str(CC) + " " + str(p) + " shall be skipped.")
continue
data_temp= np.genfromtxt(file, delimiter=",", comments='#', skip_header=1)
'''
data_temp resembles:
| L, p, lag, #, s, s + del(s) |
'''
'''
p= data_temp[0,1]; L= int(data_temp[0,0]); CC= cross_cor(fandango, data_temp[0,2], L, p)
data_temp[:,5] -= data_temp[:,4]
data_temp[:,5] = np.abs(data_temp[:,5])
temp_freqs = dict(collections.Counter(data_temp[:,5]))
a,b = data_temp.shape
DP_freqs = {k: v / (a) for k, v in temp_freqs.items()}
DP_freqs = np.array(list(DP_freqs.items())) #Converting dictionary to numpy array.
#Sorting array in increasing order of del(s).
DP_freqs = DP_freqs[DP_freqs[:,0].argsort()]
print("Sorted del(s) PDF:")
print(DP_freqs)
os.chdir("../../../figures")
if(os.path.isdir("del_S")==False):
os.mkdir("del_S")
os.chdir("del_S")
if(os.path.isdir("DP")==False):
os.mkdir("DP")
os.chdir("DP")
if(os.path.isdir("Individual")==False):
os.mkdir("Individual")
os.chdir("Individual")
if(os.path.isdir("1-CDF")==False):
os.mkdir("1-CDF")
os.chdir("1-CDF")
if(os.path.isdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))==False):
os.mkdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
os.chdir("L_%d_p_%4.3f" %(int(data_temp[0,0]), data_temp[0,1]))
print("p:\t" +str(p) + " L:\t"+ str(L) + " CC:\t" +str(CC))
hurtlocker= pan.DataFrame(DP_freqs, columns= [r"$|\Delta s|$", r"$P (S = \Delta s)$"])
fig = plt.figure(figsize=(6.4,4.8))
#Overlaying two seaborn plots.
ax = fig.add_subplot(111)
sea.scatterplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S = \Delta s)$", ax= ax)#, alpha=0.5, s=2, ax= ax)
#sea.lineplot(data=hurtlocker, x=r"$|\Delta s|$" , y=r"$P (S = \Delta s)$", alpha=0.2, ax= ax) #, s=1)
ax.set_title('p = %f, Grid Size (G) = %d, Cross-Correlation = %3.2f' %(p, L, CC))
plt.yscale('log'); plt.xscale('log')
plt.xlim(1, 10**5)
plt.ylim(10**(-6.3), 10**(0.1))
x1 = np.transpose(DP_freqs[:,0])
x2 = np.transpose(DP_freqs[:,1])
#popt, pcov = curve_fit(trunc_pow_law, x1, x2, p0= np.asarray([1, -0.75, -0.0005]), maxfev=5000 )
#perr = np.sqrt(np.diag(pcov))
#print("SD of exponent:\t" +str(perr[1]) + " for p:\t" +str(p))
#tukan= (popt[0], popt[1], perr[1], popt[2], perr[2])
plt.plot(x1, trunc_pow_law(x1, *twist), color='darkslateblue', linestyle='--', label=r'Fit: $ P (S = \Delta s) = %3.2f \times \Delta s^{(%4.3f)}\times e^{(%6.5f)\times \Delta s}$ ' % tukan )
plt.ylim(10**(-6.4), 10**(0.1)); plt.xlim(1, 10**5)
plt.legend()
plt.savefig("Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
#Next, to convert PDF into 1 - CDF (P(S >= (DEL(S))))
DP_freqs[-2,1] += DP_freqs[-1,1]; #DP_freqs[-1,1] = 0
k= len(DP_freqs[:,1]) #Finding total number of del(s) elements
print("Total distinct del(s) samples:\t" +str(k))
for j in range(k-3, -1, -1):
#Iterate over the PDF function in reverse.
DP_freqs[j,1] += DP_freqs[j+1,1]
print("Sorted del(s) 1-CDF:")
print(DP_freqs)
plt.savefig("Even Better Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png" %(p,L,CC), dpi=400)
#plt.show()
plt.close()
comparison_tpl_exp = fit.distribution_compare('truncated_power_law','exponential',normalized_ratio=True)
comparison_tpl_streched_exp = fit.distribution_compare('truncated_power_law','stretched_exponential',normalized_ratio=True)
comparison_tpl_log_normal = fit.distribution_compare('truncated_power_law','lognormal',normalized_ratio=True)
comparison_tpl_pl = fit.distribution_compare('truncated_power_law','power_law',normalized_ratio=True)
print("LR (Power Law): ",comparison_tpl_pl[0]," p-value: ",comparison_tpl_pl[1])
print("LR (Exponential): ",comparison_tpl_exp[0]," p-value: ",comparison_tpl_exp[1])
print("LR (Log-Normal): ",comparison_tpl_log_normal[0]," p-value: ",comparison_tpl_log_normal[1])
print("LR (Stretched-Exponential): ",comparison_tpl_streched_exp[0]," p-value: ",comparison_tpl_streched_exp[1])
gaol[float(round(CC,2))].append([L, p, fit.xmin, fit.truncated_power_law.parameter1, 1/fit.truncated_power_law.parameter2])
os.chdir(r"..\..\..\..\..\..\analysis\Mass Action\DP")
'''
def cross_cor(grim_fandango, lag, L, p):
CC=0; k= 128/L
for t in range(0, len(grim_fandango[:,0])):
if grim_fandango[t,0] == p:
CC = grim_fandango[t,1]+ grim_fandango[t,3]*(math.exp(lag*grim_fandango[t,5]*k*k)); break;
#Calculating cross-correlation b/w frames.
print("CC:\t"+ str(CC))
return CC;
main_ind()
|
[
"numpy.log10",
"powerlaw.Fit",
"matplotlib.pyplot.ylabel",
"numpy.array",
"seaborn.scatterplot",
"math.exp",
"numpy.genfromtxt",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.linspace",
"os.path.isdir",
"os.mkdir",
"numpy.concatenate",
"pandas.DataFrame",
"numpy.meshgrid",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.yscale",
"glob.glob",
"numpy.abs",
"os.path.getsize",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axes",
"numpy.sign",
"numpy.savetxt",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.insert",
"matplotlib.pyplot.text",
"numpy.power",
"os.chdir",
"collections.Counter",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.xscale"
] |
[((627, 713), 'numpy.genfromtxt', 'np.genfromtxt', (['"""PissingAbout15+16.csv"""'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "('PissingAbout15+16.csv', delimiter=',', comments='#',\n skip_header=1)\n", (640, 713), True, 'import numpy as np\n'), ((7089, 7175), 'numpy.genfromtxt', 'np.genfromtxt', (['"""PissingAbout15+16.csv"""'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "('PissingAbout15+16.csv', delimiter=',', comments='#',\n skip_header=1)\n", (7102, 7175), True, 'import numpy as np\n'), ((14506, 14523), 'os.chdir', 'os.chdir', (['"""del_S"""'], {}), "('del_S')\n", (14514, 14523), False, 'import os\n'), ((14597, 14615), 'os.chdir', 'os.chdir', (["('%d' % L)"], {}), "('%d' % L)\n", (14605, 14615), False, 'import os\n'), ((14835, 14853), 'os.chdir', 'os.chdir', (['"""../../"""'], {}), "('../../')\n", (14843, 14853), False, 'import os\n'), ((15071, 15157), 'numpy.genfromtxt', 'np.genfromtxt', (['"""PissingAbout15+16.csv"""'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "('PissingAbout15+16.csv', delimiter=',', comments='#',\n skip_header=1)\n", (15084, 15157), True, 'import numpy as np\n'), ((17627, 17655), 'os.chdir', 'os.chdir', (['"""../../../figures"""'], {}), "('../../../figures')\n", (17635, 17655), False, 'import os\n'), ((17725, 17742), 'os.chdir', 'os.chdir', (['"""del_S"""'], {}), "('del_S')\n", (17733, 17742), False, 'import os\n'), ((17806, 17820), 'os.chdir', 'os.chdir', (['"""DP"""'], {}), "('DP')\n", (17814, 17820), False, 'import os\n'), ((17884, 17898), 'os.chdir', 'os.chdir', (['"""3D"""'], {}), "('3D')\n", (17892, 17898), False, 'import os\n'), ((17972, 17990), 'os.chdir', 'os.chdir', (["('%d' % L)"], {}), "('%d' % L)\n", (17980, 17990), False, 'import os\n'), ((18021, 18037), 'numpy.array', 'np.array', (['binder'], {}), '(binder)\n', (18029, 18037), True, 'import numpy as np\n'), ((18051, 18063), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18061, 18063), True, 'import matplotlib.pyplot as plt\n'), ((18073, 18098), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (18081, 18098), True, 'import matplotlib.pyplot as plt\n'), ((19094, 19216), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Cumulative Scatter P(del(s)) vs del(s) --- Grid Size (G)_%d - CC_%3.2f.png'\n % (L, crosc))"], {'dpi': '(550)'}), "(\n 'Cumulative Scatter P(del(s)) vs del(s) --- Grid Size (G)_%d - CC_%3.2f.png'\n % (L, crosc), dpi=550)\n", (19105, 19216), True, 'import matplotlib.pyplot as plt\n'), ((19209, 19219), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19217, 19219), True, 'import matplotlib.pyplot as plt\n'), ((19224, 19235), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19233, 19235), True, 'import matplotlib.pyplot as plt\n'), ((19290, 19320), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4, 4.8)'}), '(figsize=(6.4, 4.8))\n', (19300, 19320), True, 'import matplotlib.pyplot as plt\n'), ((20201, 20225), 'numpy.linspace', 'np.linspace', (['(-1)', '(5.5)', '(10)'], {}), '(-1, 5.5, 10)\n', (20212, 20225), True, 'import numpy as np\n'), ((20232, 20254), 'numpy.linspace', 'np.linspace', (['(-7)', '(1)', '(10)'], {}), '(-7, 1, 10)\n', (20243, 20254), True, 'import numpy as np\n'), ((20263, 20280), 'numpy.meshgrid', 'np.meshgrid', (['x', 'z'], {}), '(x, z)\n', (20274, 20280), True, 'import numpy as np\n'), ((20801, 20929), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Cumulative Scatter Plane P(del(s)) vs 
del(s) --- Grid Size (G)_%d - CC_%3.2f.png'\n % (L, crosc))"], {'dpi': '(550)'}), "(\n 'Cumulative Scatter Plane P(del(s)) vs del(s) --- Grid Size (G)_%d - CC_%3.2f.png'\n % (L, crosc), dpi=550)\n", (20812, 20929), True, 'import matplotlib.pyplot as plt\n'), ((20966, 21102), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Cumulative Scatter Plane P(del(s)) vs del(s) Top Down --- Grid Size (G)_%d - CC_%3.2f.png'\n % (L, crosc))"], {'dpi': '(550)'}), "(\n 'Cumulative Scatter Plane P(del(s)) vs del(s) Top Down --- Grid Size (G)_%d - CC_%3.2f.png'\n % (L, crosc), dpi=550)\n", (20977, 21102), True, 'import matplotlib.pyplot as plt\n'), ((21095, 21105), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21103, 21105), True, 'import matplotlib.pyplot as plt\n'), ((21110, 21121), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (21119, 21121), True, 'import matplotlib.pyplot as plt\n'), ((21131, 21188), 'os.chdir', 'os.chdir', (['"""..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP"""'], {}), "('..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP')\n", (21139, 21188), False, 'import os\n'), ((21393, 21479), 'numpy.genfromtxt', 'np.genfromtxt', (['"""PissingAbout15+16.csv"""'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "('PissingAbout15+16.csv', delimiter=',', comments='#',\n skip_header=1)\n", (21406, 21479), True, 'import numpy as np\n'), ((23828, 23856), 'os.chdir', 'os.chdir', (['"""../../../figures"""'], {}), "('../../../figures')\n", (23836, 23856), False, 'import os\n'), ((23926, 23943), 'os.chdir', 'os.chdir', (['"""del_S"""'], {}), "('del_S')\n", (23934, 23943), False, 'import os\n'), ((24007, 24021), 'os.chdir', 'os.chdir', (['"""DP"""'], {}), "('DP')\n", (24015, 24021), False, 'import os\n'), ((24103, 24126), 'os.chdir', 'os.chdir', (['"""Bifurcation"""'], {}), "('Bifurcation')\n", (24111, 24126), False, 'import os\n'), ((24200, 24219), 'os.chdir', 'os.chdir', (['"""S Count"""'], {}), "('S Count')\n", (24208, 24219), False, 'import os\n'), ((24249, 24265), 'numpy.array', 'np.array', (['binder'], {}), '(binder)\n', (24257, 24265), True, 'import numpy as np\n'), ((24287, 24374), 'pandas.DataFrame', 'pan.DataFrame', (['binder'], {'columns': "['p', 'Number of unique $|\\\\Delta s|$ observations']"}), "(binder, columns=['p',\n 'Number of unique $|\\\\Delta s|$ observations'])\n", (24300, 24374), True, 'import pandas as pan\n'), ((24380, 24473), 'seaborn.scatterplot', 'sea.scatterplot', ([], {'data': 'hurtlocker', 'x': '"""p"""', 'y': '"""Number of unique $|\\\\Delta s|$ observations"""'}), "(data=hurtlocker, x='p', y=\n 'Number of unique $|\\\\Delta s|$ observations')\n", (24395, 24473), True, 'import seaborn as sea\n'), ((24777, 24809), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'p_c', 'color': '"""0.65"""'}), "(x=p_c, color='0.65')\n", (24788, 24809), True, 'import matplotlib.pyplot as plt\n'), ((24815, 24883), 'matplotlib.pyplot.text', 'plt.text', (['(p_c + 0.003)', '(10 ** 2)', '"""$p_{c}$"""'], {'rotation': '(90)', 'color': '"""0.65"""'}), "(p_c + 0.003, 10 ** 2, '$p_{c}$', rotation=90, color='0.65')\n", (24823, 24883), True, 'import matplotlib.pyplot as plt\n'), ((24889, 24974), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('S Count, Grid Size (G) = %d, CC = %3.2f.png' % (L, crosc))"], {'dpi': '(400)'}), "('S Count, Grid Size (G) = %d, CC = %3.2f.png' % (L, crosc), dpi=400\n )\n", (24900, 24974), True, 'import matplotlib.pyplot as plt\n'), ((24973, 24983), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (24981, 24983), True, 'import matplotlib.pyplot as plt\n'), ((24988, 24999), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (24997, 24999), True, 'import matplotlib.pyplot as plt\n'), ((25009, 25066), 'os.chdir', 'os.chdir', (['"""..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP"""'], {}), "('..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP')\n", (25017, 25066), False, 'import os\n'), ((25206, 25292), 'numpy.genfromtxt', 'np.genfromtxt', (['"""PissingAbout15+16.csv"""'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "('PissingAbout15+16.csv', delimiter=',', comments='#',\n skip_header=1)\n", (25219, 25292), True, 'import numpy as np\n'), ((33182, 33268), 'numpy.genfromtxt', 'np.genfromtxt', (['"""PissingAbout15+16.csv"""'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "('PissingAbout15+16.csv', delimiter=',', comments='#',\n skip_header=1)\n", (33195, 33268), True, 'import numpy as np\n'), ((36629, 36657), 'os.chdir', 'os.chdir', (['"""../../../figures"""'], {}), "('../../../figures')\n", (36637, 36657), False, 'import os\n'), ((36727, 36744), 'os.chdir', 'os.chdir', (['"""del_S"""'], {}), "('del_S')\n", (36735, 36744), False, 'import os\n'), ((36808, 36822), 'os.chdir', 'os.chdir', (['"""DP"""'], {}), "('DP')\n", (36816, 36822), False, 'import os\n'), ((36904, 36927), 'os.chdir', 'os.chdir', (['"""Bifurcation"""'], {}), "('Bifurcation')\n", (36912, 36927), False, 'import os\n'), ((37001, 37019), 'os.chdir', 'os.chdir', (["('%d' % L)"], {}), "('%d' % L)\n", (37009, 37019), False, 'import os\n'), ((37050, 37066), 'numpy.array', 'np.array', (['binder'], {}), '(binder)\n', (37058, 37066), True, 'import numpy as np\n'), ((37088, 37180), 'pandas.DataFrame', 'pan.DataFrame', (['binder'], {'columns': "['p', '$|\\\\Delta s|$ s.t. $P (\\\\Delta s \\\\geq 10^{-6})$']"}), "(binder, columns=['p',\n '$|\\\\Delta s|$ s.t. $P (\\\\Delta s \\\\geq 10^{-6})$'])\n", (37101, 37180), True, 'import pandas as pan\n'), ((37184, 37294), 'seaborn.scatterplot', 'sea.scatterplot', ([], {'data': 'hurtlocker', 'x': '"""p"""', 'y': '"""$|\\\\Delta s|$ s.t. $P (\\\\Delta s \\\\geq 10^{-6})$"""', 'marker': '"""+"""'}), "(data=hurtlocker, x='p', y=\n '$|\\\\Delta s|$ s.t. 
$P (\\\\Delta s \\\\geq 10^{-6})$', marker='+')\n", (37199, 37294), True, 'import seaborn as sea\n'), ((37498, 37515), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (37508, 37515), True, 'import matplotlib.pyplot as plt\n'), ((37540, 37560), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1)', '(10 ** 5)'], {}), '(1, 10 ** 5)\n', (37548, 37560), True, 'import matplotlib.pyplot as plt\n'), ((37563, 37595), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'p_c', 'color': '"""0.65"""'}), "(x=p_c, color='0.65')\n", (37574, 37595), True, 'import matplotlib.pyplot as plt\n'), ((37601, 37669), 'matplotlib.pyplot.text', 'plt.text', (['(p_c + 0.003)', '(10 ** 1)', '"""$p_{c}$"""'], {'rotation': '(90)', 'color': '"""0.65"""'}), "(p_c + 0.003, 10 ** 1, '$p_{c}$', rotation=90, color='0.65')\n", (37609, 37669), True, 'import matplotlib.pyplot as plt\n'), ((37675, 37767), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Bifurcation Map, Grid Size (G) = %d, CC = %3.2f.png' % (L, crosc))"], {'dpi': '(400)'}), "('Bifurcation Map, Grid Size (G) = %d, CC = %3.2f.png' % (L,\n crosc), dpi=400)\n", (37686, 37767), True, 'import matplotlib.pyplot as plt\n'), ((37767, 37777), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37775, 37777), True, 'import matplotlib.pyplot as plt\n'), ((37782, 37793), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (37791, 37793), True, 'import matplotlib.pyplot as plt\n'), ((37803, 37860), 'os.chdir', 'os.chdir', (['"""..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP"""'], {}), "('..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP')\n", (37811, 37860), False, 'import os\n'), ((37967, 38053), 'numpy.genfromtxt', 'np.genfromtxt', (['"""PissingAbout15+16.csv"""'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "('PissingAbout15+16.csv', delimiter=',', comments='#',\n skip_header=1)\n", (37980, 38053), True, 'import numpy as np\n'), ((445, 462), 'numpy.power', 'np.power', (['x', 'expo'], {}), '(x, expo)\n', (453, 462), True, 'import numpy as np\n'), ((568, 590), 'numpy.exp', 'np.exp', (['(trunc_expo * x)'], {}), '(trunc_expo * x)\n', (574, 590), True, 'import numpy as np\n'), ((1058, 1110), 'glob.glob', 'glob.glob', (["(base_path + '**/**/*.csv')"], {'recursive': '(True)'}), "(base_path + '**/**/*.csv', recursive=True)\n", (1067, 1110), False, 'import glob\n'), ((7533, 7585), 'glob.glob', 'glob.glob', (["(base_path + '**/**/*.csv')"], {'recursive': '(True)'}), "(base_path + '**/**/*.csv', recursive=True)\n", (7542, 7585), False, 'import glob\n'), ((14444, 14466), 'os.path.isdir', 'os.path.isdir', (['"""del_S"""'], {}), "('del_S')\n", (14457, 14466), False, 'import os\n'), ((14484, 14501), 'os.mkdir', 'os.mkdir', (['"""del_S"""'], {}), "('del_S')\n", (14492, 14501), False, 'import os\n'), ((14531, 14554), 'os.path.isdir', 'os.path.isdir', (["('%d' % L)"], {}), "('%d' % L)\n", (14544, 14554), False, 'import os\n'), ((14573, 14591), 'os.mkdir', 'os.mkdir', (["('%d' % L)"], {}), "('%d' % L)\n", (14581, 14591), False, 'import os\n'), ((14725, 14833), 'numpy.savetxt', 'np.savetxt', (["('Nu_Pow_0_6_BestFitCDF_CC_%3.2F.csv' % k)", 'gaol[k]'], {'delimiter': '""","""', 'header': 'heado', 'comments': '"""#"""'}), "('Nu_Pow_0_6_BestFitCDF_CC_%3.2F.csv' % k, gaol[k], delimiter=',',\n header=heado, comments='#')\n", (14735, 14833), True, 'import numpy as np\n'), ((15360, 15412), 'glob.glob', 'glob.glob', (["(base_path + '**/**/*.csv')"], {'recursive': '(True)'}), "(base_path + '**/**/*.csv', 
recursive=True)\n", (15369, 15412), False, 'import glob\n'), ((17663, 17685), 'os.path.isdir', 'os.path.isdir', (['"""del_S"""'], {}), "('del_S')\n", (17676, 17685), False, 'import os\n'), ((17703, 17720), 'os.mkdir', 'os.mkdir', (['"""del_S"""'], {}), "('del_S')\n", (17711, 17720), False, 'import os\n'), ((17750, 17769), 'os.path.isdir', 'os.path.isdir', (['"""DP"""'], {}), "('DP')\n", (17763, 17769), False, 'import os\n'), ((17787, 17801), 'os.mkdir', 'os.mkdir', (['"""DP"""'], {}), "('DP')\n", (17795, 17801), False, 'import os\n'), ((17828, 17847), 'os.path.isdir', 'os.path.isdir', (['"""3D"""'], {}), "('3D')\n", (17841, 17847), False, 'import os\n'), ((17865, 17879), 'os.mkdir', 'os.mkdir', (['"""3D"""'], {}), "('3D')\n", (17873, 17879), False, 'import os\n'), ((17906, 17929), 'os.path.isdir', 'os.path.isdir', (["('%d' % L)"], {}), "('%d' % L)\n", (17919, 17929), False, 'import os\n'), ((17948, 17966), 'os.mkdir', 'os.mkdir', (["('%d' % L)"], {}), "('%d' % L)\n", (17956, 17966), False, 'import os\n'), ((18485, 18507), 'numpy.log10', 'np.log10', (['binder[:, 1]'], {}), '(binder[:, 1])\n', (18493, 18507), True, 'import numpy as np\n'), ((18521, 18543), 'numpy.log10', 'np.log10', (['binder[:, 2]'], {}), '(binder[:, 2])\n', (18529, 18543), True, 'import numpy as np\n'), ((19424, 19446), 'numpy.log10', 'np.log10', (['binder[:, 1]'], {}), '(binder[:, 1])\n', (19432, 19446), True, 'import numpy as np\n'), ((19460, 19482), 'numpy.log10', 'np.log10', (['binder[:, 2]'], {}), '(binder[:, 2])\n', (19468, 19482), True, 'import numpy as np\n'), ((21682, 21734), 'glob.glob', 'glob.glob', (["(base_path + '**/**/*.csv')"], {'recursive': '(True)'}), "(base_path + '**/**/*.csv', recursive=True)\n", (21691, 21734), False, 'import glob\n'), ((23864, 23886), 'os.path.isdir', 'os.path.isdir', (['"""del_S"""'], {}), "('del_S')\n", (23877, 23886), False, 'import os\n'), ((23904, 23921), 'os.mkdir', 'os.mkdir', (['"""del_S"""'], {}), "('del_S')\n", (23912, 23921), False, 'import os\n'), ((23951, 23970), 'os.path.isdir', 'os.path.isdir', (['"""DP"""'], {}), "('DP')\n", (23964, 23970), False, 'import os\n'), ((23988, 24002), 'os.mkdir', 'os.mkdir', (['"""DP"""'], {}), "('DP')\n", (23996, 24002), False, 'import os\n'), ((24029, 24057), 'os.path.isdir', 'os.path.isdir', (['"""Bifurcation"""'], {}), "('Bifurcation')\n", (24042, 24057), False, 'import os\n'), ((24075, 24098), 'os.mkdir', 'os.mkdir', (['"""Bifurcation"""'], {}), "('Bifurcation')\n", (24083, 24098), False, 'import os\n'), ((24134, 24158), 'os.path.isdir', 'os.path.isdir', (['"""S Count"""'], {}), "('S Count')\n", (24147, 24158), False, 'import os\n'), ((24176, 24195), 'os.mkdir', 'os.mkdir', (['"""S Count"""'], {}), "('S Count')\n", (24184, 24195), False, 'import os\n'), ((25475, 25527), 'glob.glob', 'glob.glob', (["(base_path + '**/**/*.csv')"], {'recursive': '(True)'}), "(base_path + '**/**/*.csv', recursive=True)\n", (25484, 25527), False, 'import glob\n'), ((31456, 31484), 'os.chdir', 'os.chdir', (['"""../../../figures"""'], {}), "('../../../figures')\n", (31464, 31484), False, 'import os\n'), ((31575, 31592), 'os.chdir', 'os.chdir', (['"""del_S"""'], {}), "('del_S')\n", (31583, 31592), False, 'import os\n'), ((31668, 31682), 'os.chdir', 'os.chdir', (['"""DP"""'], {}), "('DP')\n", (31676, 31682), False, 'import os\n'), ((31774, 31796), 'os.chdir', 'os.chdir', (['"""Individual"""'], {}), "('Individual')\n", (31782, 31796), False, 'import os\n'), ((31884, 31904), 'os.chdir', 'os.chdir', (['"""Symmetry"""'], {}), "('Symmetry')\n", (31892, 31904), 
False, 'import os\n'), ((31982, 31997), 'os.chdir', 'os.chdir', (['"""Cum"""'], {}), "('Cum')\n", (31990, 31997), False, 'import os\n'), ((32027, 32128), 'pandas.DataFrame', 'pan.DataFrame', (['MastBind'], {'columns': "['$\\\\Delta s$', '$P (S = \\\\Delta s)$', 'Cross-Correlation', 'p']"}), "(MastBind, columns=['$\\\\Delta s$', '$P (S = \\\\Delta s)$',\n 'Cross-Correlation', 'p'])\n", (32040, 32128), True, 'import pandas as pan\n'), ((32140, 32170), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4, 4.8)'}), '(figsize=(6.4, 4.8))\n', (32150, 32170), True, 'import matplotlib.pyplot as plt\n'), ((32272, 32375), 'seaborn.scatterplot', 'sea.scatterplot', ([], {'data': 'hurtlocker', 'x': '"""$\\\\Delta s$"""', 'y': '"""$P (S = \\\\Delta s)$"""', 'hue': '"""Cross-Correlation"""'}), "(data=hurtlocker, x='$\\\\Delta s$', y='$P (S = \\\\Delta s)$',\n hue='Cross-Correlation')\n", (32287, 32375), True, 'import seaborn as sea\n'), ((32586, 32603), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (32596, 32603), True, 'import matplotlib.pyplot as plt\n'), ((32660, 32691), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(10 ** -6.4)', '(10 ** 0.1)'], {}), '(10 ** -6.4, 10 ** 0.1)\n', (32668, 32691), True, 'import matplotlib.pyplot as plt\n'), ((32700, 32715), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-5)', '(5)'], {}), '(-5, 5)\n', (32708, 32715), True, 'import matplotlib.pyplot as plt\n'), ((32733, 32859), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Alt Cum Symmetry PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d.png' %\n (MastBind[0, 3], L))"], {'dpi': '(400)'}), "(\n 'Alt Cum Symmetry PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d.png' %\n (MastBind[0, 3], L), dpi=400)\n", (32744, 32859), True, 'import matplotlib.pyplot as plt\n'), ((32877, 32888), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (32886, 32888), True, 'import matplotlib.pyplot as plt\n'), ((32897, 32958), 'os.chdir', 'os.chdir', (['"""..\\\\..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP"""'], {}), "('..\\\\..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP')\n", (32905, 32958), False, 'import os\n'), ((33471, 33523), 'glob.glob', 'glob.glob', (["(base_path + '**/**/*.csv')"], {'recursive': '(True)'}), "(base_path + '**/**/*.csv', recursive=True)\n", (33480, 33523), False, 'import glob\n'), ((36665, 36687), 'os.path.isdir', 'os.path.isdir', (['"""del_S"""'], {}), "('del_S')\n", (36678, 36687), False, 'import os\n'), ((36705, 36722), 'os.mkdir', 'os.mkdir', (['"""del_S"""'], {}), "('del_S')\n", (36713, 36722), False, 'import os\n'), ((36752, 36771), 'os.path.isdir', 'os.path.isdir', (['"""DP"""'], {}), "('DP')\n", (36765, 36771), False, 'import os\n'), ((36789, 36803), 'os.mkdir', 'os.mkdir', (['"""DP"""'], {}), "('DP')\n", (36797, 36803), False, 'import os\n'), ((36830, 36858), 'os.path.isdir', 'os.path.isdir', (['"""Bifurcation"""'], {}), "('Bifurcation')\n", (36843, 36858), False, 'import os\n'), ((36876, 36899), 'os.mkdir', 'os.mkdir', (['"""Bifurcation"""'], {}), "('Bifurcation')\n", (36884, 36899), False, 'import os\n'), ((36935, 36958), 'os.path.isdir', 'os.path.isdir', (["('%d' % L)"], {}), "('%d' % L)\n", (36948, 36958), False, 'import os\n'), ((36977, 36995), 'os.mkdir', 'os.mkdir', (["('%d' % L)"], {}), "('%d' % L)\n", (36985, 36995), False, 'import os\n'), ((38410, 38462), 'glob.glob', 'glob.glob', (["(base_path + '**/**/*.csv')"], {'recursive': '(True)'}), "(base_path + '**/**/*.csv', recursive=True)\n", (38419, 38462), False, 'import glob\n'), 
((549, 566), 'numpy.power', 'np.power', (['x', 'expo'], {}), '(x, expo)\n', (557, 566), True, 'import numpy as np\n'), ((18547, 18569), 'numpy.log10', 'np.log10', (['binder[:, 2]'], {}), '(binder[:, 2])\n', (18555, 18569), True, 'import numpy as np\n'), ((19486, 19508), 'numpy.log10', 'np.log10', (['binder[:, 2]'], {}), '(binder[:, 2])\n', (19494, 19508), True, 'import numpy as np\n'), ((31505, 31527), 'os.path.isdir', 'os.path.isdir', (['"""del_S"""'], {}), "('del_S')\n", (31518, 31527), False, 'import os\n'), ((31549, 31566), 'os.mkdir', 'os.mkdir', (['"""del_S"""'], {}), "('del_S')\n", (31557, 31566), False, 'import os\n'), ((31604, 31623), 'os.path.isdir', 'os.path.isdir', (['"""DP"""'], {}), "('DP')\n", (31617, 31623), False, 'import os\n'), ((31645, 31659), 'os.mkdir', 'os.mkdir', (['"""DP"""'], {}), "('DP')\n", (31653, 31659), False, 'import os\n'), ((31694, 31721), 'os.path.isdir', 'os.path.isdir', (['"""Individual"""'], {}), "('Individual')\n", (31707, 31721), False, 'import os\n'), ((31743, 31765), 'os.mkdir', 'os.mkdir', (['"""Individual"""'], {}), "('Individual')\n", (31751, 31765), False, 'import os\n'), ((31808, 31833), 'os.path.isdir', 'os.path.isdir', (['"""Symmetry"""'], {}), "('Symmetry')\n", (31821, 31833), False, 'import os\n'), ((31855, 31875), 'os.mkdir', 'os.mkdir', (['"""Symmetry"""'], {}), "('Symmetry')\n", (31863, 31875), False, 'import os\n'), ((31916, 31936), 'os.path.isdir', 'os.path.isdir', (['"""Cum"""'], {}), "('Cum')\n", (31929, 31936), False, 'import os\n'), ((31958, 31973), 'os.mkdir', 'os.mkdir', (['"""Cum"""'], {}), "('Cum')\n", (31966, 31973), False, 'import os\n'), ((1246, 1267), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (1261, 1267), False, 'import os\n'), ((1394, 1457), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "(file, delimiter=',', comments='#', skip_header=1)\n", (1407, 1457), True, 'import numpy as np\n'), ((1953, 1976), 'numpy.abs', 'np.abs', (['data_temp[:, 5]'], {}), '(data_temp[:, 5])\n', (1959, 1976), True, 'import numpy as np\n'), ((3173, 3201), 'os.chdir', 'os.chdir', (['"""../../../figures"""'], {}), "('../../../figures')\n", (3181, 3201), False, 'import os\n'), ((3320, 3337), 'os.chdir', 'os.chdir', (['"""del_S"""'], {}), "('del_S')\n", (3328, 3337), False, 'import os\n'), ((3437, 3451), 'os.chdir', 'os.chdir', (['"""DP"""'], {}), "('DP')\n", (3445, 3451), False, 'import os\n'), ((3567, 3589), 'os.chdir', 'os.chdir', (['"""Individual"""'], {}), "('Individual')\n", (3575, 3589), False, 'import os\n'), ((4225, 4298), 'pandas.DataFrame', 'pan.DataFrame', (['DP_freqs'], {'columns': "['$|\\\\Delta s|$', '$P (S = \\\\Delta s)$']"}), "(DP_freqs, columns=['$|\\\\Delta s|$', '$P (S = \\\\Delta s)$'])\n", (4238, 4298), True, 'import pandas as pan\n'), ((4322, 4352), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4, 4.8)'}), '(figsize=(6.4, 4.8))\n', (4332, 4352), True, 'import matplotlib.pyplot as plt\n'), ((4372, 4448), 'seaborn.scatterplot', 'sea.scatterplot', ([], {'data': 'hurtlocker', 'x': '"""$|\\\\Delta s|$"""', 'y': '"""$P (S = \\\\Delta s)$"""'}), "(data=hurtlocker, x='$|\\\\Delta s|$', y='$P (S = \\\\Delta s)$')\n", (4387, 4448), True, 'import seaborn as sea\n'), ((5030, 5047), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5040, 5047), True, 'import matplotlib.pyplot as plt\n'), ((5049, 5066), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (5059, 5066), True, 
'import matplotlib.pyplot as plt\n'), ((5083, 5103), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1)', '(10 ** 5)'], {}), '(1, 10 ** 5)\n', (5091, 5103), True, 'import matplotlib.pyplot as plt\n'), ((5118, 5149), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(10 ** -6.4)', '(10 ** 0.1)'], {}), '(10 ** -6.4, 10 ** 0.1)\n', (5126, 5149), True, 'import matplotlib.pyplot as plt\n'), ((5183, 5293), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('0P(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png' % (p, L,\n CC))"], {'dpi': '(400)'}), "(\n '0P(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png' % (p,\n L, CC), dpi=400)\n", (5194, 5293), True, 'import matplotlib.pyplot as plt\n'), ((5326, 5337), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5335, 5337), True, 'import matplotlib.pyplot as plt\n'), ((6510, 6567), 'os.chdir', 'os.chdir', (['"""..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP"""'], {}), "('..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP')\n", (6518, 6567), False, 'import os\n'), ((7721, 7742), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (7736, 7742), False, 'import os\n'), ((7869, 7944), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)', 'max_rows': '(3)'}), "(file, delimiter=',', comments='#', skip_header=1, max_rows=3)\n", (7882, 7944), True, 'import numpy as np\n'), ((8264, 8327), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "(file, delimiter=',', comments='#', skip_header=1)\n", (8277, 8327), True, 'import numpy as np\n'), ((8695, 8718), 'numpy.abs', 'np.abs', (['data_temp[:, 5]'], {}), '(data_temp[:, 5])\n', (8701, 8718), True, 'import numpy as np\n'), ((8757, 8826), 'powerlaw.Fit', 'powerlaw.Fit', (['data_temp[:, 5]'], {'discrete': '(True)', 'estimate_discrete': '(False)'}), '(data_temp[:, 5], discrete=True, estimate_discrete=False)\n', (8769, 8826), False, 'import powerlaw\n'), ((9941, 9963), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1)', '(10 ** 5.3)'], {}), '(1, 10 ** 5.3)\n', (9949, 9963), True, 'import matplotlib.pyplot as plt\n'), ((9978, 10005), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$|\\\\Delta s|$"""'], {}), "('$|\\\\Delta s|$')\n", (9988, 10005), True, 'import matplotlib.pyplot as plt\n'), ((10022, 10059), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$P (S \\\\geq \\\\Delta s)$"""'], {}), "('$P (S \\\\geq \\\\Delta s)$')\n", (10032, 10059), True, 'import matplotlib.pyplot as plt\n'), ((10075, 10087), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10085, 10087), True, 'import matplotlib.pyplot as plt\n'), ((10121, 10149), 'os.chdir', 'os.chdir', (['"""../../../figures"""'], {}), "('../../../figures')\n", (10129, 10149), False, 'import os\n'), ((10268, 10285), 'os.chdir', 'os.chdir', (['"""del_S"""'], {}), "('del_S')\n", (10276, 10285), False, 'import os\n'), ((10385, 10399), 'os.chdir', 'os.chdir', (['"""DP"""'], {}), "('DP')\n", (10393, 10399), False, 'import os\n'), ((10515, 10537), 'os.chdir', 'os.chdir', (['"""Individual"""'], {}), "('Individual')\n", (10523, 10537), False, 'import os\n'), ((10643, 10660), 'os.chdir', 'os.chdir', (['"""1-CDF"""'], {}), "('1-CDF')\n", (10651, 10660), False, 'import os\n'), ((10955, 11081), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Better Fit 1- CDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png'\n % (p, L, CC))"], {'dpi': '(400)'}), "(\n 'Better Fit 1- CDF(del(s)) vs del(s) 
--- p_%f - Grid Size (G)_%d - CC_%3.2f.png'\n % (p, L, CC), dpi=400)\n", (10966, 11081), True, 'import matplotlib.pyplot as plt\n'), ((11113, 11124), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11122, 11124), True, 'import matplotlib.pyplot as plt\n'), ((11158, 11176), 'os.chdir', 'os.chdir', (['"""../../"""'], {}), "('../../')\n", (11166, 11176), False, 'import os\n'), ((12219, 12241), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(1)', '(10 ** 5.3)'], {}), '(1, 10 ** 5.3)\n', (12227, 12241), True, 'import matplotlib.pyplot as plt\n'), ((12256, 12283), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$|\\\\Delta s|$"""'], {}), "('$|\\\\Delta s|$')\n", (12266, 12283), True, 'import matplotlib.pyplot as plt\n'), ((12300, 12333), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$P (S = \\\\Delta s)$"""'], {}), "('$P (S = \\\\Delta s)$')\n", (12310, 12333), True, 'import matplotlib.pyplot as plt\n'), ((12350, 12362), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12360, 12362), True, 'import matplotlib.pyplot as plt\n'), ((12380, 12503), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Better Fit PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png'\n % (p, L, CC))"], {'dpi': '(400)'}), "(\n 'Better Fit PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png'\n % (p, L, CC), dpi=400)\n", (12391, 12503), True, 'import matplotlib.pyplot as plt\n'), ((12535, 12546), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12544, 12546), True, 'import matplotlib.pyplot as plt\n'), ((14368, 14425), 'os.chdir', 'os.chdir', (['"""..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP"""'], {}), "('..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP')\n", (14376, 14425), False, 'import os\n'), ((15579, 15600), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (15594, 15600), False, 'import os\n'), ((15727, 15802), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)', 'max_rows': '(3)'}), "(file, delimiter=',', comments='#', skip_header=1, max_rows=3)\n", (15740, 15802), True, 'import numpy as np\n'), ((16237, 16300), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "(file, delimiter=',', comments='#', skip_header=1)\n", (16250, 16300), True, 'import numpy as np\n'), ((16546, 16569), 'numpy.abs', 'np.abs', (['data_temp[:, 5]'], {}), '(data_temp[:, 5])\n', (16552, 16569), True, 'import numpy as np\n'), ((16979, 17012), 'numpy.insert', 'np.insert', (['DP_freqs', '(0)', 'p'], {'axis': '(1)'}), '(DP_freqs, 0, p, axis=1)\n', (16988, 17012), True, 'import numpy as np\n'), ((21901, 21922), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (21916, 21922), False, 'import os\n'), ((22049, 22124), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)', 'max_rows': '(3)'}), "(file, delimiter=',', comments='#', skip_header=1, max_rows=3)\n", (22062, 22124), True, 'import numpy as np\n'), ((22559, 22622), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "(file, delimiter=',', comments='#', skip_header=1)\n", (22572, 22622), True, 'import numpy as np\n'), ((22868, 22891), 'numpy.abs', 'np.abs', (['data_temp[:, 5]'], {}), '(data_temp[:, 5])\n', (22874, 22891), True, 'import numpy as np\n'), ((23301, 23334), 'numpy.insert', 'np.insert', (['DP_freqs', '(0)', 'p'], 
{'axis': '(1)'}), '(DP_freqs, 0, p, axis=1)\n', (23310, 23334), True, 'import numpy as np\n'), ((25688, 25709), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (25703, 25709), False, 'import os\n'), ((25836, 25911), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)', 'max_rows': '(3)'}), "(file, delimiter=',', comments='#', skip_header=1, max_rows=3)\n", (25849, 25911), True, 'import numpy as np\n'), ((26179, 26242), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "(file, delimiter=',', comments='#', skip_header=1)\n", (26192, 26242), True, 'import numpy as np\n'), ((27904, 27937), 'numpy.insert', 'np.insert', (['DP_freqs', '(3)', 'p'], {'axis': '(1)'}), '(DP_freqs, 3, p, axis=1)\n', (27913, 27937), True, 'import numpy as np\n'), ((28705, 28733), 'os.chdir', 'os.chdir', (['"""../../../figures"""'], {}), "('../../../figures')\n", (28713, 28733), False, 'import os\n'), ((28852, 28869), 'os.chdir', 'os.chdir', (['"""del_S"""'], {}), "('del_S')\n", (28860, 28869), False, 'import os\n'), ((28969, 28983), 'os.chdir', 'os.chdir', (['"""DP"""'], {}), "('DP')\n", (28977, 28983), False, 'import os\n'), ((29099, 29121), 'os.chdir', 'os.chdir', (['"""Individual"""'], {}), "('Individual')\n", (29107, 29121), False, 'import os\n'), ((29233, 29253), 'os.chdir', 'os.chdir', (['"""Symmetry"""'], {}), "('Symmetry')\n", (29241, 29253), False, 'import os\n'), ((29653, 29754), 'pandas.DataFrame', 'pan.DataFrame', (['DP_freqs'], {'columns': "['$\\\\Delta s$', '$P (S = \\\\Delta s)$', 'Cross-Correlation', 'p']"}), "(DP_freqs, columns=['$\\\\Delta s$', '$P (S = \\\\Delta s)$',\n 'Cross-Correlation', 'p'])\n", (29666, 29754), True, 'import pandas as pan\n'), ((29774, 29804), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4, 4.8)'}), '(figsize=(6.4, 4.8))\n', (29784, 29804), True, 'import matplotlib.pyplot as plt\n'), ((29930, 30004), 'seaborn.scatterplot', 'sea.scatterplot', ([], {'data': 'hurtlocker', 'x': '"""$\\\\Delta s$"""', 'y': '"""$P (S = \\\\Delta s)$"""'}), "(data=hurtlocker, x='$\\\\Delta s$', y='$P (S = \\\\Delta s)$')\n", (29945, 30004), True, 'import seaborn as sea\n'), ((30262, 30279), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (30272, 30279), True, 'import matplotlib.pyplot as plt\n'), ((30352, 30383), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(10 ** -6.4)', '(10 ** 0.1)'], {}), '(10 ** -6.4, 10 ** 0.1)\n', (30360, 30383), True, 'import matplotlib.pyplot as plt\n'), ((31141, 31262), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Symmetry PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png' %\n (p, L, CC))"], {'dpi': '(400)'}), "(\n 'Symmetry PDF(del(s)) vs del(s) --- p_%f - Grid Size (G)_%d - CC_%3.2f.png'\n % (p, L, CC), dpi=400)\n", (31152, 31262), True, 'import matplotlib.pyplot as plt\n'), ((31294, 31305), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (31303, 31305), True, 'import matplotlib.pyplot as plt\n'), ((31322, 31383), 'os.chdir', 'os.chdir', (['"""..\\\\..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP"""'], {}), "('..\\\\..\\\\..\\\\..\\\\..\\\\..\\\\analysis\\\\Mass Action\\\\DP')\n", (31330, 31383), False, 'import os\n'), ((33690, 33711), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (33705, 33711), False, 'import os\n'), ((33838, 33913), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': 
'"""#"""', 'skip_header': '(1)', 'max_rows': '(3)'}), "(file, delimiter=',', comments='#', skip_header=1, max_rows=3)\n", (33851, 33913), True, 'import numpy as np\n'), ((34348, 34411), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "(file, delimiter=',', comments='#', skip_header=1)\n", (34361, 34411), True, 'import numpy as np\n'), ((34657, 34680), 'numpy.abs', 'np.abs', (['data_temp[:, 5]'], {}), '(data_temp[:, 5])\n', (34663, 34680), True, 'import numpy as np\n'), ((35500, 35538), 'numpy.insert', 'np.insert', (['DP_freqs_band', '(0)', 'p'], {'axis': '(1)'}), '(DP_freqs_band, 0, p, axis=1)\n', (35509, 35538), True, 'import numpy as np\n'), ((38598, 38619), 'os.path.getsize', 'os.path.getsize', (['file'], {}), '(file)\n', (38613, 38619), False, 'import os\n'), ((38746, 38821), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)', 'max_rows': '(3)'}), "(file, delimiter=',', comments='#', skip_header=1, max_rows=3)\n", (38759, 38821), True, 'import numpy as np\n'), ((39185, 39248), 'numpy.genfromtxt', 'np.genfromtxt', (['file'], {'delimiter': '""","""', 'comments': '"""#"""', 'skip_header': '(1)'}), "(file, delimiter=',', comments='#', skip_header=1)\n", (39198, 39248), True, 'import numpy as np\n'), ((2010, 2046), 'collections.Counter', 'collections.Counter', (['data_temp[:, 5]'], {}), '(data_temp[:, 5])\n', (2029, 2046), False, 'import collections\n'), ((3234, 3256), 'os.path.isdir', 'os.path.isdir', (['"""del_S"""'], {}), "('del_S')\n", (3247, 3256), False, 'import os\n'), ((3286, 3303), 'os.mkdir', 'os.mkdir', (['"""del_S"""'], {}), "('del_S')\n", (3294, 3303), False, 'import os\n'), ((3357, 3376), 'os.path.isdir', 'os.path.isdir', (['"""DP"""'], {}), "('DP')\n", (3370, 3376), False, 'import os\n'), ((3406, 3420), 'os.mkdir', 'os.mkdir', (['"""DP"""'], {}), "('DP')\n", (3414, 3420), False, 'import os\n'), ((3471, 3498), 'os.path.isdir', 'os.path.isdir', (['"""Individual"""'], {}), "('Individual')\n", (3484, 3498), False, 'import os\n'), ((3528, 3550), 'os.mkdir', 'os.mkdir', (['"""Individual"""'], {}), "('Individual')\n", (3536, 3550), False, 'import os\n'), ((10182, 10204), 'os.path.isdir', 'os.path.isdir', (['"""del_S"""'], {}), "('del_S')\n", (10195, 10204), False, 'import os\n'), ((10234, 10251), 'os.mkdir', 'os.mkdir', (['"""del_S"""'], {}), "('del_S')\n", (10242, 10251), False, 'import os\n'), ((10305, 10324), 'os.path.isdir', 'os.path.isdir', (['"""DP"""'], {}), "('DP')\n", (10318, 10324), False, 'import os\n'), ((10354, 10368), 'os.mkdir', 'os.mkdir', (['"""DP"""'], {}), "('DP')\n", (10362, 10368), False, 'import os\n'), ((10419, 10446), 'os.path.isdir', 'os.path.isdir', (['"""Individual"""'], {}), "('Individual')\n", (10432, 10446), False, 'import os\n'), ((10476, 10498), 'os.mkdir', 'os.mkdir', (['"""Individual"""'], {}), "('Individual')\n", (10484, 10498), False, 'import os\n'), ((10557, 10579), 'os.path.isdir', 'os.path.isdir', (['"""1-CDF"""'], {}), "('1-CDF')\n", (10570, 10579), False, 'import os\n'), ((10609, 10626), 'os.mkdir', 'os.mkdir', (['"""1-CDF"""'], {}), "('1-CDF')\n", (10617, 10626), False, 'import os\n'), ((16603, 16639), 'collections.Counter', 'collections.Counter', (['data_temp[:, 5]'], {}), '(data_temp[:, 5])\n', (16622, 16639), False, 'import collections\n'), ((22925, 22961), 'collections.Counter', 'collections.Counter', (['data_temp[:, 5]'], {}), '(data_temp[:, 5])\n', (22944, 22961), False, 'import collections\n'), 
((26546, 26582), 'collections.Counter', 'collections.Counter', (['data_temp[:, 5]'], {}), '(data_temp[:, 5])\n', (26565, 26582), False, 'import collections\n'), ((27740, 27763), 'numpy.sign', 'np.sign', (['DP_freqs[:, 0]'], {}), '(DP_freqs[:, 0])\n', (27747, 27763), True, 'import numpy as np\n'), ((28331, 28375), 'numpy.concatenate', 'np.concatenate', (['(MastBind, DP_freqs)'], {'axis': '(0)'}), '((MastBind, DP_freqs), axis=0)\n', (28345, 28375), True, 'import numpy as np\n'), ((28766, 28788), 'os.path.isdir', 'os.path.isdir', (['"""del_S"""'], {}), "('del_S')\n", (28779, 28788), False, 'import os\n'), ((28818, 28835), 'os.mkdir', 'os.mkdir', (['"""del_S"""'], {}), "('del_S')\n", (28826, 28835), False, 'import os\n'), ((28889, 28908), 'os.path.isdir', 'os.path.isdir', (['"""DP"""'], {}), "('DP')\n", (28902, 28908), False, 'import os\n'), ((28938, 28952), 'os.mkdir', 'os.mkdir', (['"""DP"""'], {}), "('DP')\n", (28946, 28952), False, 'import os\n'), ((29003, 29030), 'os.path.isdir', 'os.path.isdir', (['"""Individual"""'], {}), "('Individual')\n", (29016, 29030), False, 'import os\n'), ((29060, 29082), 'os.mkdir', 'os.mkdir', (['"""Individual"""'], {}), "('Individual')\n", (29068, 29082), False, 'import os\n'), ((29141, 29166), 'os.path.isdir', 'os.path.isdir', (['"""Symmetry"""'], {}), "('Symmetry')\n", (29154, 29166), False, 'import os\n'), ((29196, 29216), 'os.mkdir', 'os.mkdir', (['"""Symmetry"""'], {}), "('Symmetry')\n", (29204, 29216), False, 'import os\n'), ((34714, 34750), 'collections.Counter', 'collections.Counter', (['data_temp[:, 5]'], {}), '(data_temp[:, 5])\n', (34733, 34750), False, 'import collections\n'), ((45291, 45334), 'math.exp', 'math.exp', (['(lag * grim_fandango[t, 5] * k * k)'], {}), '(lag * grim_fandango[t, 5] * k * k)\n', (45299, 45334), False, 'import math\n'), ((27773, 27795), 'numpy.abs', 'np.abs', (['DP_freqs[:, 0]'], {}), '(DP_freqs[:, 0])\n', (27779, 27795), True, 'import numpy as np\n')]
|
import streamlit as st
import numpy as np
import pandas as pd
import requests
import re
import altair as alt
# Find all available data
def find_all_spreadsheets():
available_data = {}
r = requests.get('https://www.football-data.co.uk/downloadm.php')
if r.status_code != 200:
print('Oh dear. Error {}'.format(r.status_code))
return -1
matches = re.findall('(mmz(.*?)[0-9]+-[0-9]+(.*?).[xls]+")',r.text)
for match in matches:
tmp = match[0].replace('"','')
season = re.search('[0-9]+-[0-9]+',tmp).group()
available_data[season] = tmp
return available_data
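# Illustrative sketch of the returned mapping (the paths are hypothetical and depend on
# what football-data.co.uk currently serves): keys are season strings and values are
# relative spreadsheet paths, e.g.
#   {'2020-2021': 'mmz.../all-euro-data-2020-2021.xlsx', ...}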
def load_data(data_url):
data = pd.read_csv(data_url)
lowercase = lambda x: str(x).lower()
data.rename(lowercase, axis='columns', inplace=True)
data['datetime'] = pd.to_datetime(data['date'] + ' ' + data['time'])
data = data.drop(['date','time'],axis=1)
# Rearrange columns
cols = data.columns.tolist()
cols.remove('datetime')
cols.insert(0,'datetime')
data = data[cols]
return data
def season_spreadsheet(data_url):
url = 'https://www.football-data.co.uk/'+data_url
data = pd.read_excel(url,sheet_name=None)
return data
@st.cache
def load_all_data(spreadsheets):
# Silly. But it works..
base_url = 'https://www.football-data.co.uk/'
big_df = pd.DataFrame()
all_keys = list(spreadsheets.keys())
stop_season = '2014-2015'
# stop_season = '2018-2019'
pos = all_keys.index(stop_season)
for c,key in enumerate(all_keys):
print(key)
if key == stop_season:
break
data_state.text('Loading season {} ... only {} left to go'.format(key,pos-c))
url = base_url + spreadsheets[key]
og_spreadsheet = pd.read_excel(url,None)
big_spreadsheet = pd.concat(og_spreadsheet, ignore_index=True)
# Convert date to datetime object
#big_spreadsheet['Date'] = pd.to_datetime(big_spreadsheet['Date'])
big_spreadsheet.loc[big_spreadsheet.index,'Date'] = pd.to_datetime(big_spreadsheet['Date'])
#big_spreadsheet['season'] = key
big_spreadsheet.loc[big_spreadsheet.index,'season'] = key
#big_spreadsheet['s-year'] = key[5:]
big_spreadsheet.loc[big_spreadsheet.index,'s-year'] = key[5:]
#big_spreadsheet['s-year'] = big_spreadsheet['s-year'].astype(int)
big_spreadsheet.loc[big_spreadsheet.index,'s-year'] = big_spreadsheet['s-year'].astype(int)
if 'AG' in big_df.columns:
#big_spreadsheet['total-goals'] = big_spreadsheet['HG'] + big_spreadsheet['AG']
big_spreadsheet.loc[big_spreadsheet.index,'total-goals'] = big_spreadsheet['HG'] + big_spreadsheet['AG']
else:
#big_spreadsheet['total-goals'] = big_spreadsheet['FTHG'] + big_spreadsheet['FTAG']
big_spreadsheet.loc[big_spreadsheet.index,'total-goals'] = big_spreadsheet['FTHG'] + big_spreadsheet['FTAG']
big_df = big_df.append(big_spreadsheet, sort=False,ignore_index=True)
big_df = big_df[big_df['total-goals'].isna()==False]
return big_df.sort_values('Date',ascending=False)#.dropna(axis=0,how='any')
def prev_match(df,order_specific=False):
small = df[['Date','HomeTeam','AwayTeam','total-goals']]
small = small.dropna(how='all')
if order_specific:
small.loc[small.index,'hash'] = (small['HomeTeam'] + small['AwayTeam']).apply(hash)
else:
small.loc[small.index,'hash'] = small['HomeTeam'].apply(hash) + small['AwayTeam'].apply(hash)
return small.drop_duplicates(subset='hash', keep="first")
def prev_match_selection(df,order_specific=True,sel_type=None,total_goals=2.5):
# Return list of pairs where certain condition was satisfied
small = df[['Date','HomeTeam','AwayTeam','total-goals']]
small = small.dropna(how='all')
if order_specific:
small.loc[small.index,'hash'] = (small['HomeTeam'] + small['AwayTeam']).apply(hash)
else:
small.loc[small.index,'hash'] = small['HomeTeam'].apply(hash) + small['AwayTeam'].apply(hash)
tmp = sel_type.split('/')
games_played = int(tmp[1])
min_goals = int(tmp[0])
# Find the total matches played criteria
grouped_matches = small.groupby('hash').head(games_played)
# Only select matches where total-goals was satisfied
filtered_matches = grouped_matches[grouped_matches['total-goals'].gt(total_goals)]
# Count how many matches satisfied the total-goals criterion
hash_sizes = filtered_matches.groupby('hash').size().reset_index(name='counts')
# Only keep matches that satisfy the criterion
good_hashes = hash_sizes[hash_sizes['counts'].ge(min_goals)]
# Merge back to find Home and Away team names
merged = pd.merge(small,good_hashes,left_on='hash',right_on='hash',copy=False)
merged.loc[merged.index,'total-goals'] = np.ceil(total_goals)
return merged[['HomeTeam','AwayTeam','total-goals','hash']].drop_duplicates()
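# Worked example of the x/y criterion (hypothetical numbers): with sel_type='2/3' and
# total_goals=2.5, a Home/Away pair is kept only if at least 2 of its last 3 meetings
# produced more than 2.5 goals; the returned frame then carries ceil(2.5) = 3.0 as the
# 'total-goals' value for every kept pair.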
def find_stats(test_df,decision_df,order_specific=True,stats_type='last-match',misc=None):
# Add hashes appropriately
if order_specific:
test_df.loc[test_df.index,'hash'] = (test_df['HomeTeam']+test_df['AwayTeam']).apply(hash)
else:
test_df.loc[test_df.index,'hash'] = test_df['HomeTeam'].apply(hash)+test_df['AwayTeam'].apply(hash)
o = {'accuracy':0,'data':None}
if order_specific:
# Match test_df with decision_df on hashes
merged = pd.merge(test_df,decision_df,left_on='hash',right_on='hash',copy=False,suffixes=['_t','_d'])
merged_full = merged
merged = merged_full[['hash','total-goals_t','total-goals_d',#'HomeTeam_t','AwayTeam_t','HomeTeam_d','AwayTeam_d'
]]
merged.loc[merged.index,'correct'] = 0
merged.loc[(
((merged['total-goals_t']>2.5) & (merged['total-goals_d']>2.5))
|
((merged['total-goals_t']<2.5) & (merged['total-goals_d']<2.5))
)
,'correct'
] = 1
o['accuracy'] = merged['correct'].mean()
if 'Date_t' in merged_full.keys():
date_var = 'Date_t'
else:
date_var = 'Date'
o['data'] = [merged_full[[date_var,'HomeTeam_t','AwayTeam_t','total-goals_t','total-goals_d']]]
else:
        # This is harder without order specificity: a pair usually meets twice per season
        # (two rounds), so the stats have to be updated between the first and second round
first_round = test_df.drop_duplicates(subset='hash', keep="last")
second_round = test_df.drop(first_round.index)
# st.write('first round',first_round)
# st.write(first_round.shape)
# st.write('second round',second_round)
# st.write(second_round.shape)
# st.write('test_df',test_df)
# Workout decisions for the first round
merged1 = pd.merge(first_round,decision_df,on='hash',copy=False,suffixes=['_t','_d'])
merged1 = merged1.drop_duplicates(subset='hash', keep="last")
merged1 = merged1.drop(columns=['HomeTeam_d','AwayTeam_d'])
# st.write(merged1)
res = merged1[['hash']]
res['total-goals'] = merged1['total-goals_t']
# Flag correct decision
merged1.loc[merged1.index,'correct'] = 0
merged1.loc[(
((merged1['total-goals_t']>2.5) & (merged1['total-goals_d']>2.5))
|
((merged1['total-goals_t']<2.5) & (merged1['total-goals_d']<2.5))
)
,'correct'
] = 1
# st.write('first round choices',merged1[['HomeTeam_t','AwayTeam_t','total-goals_t','total-goals_d','correct']])
# Update stats for second round
if not second_round.empty:
if stats_type == 'last-match':
# Find total goals from previous play
merged2 = pd.merge(second_round,res,left_on='hash',right_on='hash',copy=False,suffixes=['_t','_d'])
merged2.loc[merged2.index,'correct'] = 0
merged2.loc[(
((merged2['total-goals_t']>2.5) & (merged2['total-goals_d']>2.5))
|
((merged2['total-goals_t']<2.5) & (merged2['total-goals_d']<2.5))
)
,'correct'
] = 1
elif stats_type == 'xytotal':
if not misc is None:
x_y_type = misc['sel_type']
total_goals = misc['total_goals']
hist_data = misc['hist_data']
new_data_dirty = merged1.drop(['hash','correct'],axis=1)
new_data = new_data_dirty.rename(columns={'HomeTeam_t':'HomeTeam','AwayTeam_t':'AwayTeam','total-goals_t':'total-goals'}).sort_values('Date',ascending=False)
combined = new_data.append(hist_data,ignore_index=True)
second_round_choices = prev_match_selection(combined,order_specific=order_specific,sel_type=x_y_type,total_goals=total_goals)
merged2 = pd.merge(second_round,second_round_choices,on='hash',copy=False,suffixes=['_t','_d'])
second_round_choices = second_round_choices[['hash','total-goals']].drop_duplicates()
# st.write('second_round_choices',second_round_choices)
# st.write(second_round_choices.shape)
# Find total goals from previous play
merged2 = pd.merge(second_round,second_round_choices,left_on='hash',right_on='hash',copy=False,suffixes=['_t','_d'])
merged2.loc[merged2.index,'correct'] = 0
# st.write(merged2[['HomeTeam','AwayTeam','total-goals_t','total-goals_d']])
merged2.loc[(
((merged2['total-goals_t']>2.5) & (merged2['total-goals_d']>2.5))
|
((merged2['total-goals_t']<2.5) & (merged2['total-goals_d']<2.5))
)
,'correct'
] = 1
o['accuracy'] = np.array(list(merged2['correct'])+list(merged1['correct'])).mean()
if 'Date_t' in merged1.keys():
date_val = 'Date_t'
else:
date_val = 'Date'
o['data'] = [merged1[[date_val,'HomeTeam_t','AwayTeam_t','total-goals_t','total-goals_d']],
merged2[['Date','HomeTeam','AwayTeam','total-goals_t','total-goals_d']]]
else:
o['accuracy'] = np.array(list(merged1['correct'])).mean()
if 'Date_t' in merged1.keys():
date_val = 'Date_t'
else:
date_val = 'Date'
o['data'] = [merged1[[date_val,'HomeTeam_t','AwayTeam_t','total-goals_t','total-goals_d']]]
return o
def calc_roi(season_odds,season_results,chosen_odds=None):
# > 2.5 goals
#BbAv>2.5 = Betbrain average over 2.5 goals
#BbAv<2.5 = Betbrain average under 2.5 goals
merged = pd.merge(season_odds,season_results,left_on=['Date','AwayTeam','HomeTeam'],right_on=['Date','AwayTeam','HomeTeam'],how='inner')
# st.write('season_odds',season_odds.shape)
# st.write('season_results',season_results.shape)
#
# st.write('merged',merged)
# st.write('merged',merged.shape)
clean = merged
bet_size = 1
# Check that total-goals column was created
# if not then go by the odds
if 'total-goals_t' in clean.keys() and 'total-goals_d' in clean.keys():
# add a flag to mark correctness
clean.loc[clean.index,'correct>2.5'] = 0
clean.loc[clean.index,'correct<2.5'] = 0
clean.loc[clean.index,'correct'] = 0
clean.loc[
((clean['total-goals_t']>2.5) & (clean['total-goals_d']>2.5))
,'correct>2.5'
] = 1
clean.loc[
((clean['total-goals_t']<2.5) & (clean['total-goals_d']<2.5))
,'correct<2.5'
] = 1
clean.loc[(clean['correct>2.5']==1) | (clean['correct<2.5']==1),'correct'] = 1
# st.write(clean)
broker_names = []
won_sizes = []
lost_sizes = []
succ_rates = []
avg_prices = []
rois = []
total_costs = []
profits = []
brokers = ['B365','P','GB','BbAv']
avail_brokers = []
# Lowest Broker for selection
available_odds_gt = []
available_odds_lt = []
for b in brokers:
b_str_gt = '{}>2.5'.format(b)
b_str_lt = '{}<2.5'.format(b)
if b_str_gt in clean.keys():
available_odds_gt.append(b_str_gt)
available_odds_lt.append(b_str_lt)
avail_brokers.append(b)
# Add new columns
clean.loc[clean.index,'min>2.5']=clean[available_odds_gt].min(axis=1)
clean.loc[clean.index,'max>2.5']=clean[available_odds_gt].max(axis=1)
clean.loc[clean.index,'min<2.5']=clean[available_odds_lt].min(axis=1)
clean.loc[clean.index,'max<2.5']=clean[available_odds_lt].max(axis=1)
clean.loc[clean.index,'min-odds']=clean[available_odds_lt+available_odds_gt].min(axis=1)
clean.loc[clean.index,'max-odds']=clean[list(available_odds_lt)+list(available_odds_gt)].max(axis=1)
for c,b in enumerate(avail_brokers):
broker = clean[[available_odds_gt[c],available_odds_lt[c],'correct','correct>2.5','correct<2.5']]
broker = broker.dropna(axis=0,how='any')
if broker.shape[0] > 0:
lost_size = broker['correct'].value_counts(dropna=False)[0]*bet_size
correct_rows_gt = broker[broker['correct>2.5']==1]
correct_rows_lt = broker[broker['correct<2.5']==1]
won_size = (bet_size*(correct_rows_gt[available_odds_gt[c]]).sum(skipna=True)
+ bet_size*(correct_rows_lt[available_odds_lt[c]]).sum(skipna=True))
profit = won_size - lost_size
succ_rate = (correct_rows_gt.shape[0]+correct_rows_lt.shape[0])/(broker.shape[0])
avg_price = np.array(list(correct_rows_gt[available_odds_gt[c]])+list(correct_rows_lt[available_odds_lt[c]])).mean()
total_cost = bet_size*broker.shape[0]
roi = profit/total_cost
broker_names.append(b)
won_sizes.append(won_size)
lost_sizes.append(lost_size)
succ_rates.append(succ_rate)
avg_prices.append(avg_price)
rois.append(roi)
total_costs.append(total_cost)
profits.append(profit)
if 'B365C>2.5' in clean.keys():
broker = clean[['B365C>2.5','B365C<2.5','correct','correct>2.5','correct<2.5']]
broker = broker.dropna(axis=0,how='any')
if broker.shape[0] > 0:
lost_size = broker['correct'].value_counts(dropna=False)[0]*bet_size
correct_rows_gt = broker[broker['correct>2.5']==1]
correct_rows_lt = broker[broker['correct<2.5']==1]
won_size = bet_size*(correct_rows_gt['B365C>2.5']+0).sum(skipna=True) + bet_size*(correct_rows_lt['B365C<2.5']+0).sum(skipna=True)
profit = won_size - lost_size
succ_rate = (correct_rows_gt.shape[0]+correct_rows_lt.shape[0])/(broker.shape[0])
avg_price = np.array(list(correct_rows_gt['B365C>2.5'])+list(correct_rows_lt['B365C<2.5'])).mean()
total_cost = bet_size*broker.shape[0]
roi = profit/total_cost
broker_names.append('Bet365Close')
won_sizes.append(won_size)
lost_sizes.append(lost_size)
succ_rates.append(succ_rate)
avg_prices.append(avg_price)
rois.append(roi)
total_costs.append(total_cost)
profits.append(profit)
if 'PC>2.5' in clean.keys():
broker = clean[['PC>2.5','PC<2.5','correct','correct>2.5','correct<2.5']]
broker = broker.dropna(axis=0,how='any')
if broker.shape[0] > 0:
lost_size = broker['correct'].value_counts(dropna=False)[0]*bet_size
correct_rows_gt = broker[broker['correct>2.5']==1]
correct_rows_lt = broker[broker['correct<2.5']==1]
won_size = bet_size*(correct_rows_gt['PC>2.5']+0).sum(skipna=True) + bet_size*(correct_rows_lt['PC<2.5']+0).sum(skipna=True)
profit = won_size - lost_size
succ_rate = (correct_rows_gt.shape[0]+correct_rows_lt.shape[0])/(broker.shape[0])
avg_price = np.array(list(correct_rows_gt['PC>2.5'])+list(correct_rows_lt['PC<2.5'])).mean()
total_cost = bet_size*broker.shape[0]
roi = profit/total_cost
broker_names.append('PinnacleClose')
won_sizes.append(won_size)
lost_sizes.append(lost_size)
succ_rates.append(succ_rate)
avg_prices.append(avg_price)
rois.append(roi)
total_costs.append(total_cost)
profits.append(profit)
# Select lowest broker
broker = clean[['min>2.5','min<2.5','correct','correct>2.5','correct<2.5']]
broker = broker.dropna(axis=0,how='any')
if broker.shape[0] > 0:
lost_size = broker['correct'].value_counts(dropna=False)[0]*bet_size
correct_rows_gt = broker[broker['correct>2.5']==1]
correct_rows_lt = broker[broker['correct<2.5']==1]
won_size = bet_size*(correct_rows_gt['min>2.5']+0).sum(skipna=True) + bet_size*(correct_rows_lt['min<2.5']+0).sum(skipna=True)
profit = won_size - lost_size
succ_rate = (correct_rows_gt.shape[0]+correct_rows_lt.shape[0])/(broker.shape[0])
avg_price = np.array(list(correct_rows_gt['min>2.5'])+list(correct_rows_lt['min<2.5'])).mean()
total_cost = bet_size*broker.shape[0]
roi = profit/total_cost
broker_names.append('MinBroker')
won_sizes.append(won_size)
lost_sizes.append(lost_size)
succ_rates.append(succ_rate)
avg_prices.append(avg_price)
rois.append(roi)
total_costs.append(total_cost)
profits.append(profit)
# Highest Broker
broker = clean[['max>2.5','max<2.5','correct','correct>2.5','correct<2.5']]
broker = broker.dropna(axis=0,how='any')
if broker.shape[0] > 0:
lost_size = broker['correct'].value_counts(dropna=False)[0]*bet_size
correct_rows_gt = broker[broker['correct>2.5']==1]
correct_rows_lt = broker[broker['correct<2.5']==1]
won_size = bet_size*(correct_rows_gt['max>2.5']+0).sum(skipna=True) + bet_size*(correct_rows_lt['max<2.5']+0).sum(skipna=True)
profit = won_size - lost_size
succ_rate = (correct_rows_gt.shape[0]+correct_rows_lt.shape[0])/(broker.shape[0])
avg_price = np.array(list(correct_rows_gt['max>2.5'])+list(correct_rows_lt['max<2.5'])).mean()
total_cost = bet_size*broker.shape[0]
roi = profit/total_cost
broker_names.append('MaxBroker')
won_sizes.append(won_size)
lost_sizes.append(lost_size)
succ_rates.append(succ_rate)
avg_prices.append(avg_price)
rois.append(roi)
total_costs.append(total_cost)
profits.append(profit)
output_table = pd.DataFrame({'broker-name':broker_names,
'won-size':won_sizes,
'profit':profits,
# 'loss':lost_sizes,
'succ-rate':succ_rates,
'avg-price':avg_prices,
'roi':rois,
'total-cost':total_costs
})
st.write('### Selected odds',clean)
st.write('### Results',output_table)
else:
#TODO: Calculate results based on odds (highest and lowest)
pass
def filter_teams(df,chosen_n=5,filter_type='TotalAll'):
# Select only last season data
last_season = df[df['s-year']==df['s-year'].max()]
# Rank = goals/num_games
if filter_type == 'Total goals Home+Away':
# Rank teams by total scored goals
try:
home = hist_data[['HomeTeam','FTHG']].rename(columns={'HomeTeam':'Team','FTHG':'Goals'})
except:
home = hist_data[['HomeTeam','HG']].rename(columns={'HomeTeam':'Team','HG':'Goals'})
try:
away = hist_data[['AwayTeam','FTAG']].rename(columns={'AwayTeam':'Team','FTAG':'Goals'})
except:
away = hist_data[['AwayTeam','AG']].rename(columns={'AwayTeam':'Team','AG':'Goals'})
teams = home.append(away)
goals_by_teams = teams[['Team','Goals']].groupby('Team').sum()
games_by_teams = teams[['Team','Goals']].groupby('Team').count()
rank = (goals_by_teams/games_by_teams).sort_values('Goals',ascending=False).head(chosen_n).index
home_teams = pd.DataFrame(rank)
merge_home = pd.merge(df,home_teams,left_on='HomeTeam',right_on='Team',how='inner')
merge_away = pd.merge(df,home_teams,left_on='AwayTeam',right_on='Team',how='inner')
merge = merge_home.append(merge_away).reset_index()
# st.write(merge)
return merge
elif filter_type == 'Total goals Home':
# Rank teams on total goals when was at home
try:
goals_by_teams = last_season[['HomeTeam','FTHG']].groupby('HomeTeam').sum()
games_by_teams = last_season[['HomeTeam','FTHG']].groupby('HomeTeam').count()
            rank = (goals_by_teams/games_by_teams).sort_values('FTHG',ascending=False).head(chosen_n).index
except:
goals_by_teams = last_season[['HomeTeam','HG']].groupby('HomeTeam').sum()
games_by_teams = last_season[['HomeTeam','HG']].groupby('HomeTeam').count()
rank = (goals_by_teams/games_by_teams).sort_values('HG',ascending=False).head(chosen_n).index
home_teams = pd.DataFrame(rank)
merge = pd.merge(df,home_teams,left_on='HomeTeam',right_on='HomeTeam',how='inner')
return merge
elif filter_type == 'Total goals Away':
# Rank teams on total goals when was away
try:
goals_by_teams = last_season[['AwayTeam','FTAG']].groupby('AwayTeam').sum()
games_by_teams = last_season[['AwayTeam','FTAG']].groupby('AwayTeam').count()
            rank = (goals_by_teams/games_by_teams).sort_values('FTAG',ascending=False).head(chosen_n).index
except:
goals_by_teams = last_season[['AwayTeam','AG']].groupby('AwayTeam').sum()
games_by_teams = last_season[['AwayTeam','AG']].groupby('AwayTeam').count()
            rank = (goals_by_teams/games_by_teams).sort_values('AG',ascending=False).head(chosen_n).index
away_teams = pd.DataFrame(rank)
merge = pd.merge(df,away_teams,left_on='AwayTeam',right_on='AwayTeam',how='inner')
return merge
spreadsheets = find_all_spreadsheets()
data_state = st.text('')
data_state.text('Pre-processing')
big_df = load_all_data(spreadsheets)
data_state.text('')
season = st.selectbox(
"Select season", list(big_df['season'].unique()),1
)
division = st.selectbox(
"Select division", list(big_df['Div'].sort_values().unique()),0
)
order_specific = st.sidebar.checkbox('Order specific',1)
# Select by exact total number of goals
top_n_selection = st.sidebar.checkbox('Top n teams from the previous season',0)
st.markdown("""Select a type of Head to head. Two options are available.""")
st.markdown("1) Total goals from previous fixture looks at the previous total number of goals in the previous identical match")
st.markdown("2) x/y matching with total goals only selects matches where at least x out of y last matches had at least (however many)`total goals'")
st.markdown("More filters available i the panel on the left")
# Find previous total for all pairs
total_type = st.selectbox(
"Type of `Head to Head'", ['None','Total goals from previous fixture',"x/y & `total goals' criterion"],0
)
current_year = int(season[5:])
division_data = big_df.loc[big_df['Div'] == division]
current_data = division_data.loc[big_df['s-year']==current_year]
hist_data = division_data.loc[(big_df['s-year'] < current_year)]
if top_n_selection:
rank_type = st.sidebar.selectbox(
"Rank teams by", ['Total goals Home+Away','Total goals Home','Total goals Away'],0
)
n = st.sidebar.number_input('Number of top teams selected',
min_value=1,
max_value=len(current_data['HomeTeam'].unique()+current_data['AwayTeam'].unique()),
value=5)
# Filter teams
hist_data = filter_teams(hist_data,chosen_n=n,filter_type=rank_type)
test_data = None
stats_type = None
misc = None
if total_type == 'None':
pass
elif total_type == 'Total goals from previous fixture':
test_data = prev_match(hist_data,order_specific=order_specific)
stats_type = 'last-match'
elif total_type == "x/y & `total goals' criterion":
x_y_type = st.selectbox(
"Select x/y", ['1/1','1/2','2/2','2/3','3/3','3/4','4/4','4/5','5/5'],5
)
total_goals = st.selectbox(
"Select `total goals'", np.linspace(0.5,8.5,9),2
)
test_data = prev_match_selection(hist_data,order_specific=order_specific,sel_type=x_y_type,total_goals=total_goals)
stats_type = 'xytotal'
misc = {'sel_type':x_y_type,'total_goals':total_goals,'hist_data':hist_data}
# Workout how many matches were won with given filters
if total_type != 'None':
temp = find_stats(current_data,test_data,order_specific=order_specific,stats_type=stats_type,misc=misc)
else:
temp = {'data':[]}
if len(temp['data']) == 1:
out_data = temp['data'][0].rename(columns={'HomeTeam_t':'HomeTeam',
'AwayTeam_t':'AwayTeam',
# 'total-goals_t':'total-goals',
# 'total-goals_d':'total-goals',
'Date_t':'Date',
'Date_d':'Date'
})
# st.write('## Selection',out_data)
elif len(temp['data']) == 2:
out_data1 = temp['data'][0].rename(columns={'HomeTeam_t':'HomeTeam',
'AwayTeam_t':'AwayTeam',
# 'total-goals_t':'total-goals',
# 'total-goals_d':'total-goals',
'Date_t':'Date',
'Date_d':'Date'
})
out_data2 = temp['data'][1].rename(columns={'HomeTeam_t':'HomeTeam',
'AwayTeam_t':'AwayTeam',
# 'total-goals_t':'total-goals',
# 'total-goals_d':'total-goals',
'Date_t':'Date',
'Date_d':'Date'
})
out_data = out_data1.append(out_data2,ignore_index=True)
# st.write('## Selection',out_data)
if total_type != 'None':
calc_roi(current_data,out_data)
else:
#TODO: Choose best matches based on odds
pass
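# To experiment with the app locally (assuming this script is saved as app.py):
#   streamlit run app.py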
|
[
"streamlit.markdown",
"numpy.ceil",
"pandas.read_csv",
"pandas.merge",
"streamlit.write",
"requests.get",
"streamlit.sidebar.checkbox",
"streamlit.text",
"numpy.linspace",
"pandas.read_excel",
"streamlit.sidebar.selectbox",
"streamlit.selectbox",
"pandas.DataFrame",
"re.findall",
"pandas.concat",
"pandas.to_datetime",
"re.search"
] |
[((24432, 24443), 'streamlit.text', 'st.text', (['""""""'], {}), "('')\n", (24439, 24443), True, 'import streamlit as st\n'), ((24732, 24772), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""Order specific"""', '(1)'], {}), "('Order specific', 1)\n", (24751, 24772), True, 'import streamlit as st\n'), ((24831, 24893), 'streamlit.sidebar.checkbox', 'st.sidebar.checkbox', (['"""Top n teams from the previous season"""', '(0)'], {}), "('Top n teams from the previous season', 0)\n", (24850, 24893), True, 'import streamlit as st\n'), ((24894, 24966), 'streamlit.markdown', 'st.markdown', (['"""Select a type of Head to head. Two options are available."""'], {}), "('Select a type of Head to head. Two options are available.')\n", (24905, 24966), True, 'import streamlit as st\n'), ((24971, 25108), 'streamlit.markdown', 'st.markdown', (['"""1) Total goals from previous fixture looks at the previous total number of goals in the previous identical match"""'], {}), "(\n '1) Total goals from previous fixture looks at the previous total number of goals in the previous identical match'\n )\n", (24982, 25108), True, 'import streamlit as st\n'), ((25099, 25257), 'streamlit.markdown', 'st.markdown', (['"""2) x/y matching with total goals only selects matches where at least x out of y last matches had at least (however many)`total goals\'"""'], {}), '(\n "2) x/y matching with total goals only selects matches where at least x out of y last matches had at least (however many)`total goals\'"\n )\n', (25110, 25257), True, 'import streamlit as st\n'), ((25248, 25309), 'streamlit.markdown', 'st.markdown', (['"""More filters available i the panel on the left"""'], {}), "('More filters available i the panel on the left')\n", (25259, 25309), True, 'import streamlit as st\n'), ((25360, 25485), 'streamlit.selectbox', 'st.selectbox', (['"""Type of `Head to Head\'"""', '[\'None\', \'Total goals from previous fixture\', "x/y & `total goals\' criterion"]', '(0)'], {}), '("Type of `Head to Head\'", [\'None\',\n \'Total goals from previous fixture\', "x/y & `total goals\' criterion"], 0)\n', (25372, 25485), True, 'import streamlit as st\n'), ((197, 258), 'requests.get', 'requests.get', (['"""https://www.football-data.co.uk/downloadm.php"""'], {}), "('https://www.football-data.co.uk/downloadm.php')\n", (209, 258), False, 'import requests\n'), ((377, 435), 're.findall', 're.findall', (['"""(mmz(.*?)[0-9]+-[0-9]+(.*?).[xls]+")"""', 'r.text'], {}), '(\'(mmz(.*?)[0-9]+-[0-9]+(.*?).[xls]+")\', r.text)\n', (387, 435), False, 'import re\n'), ((657, 678), 'pandas.read_csv', 'pd.read_csv', (['data_url'], {}), '(data_url)\n', (668, 678), True, 'import pandas as pd\n'), ((800, 849), 'pandas.to_datetime', 'pd.to_datetime', (["(data['date'] + ' ' + data['time'])"], {}), "(data['date'] + ' ' + data['time'])\n", (814, 849), True, 'import pandas as pd\n'), ((1149, 1184), 'pandas.read_excel', 'pd.read_excel', (['url'], {'sheet_name': 'None'}), '(url, sheet_name=None)\n', (1162, 1184), True, 'import pandas as pd\n'), ((1335, 1349), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1347, 1349), True, 'import pandas as pd\n'), ((4774, 4847), 'pandas.merge', 'pd.merge', (['small', 'good_hashes'], {'left_on': '"""hash"""', 'right_on': '"""hash"""', 'copy': '(False)'}), "(small, good_hashes, left_on='hash', right_on='hash', copy=False)\n", (4782, 4847), True, 'import pandas as pd\n'), ((4889, 4909), 'numpy.ceil', 'np.ceil', (['total_goals'], {}), '(total_goals)\n', (4896, 4909), True, 'import numpy as np\n'), ((11514, 11653), 
'pandas.merge', 'pd.merge', (['season_odds', 'season_results'], {'left_on': "['Date', 'AwayTeam', 'HomeTeam']", 'right_on': "['Date', 'AwayTeam', 'HomeTeam']", 'how': '"""inner"""'}), "(season_odds, season_results, left_on=['Date', 'AwayTeam',\n 'HomeTeam'], right_on=['Date', 'AwayTeam', 'HomeTeam'], how='inner')\n", (11522, 11653), True, 'import pandas as pd\n'), ((25739, 25850), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Rank teams by"""', "['Total goals Home+Away', 'Total goals Home', 'Total goals Away']", '(0)'], {}), "('Rank teams by', ['Total goals Home+Away',\n 'Total goals Home', 'Total goals Away'], 0)\n", (25759, 25850), True, 'import streamlit as st\n'), ((1768, 1792), 'pandas.read_excel', 'pd.read_excel', (['url', 'None'], {}), '(url, None)\n', (1781, 1792), True, 'import pandas as pd\n'), ((1826, 1870), 'pandas.concat', 'pd.concat', (['og_spreadsheet'], {'ignore_index': '(True)'}), '(og_spreadsheet, ignore_index=True)\n', (1835, 1870), True, 'import pandas as pd\n'), ((2049, 2088), 'pandas.to_datetime', 'pd.to_datetime', (["big_spreadsheet['Date']"], {}), "(big_spreadsheet['Date'])\n", (2063, 2088), True, 'import pandas as pd\n'), ((5502, 5604), 'pandas.merge', 'pd.merge', (['test_df', 'decision_df'], {'left_on': '"""hash"""', 'right_on': '"""hash"""', 'copy': '(False)', 'suffixes': "['_t', '_d']"}), "(test_df, decision_df, left_on='hash', right_on='hash', copy=False,\n suffixes=['_t', '_d'])\n", (5510, 5604), True, 'import pandas as pd\n'), ((7027, 7112), 'pandas.merge', 'pd.merge', (['first_round', 'decision_df'], {'on': '"""hash"""', 'copy': '(False)', 'suffixes': "['_t', '_d']"}), "(first_round, decision_df, on='hash', copy=False, suffixes=['_t', '_d']\n )\n", (7035, 7112), True, 'import pandas as pd\n'), ((20676, 20859), 'pandas.DataFrame', 'pd.DataFrame', (["{'broker-name': broker_names, 'won-size': won_sizes, 'profit': profits,\n 'succ-rate': succ_rates, 'avg-price': avg_prices, 'roi': rois,\n 'total-cost': total_costs}"], {}), "({'broker-name': broker_names, 'won-size': won_sizes, 'profit':\n profits, 'succ-rate': succ_rates, 'avg-price': avg_prices, 'roi': rois,\n 'total-cost': total_costs})\n", (20688, 20859), True, 'import pandas as pd\n'), ((21170, 21206), 'streamlit.write', 'st.write', (['"""### Selected odds"""', 'clean'], {}), "('### Selected odds', clean)\n", (21178, 21206), True, 'import streamlit as st\n'), ((21214, 21251), 'streamlit.write', 'st.write', (['"""### Results"""', 'output_table'], {}), "('### Results', output_table)\n", (21222, 21251), True, 'import streamlit as st\n'), ((22388, 22406), 'pandas.DataFrame', 'pd.DataFrame', (['rank'], {}), '(rank)\n', (22400, 22406), True, 'import pandas as pd\n'), ((22428, 22502), 'pandas.merge', 'pd.merge', (['df', 'home_teams'], {'left_on': '"""HomeTeam"""', 'right_on': '"""Team"""', 'how': '"""inner"""'}), "(df, home_teams, left_on='HomeTeam', right_on='Team', how='inner')\n", (22436, 22502), True, 'import pandas as pd\n'), ((22520, 22594), 'pandas.merge', 'pd.merge', (['df', 'home_teams'], {'left_on': '"""AwayTeam"""', 'right_on': '"""Team"""', 'how': '"""inner"""'}), "(df, home_teams, left_on='AwayTeam', right_on='Team', how='inner')\n", (22528, 22594), True, 'import pandas as pd\n'), ((23413, 23431), 'pandas.DataFrame', 'pd.DataFrame', (['rank'], {}), '(rank)\n', (23425, 23431), True, 'import pandas as pd\n'), ((23448, 23526), 'pandas.merge', 'pd.merge', (['df', 'home_teams'], {'left_on': '"""HomeTeam"""', 'right_on': '"""HomeTeam"""', 'how': '"""inner"""'}), "(df, home_teams, 
left_on='HomeTeam', right_on='HomeTeam', how='inner')\n", (23456, 23526), True, 'import pandas as pd\n'), ((26468, 26566), 'streamlit.selectbox', 'st.selectbox', (['"""Select x/y"""', "['1/1', '1/2', '2/2', '2/3', '3/3', '3/4', '4/4', '4/5', '5/5']", '(5)'], {}), "('Select x/y', ['1/1', '1/2', '2/2', '2/3', '3/3', '3/4', '4/4',\n '4/5', '5/5'], 5)\n", (26480, 26566), True, 'import streamlit as st\n'), ((517, 548), 're.search', 're.search', (['"""[0-9]+-[0-9]+"""', 'tmp'], {}), "('[0-9]+-[0-9]+', tmp)\n", (526, 548), False, 'import re\n'), ((8129, 8228), 'pandas.merge', 'pd.merge', (['second_round', 'res'], {'left_on': '"""hash"""', 'right_on': '"""hash"""', 'copy': '(False)', 'suffixes': "['_t', '_d']"}), "(second_round, res, left_on='hash', right_on='hash', copy=False,\n suffixes=['_t', '_d'])\n", (8137, 8228), True, 'import pandas as pd\n'), ((24248, 24266), 'pandas.DataFrame', 'pd.DataFrame', (['rank'], {}), '(rank)\n', (24260, 24266), True, 'import pandas as pd\n'), ((24283, 24361), 'pandas.merge', 'pd.merge', (['df', 'away_teams'], {'left_on': '"""AwayTeam"""', 'right_on': '"""AwayTeam"""', 'how': '"""inner"""'}), "(df, away_teams, left_on='AwayTeam', right_on='AwayTeam', how='inner')\n", (24291, 24361), True, 'import pandas as pd\n'), ((26648, 26672), 'numpy.linspace', 'np.linspace', (['(0.5)', '(8.5)', '(9)'], {}), '(0.5, 8.5, 9)\n', (26659, 26672), True, 'import numpy as np\n'), ((9464, 9558), 'pandas.merge', 'pd.merge', (['second_round', 'second_round_choices'], {'on': '"""hash"""', 'copy': '(False)', 'suffixes': "['_t', '_d']"}), "(second_round, second_round_choices, on='hash', copy=False,\n suffixes=['_t', '_d'])\n", (9472, 9558), True, 'import pandas as pd\n'), ((9924, 10041), 'pandas.merge', 'pd.merge', (['second_round', 'second_round_choices'], {'left_on': '"""hash"""', 'right_on': '"""hash"""', 'copy': '(False)', 'suffixes': "['_t', '_d']"}), "(second_round, second_round_choices, left_on='hash', right_on=\n 'hash', copy=False, suffixes=['_t', '_d'])\n", (9932, 10041), True, 'import pandas as pd\n')]
|
"""计算分位数"""
from scipy import stats
import numpy as np
S = 47
N = 100
a = S + 1
b = (N -S) + 1
alpha = 0.05
lu = stats.beta.ppf([alpha/2, 1-alpha/2], a, b)
print(lu)
## MC方法
S = 1000
X = stats.beta.rvs(a, b, size=S)
X = np.sort(X, axis=0)
l = X[round(S*alpha/2)]
u = X[round(S*(1-alpha)/2)]
print(l,u)
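# The exact and Monte Carlo intervals should roughly agree (up to sampling noise);
# a quick optional check using the variables above:
# print(np.abs(np.array([l, u]) - lu))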
|
[
"numpy.sort",
"scipy.stats.beta.rvs",
"scipy.stats.beta.ppf"
] |
[((115, 163), 'scipy.stats.beta.ppf', 'stats.beta.ppf', (['[alpha / 2, 1 - alpha / 2]', 'a', 'b'], {}), '([alpha / 2, 1 - alpha / 2], a, b)\n', (129, 163), False, 'from scipy import stats\n'), ((190, 218), 'scipy.stats.beta.rvs', 'stats.beta.rvs', (['a', 'b'], {'size': 'S'}), '(a, b, size=S)\n', (204, 218), False, 'from scipy import stats\n'), ((223, 241), 'numpy.sort', 'np.sort', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (230, 241), True, 'import numpy as np\n')]
|
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implement VOLO Class
"""
import math
import copy
import numpy as np
import paddle
import paddle.nn as nn
from droppath import DropPath
from fold import fold
#from utils import MyPrint
#myprint = MyPrint()
class Identity(nn.Layer):
""" Identity layer
The output of this layer is the input without any change.
Use this layer to avoid using 'if' condition in forward methods
"""
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Downsample(nn.Layer):
"""Apply a Conv2D with kernel size = patch_size and stride = patch_size
The shape of input tensor is [N, H, W, C], which will be transposed to
[N, C, H, W] and feed into Conv, finally the output is transposed back
to [N, H, W, C].
Args:
in_embed_dim: int, input feature dimension
out_embed_dim: int, output feature dimension
patch_size: kernel_size and stride
"""
def __init__(self, in_embed_dim, out_embed_dim, patch_size):
super().__init__()
self.proj = nn.Conv2D(in_embed_dim,
out_embed_dim,
kernel_size=patch_size,
stride=patch_size)
def forward(self, x):
x = x.transpose([0, 3, 1, 2])
x = self.proj(x)
x = x.transpose([0, 2, 3, 1])
return x
class PatchEmbedding(nn.Layer):
"""Patch Embeddings with stem conv layers
    If stem conv layers are set, the image is first fed into the stem layers;
    the stem contains 3 conv-bn-relu blocks.
Then a proj (conv2d) layer is applied as the patch embedding.
Args:
image_size: int, input image size, default: 224
stem_conv: bool, if apply stem conv layers, default: False
stem_stride: int, conv stride in stem layers, default: 1
patch_size: int, patch size for patch embedding (k and stride for proj conv), default: 8
in_channels: int, input channels, default: 3
hidden_dim: int, input dimension of patch embedding (out dim for stem), default: 64
embed_dim: int, output dimension of patch embedding, default: 384
"""
def __init__(self,
image_size=224,
stem_conv=False,
stem_stride=1,
patch_size=8,
in_channels=3,
hidden_dim=64,
embed_dim=384):
super().__init__()
assert patch_size in [4, 8, 16]
# define stem conv layers
if stem_conv:
self.stem = nn.Sequential(
nn.Conv2D(in_channels,
hidden_dim,
kernel_size=7,
stride=stem_stride,
padding=3,
bias_attr=False),
nn.BatchNorm2D(hidden_dim, momentum=0.9),
nn.ReLU(),
nn.Conv2D(hidden_dim,
hidden_dim,
kernel_size=3,
stride=1,
padding=1,
bias_attr=False),
nn.BatchNorm2D(hidden_dim, momentum=0.9),
nn.ReLU(),
nn.Conv2D(hidden_dim,
hidden_dim,
kernel_size=3,
stride=1,
padding=1,
bias_attr=False),
nn.BatchNorm2D(hidden_dim, momentum=0.9),
nn.ReLU(),
)
else:
self.stem = Identity()
# define patch embeddings
self.proj = nn.Conv2D(hidden_dim,
embed_dim,
kernel_size = patch_size // stem_stride,
stride = patch_size // stem_stride)
# num patches
self.num_patches = (image_size // patch_size) * (image_size // patch_size)
def forward(self, x):
x = self.stem(x) # Identity layer if stem is not set
x = self.proj(x)
return x
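# Shape sketch (illustrative numbers): with image_size=224, stem_conv=True, stem_stride=2
# and patch_size=8, the stem halves the resolution (224 -> 112) and the proj conv uses
# kernel = stride = 8 // 2 = 4, so the output feature map is [N, embed_dim, 28, 28],
# matching num_patches = (224 // 8) ** 2 = 784.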
class Mlp(nn.Layer):
""" MLP module
Impl using nn.Linear and activation is GELU, dropout is applied.
Ops: fc -> act -> dropout -> fc -> dropout
Attributes:
fc1: nn.Linear
fc2: nn.Linear
act: GELU
dropout1: dropout after fc1
dropout2: dropout after fc2
"""
def __init__(self, in_features, hidden_features, dropout=0.):
super(Mlp, self).__init__()
w_attr_1, b_attr_1 = self._init_weights()
self.fc1 = nn.Linear(in_features,
hidden_features,
weight_attr=w_attr_1,
bias_attr=b_attr_1)
w_attr_2, b_attr_2 = self._init_weights()
self.fc2 = nn.Linear(hidden_features,
in_features,
weight_attr=w_attr_2,
bias_attr=b_attr_2)
self.act = nn.GELU()
self.dropout = nn.Dropout(dropout)
def _init_weights(self):
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.XavierUniform())
bias_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=1e-6))
return weight_attr, bias_attr
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class OutlookerAttention(nn.Layer):
""" Outlooker Attention
    Outlooker attention first applies an nn.Linear op and unfolds (im2col) the output
    tensor, then reshapes it to get 'V'. 'Attn' is obtained by pool, linear and reshape
    ops applied to the input tensor. A matmul of 'Attn' and 'V' follows. Finally, a
    fold op and a linear projection produce the output.
Args:
dim: int, all heads dimension
num_heads: int, num of heads
kernel_size: int, size used in fold/unfold, and pool, default: 3
padding: int, pad used in fold/unfold, default: 1
stride: int, stride used in fold/unfold, and pool, default: 1
qkv_bias: bool, if True, qkv linear layer is using bias, default: False
qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
attention_dropout: float, dropout rate for attention dropout, default: 0.
dropout: float, dropout rate for projection dropout, default: 0.
"""
def __init__(self,
dim,
num_heads,
kernel_size=3,
padding=1,
stride=1,
qkv_bias=False,
qk_scale=None,
attention_dropout=0.,
dropout=0.):
super().__init__()
self.num_heads = num_heads
self.dim = dim
self.dim_head = dim // num_heads
self.scale = qk_scale or self.dim_head ** -0.5
self.kernel_size = kernel_size
self.padding = padding
self.stride = stride
self.v = nn.Linear(dim, dim, bias_attr=qkv_bias)
self.attn = nn.Linear(dim, (kernel_size ** 4) * num_heads)
self.attn_dropout = nn.Dropout(attention_dropout)
self.proj = nn.Linear(dim, dim)
self.proj_dropout = nn.Dropout(dropout)
self.softmax = nn.Softmax(axis=-1)
self.pool = nn.AvgPool2D(kernel_size=stride, stride=stride, ceil_mode=True)
self.unfold = paddle.nn.Unfold(kernel_sizes=kernel_size, strides=self.stride, paddings=self.padding)
def forward(self, x):
B, H, W, C = x.shape
v = self.v(x) # B, H, W, C
v = v.transpose([0, 3, 1, 2]) # B, C, H, W
h, w = math.ceil(H / self.stride), math.ceil(W / self.stride)
# current paddle version has bugs using nn.Unfold
v = paddle.nn.functional.unfold(v,
kernel_sizes=self.kernel_size,
paddings=self.padding,
strides=self.stride) # B, C*kernel_size*kernel_size, L(num of patches)
v = v.reshape([B,
self.num_heads,
C // self.num_heads,
self.kernel_size * self.kernel_size,
h * w])
v = v.transpose([0, 1, 4, 3, 2])
x = x.transpose([0, 3, 1, 2])
attn = self.pool(x)
attn = attn.transpose([0, 2, 3, 1]) # B, H', W', C
attn = self.attn(attn)
attn = attn.reshape([B,
h*w,
self.num_heads,
self.kernel_size * self.kernel_size,
self.kernel_size * self.kernel_size])
attn = attn.transpose([0, 2, 1, 3, 4])
attn = attn * self.scale
attn = self.softmax(attn)
attn = self.attn_dropout(attn)
z = paddle.matmul(attn, v)
z = z.transpose([0, 1, 4, 3, 2])
new_shape = [B, C * self.kernel_size * self.kernel_size, h * w]
z = z.reshape(new_shape)
        # Current Paddle does not have a Fold op, so we hacked our own fold op, see ./fold.py for details
z = fold(z, output_size=(H, W), kernel_size=self.kernel_size,
padding=self.padding, stride=self.stride)
z = z.transpose([0, 2, 3, 1])
z = self.proj(z)
z = self.proj_dropout(z)
return z
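# Minimal shape sketch (assumed values, not tied to a particular config): with dim=192
# and num_heads=6, an input of shape [N, H, W, C] = [2, 28, 28, 192] is mapped back to
# [2, 28, 28, 192]:
#   attn = OutlookerAttention(dim=192, num_heads=6)
#   out = attn(paddle.randn([2, 28, 28, 192]))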
class Outlooker(nn.Layer):
""" Outlooker
Outlooker contains norm layers, outlooker attention, mlp and droppath layers,
and residual is applied during forward.
Args:
dim: int, all heads dimension
num_heads: int, num of heads
kernel_size: int, size used in fold/unfold, and pool, default: 3
padding: int, pad used in fold/unfold, default: 1
mlp_ratio: float, ratio to multiply with dim for mlp hidden feature dim, default: 3.
stride: int, stride used in fold/unfold, and pool, default: 1
qkv_bias: bool, if True, qkv linear layer is using bias, default: False
qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
attention_dropout: float, dropout rate for attention dropout, default: 0.
dropout: float, dropout rate for projection dropout, default: 0.
"""
def __init__(self,
dim,
kernel_size,
padding,
stride=1,
num_heads=1,
mlp_ratio=3.,
attention_dropout=0.,
droppath=0.,
qkv_bias=False,
qk_scale=None):
super().__init__()
self.norm1 = nn.LayerNorm(dim)
self.attn = OutlookerAttention(dim,
num_heads,
kernel_size=kernel_size,
padding=padding,
stride=stride,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attention_dropout=attention_dropout)
        self.drop_path = DropPath(droppath) if droppath > 0. else Identity()
self.norm2 = nn.LayerNorm(dim)
self.mlp = Mlp(in_features=dim,
hidden_features=int(dim * mlp_ratio))
def forward(self, x):
h = x
x = self.norm1(x)
x = self.attn(x)
x = self.drop_path(x)
x = h + x
h = x
x = self.norm2(x)
x = self.mlp(x)
x = self.drop_path(x)
x = h + x
return x
class Attention(nn.Layer):
""" Attention
Regular Attention module same as ViT
Args:
dim: int, all heads dimension
num_heads: int, num of heads
qkv_bias: bool, if True, qkv linear layer is using bias, default: False
qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
attention_dropout: float, dropout rate for attention dropout, default: 0.
dropout: float, dropout rate for projection dropout, default: 0.
"""
def __init__(self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attention_dropout=0.,
dropout=0.):
super().__init__()
self.num_heads = num_heads
self.dim_head = dim // num_heads
self.scale = qk_scale or self.dim_head ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias_attr=qkv_bias)
self.attn_dropout = nn.Dropout(attention_dropout)
self.softmax = nn.Softmax(axis=-1)
self.proj = nn.Linear(dim, dim)
self.proj_dropout = nn.Dropout(dropout)
def forward(self, x):
B, H, W, C = x.shape
qkv = self.qkv(x)
qkv = qkv.reshape([B, H * W, 3, self.num_heads, C // self.num_heads])
qkv = qkv.transpose([2, 0, 3, 1, 4])
q, k, v = qkv[0], qkv[1], qkv[2]
attn = paddle.matmul(q, k, transpose_y=True)
attn = attn * self.scale
attn = self.softmax(attn)
attn = self.attn_dropout(attn)
z = paddle.matmul(attn, v)
z = z.transpose([0, 2, 1, 3])
z = z.reshape([B, H, W, C])
z = self.proj(z)
z = self.proj_dropout(z)
return z
class Transformer(nn.Layer):
"""Transformer
Transformer module, same as ViT
Args:
dim: int, all heads dimension
num_heads: int, num of heads
mlp_ratio: float, ratio to multiply with dim for mlp hidden feature dim, default: 4.
qkv_bias: bool, if True, qkv linear layer is using bias, default: False
qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
attention_dropout: float, dropout rate for attention dropout, default: 0.
dropout: float, dropout rate for projection dropout, default: 0.
"""
def __init__(self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
attention_dropout=0,
droppath=0.):
super().__init__()
self.norm1 = nn.LayerNorm(dim)
self.attn = Attention(dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attention_dropout=attention_dropout)
self.drop_path = DropPath(droppath) if droppath > 0. else Identity()
self.norm2 = nn.LayerNorm(dim)
self.mlp = Mlp(in_features=dim,
hidden_features=int(dim * mlp_ratio))
def forward(self, x):
h = x
x = self.norm1(x)
x = self.attn(x)
x = self.drop_path(x)
x = h + x
h = x
x = self.norm2(x)
x = self.mlp(x)
x = self.drop_path(x)
x = h + x
return x
class ClassAttention(nn.Layer):
""" Class Attention
    Class Attention module, same as CaiT
Args:
dim: int, all heads dimension
dim_head: int, single heads dimension, default: None
num_heads: int, num of heads
qkv_bias: bool, if True, qkv linear layer is using bias, default: False
qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
attention_dropout: float, dropout rate for attention dropout, default: 0.
dropout: float, dropout rate for projection dropout, default: 0.
"""
def __init__(self,
dim,
num_heads=8,
dim_head=None,
qkv_bias=False,
qk_scale=None,
attention_dropout=0.,
dropout=0.):
super().__init__()
self.num_heads = num_heads
if dim_head is not None:
self.dim_head = dim_head
else:
self.dim_head = dim // num_heads
self.scale = qk_scale or self.dim_head ** -0.5
self.kv = nn.Linear(dim,
self.dim_head * self.num_heads * 2,
bias_attr=qkv_bias)
self.q = nn.Linear(dim,
self.dim_head * self.num_heads,
bias_attr=qkv_bias)
self.attn_dropout = nn.Dropout(attention_dropout)
self.proj = nn.Linear(self.dim_head * self.num_heads, dim)
self.proj_dropout = nn.Dropout(dropout)
self.softmax = nn.Softmax(axis=-1)
def forward(self, x):
B, N, C = x.shape
kv = self.kv(x)
kv = kv.reshape([B, N, 2, self.num_heads, self.dim_head])
kv = kv.transpose([2, 0, 3, 1, 4])
k, v = kv[0], kv[1]
q = self.q(x[:, :1, :])
q = q.reshape([B, self.num_heads, 1, self.dim_head])
attn = paddle.matmul(q * self.scale, k, transpose_y=True)
attn = self.softmax(attn)
attn = self.attn_dropout(attn)
cls_embed = paddle.matmul(attn, v)
cls_embed = cls_embed.transpose([0, 2, 1, 3])
cls_embed = cls_embed.reshape([B, 1, self.dim_head * self.num_heads])
cls_embed = self.proj(cls_embed)
cls_embed = self.proj_dropout(cls_embed)
return cls_embed
class ClassBlock(nn.Layer):
"""Class Attention Block (CaiT)
CaiT module
Args:
dim: int, all heads dimension
num_heads: int, num of heads
mlp_ratio: float, ratio to multiply with dim for mlp hidden feature dim, default: 4.
qkv_bias: bool, if True, qkv linear layer is using bias, default: False
qk_scale: float, if None, qk_scale is dim_head ** -0.5, default: None
attention_dropout: float, dropout rate for attention dropout, default: 0.
dropout: float, dropout rate for projection dropout, default: 0.
"""
def __init__(self,
dim,
num_heads,
dim_head=None,
mlp_ratio=4.,
qkv_bias=False,
qk_scale=None,
dropout=0.,
attention_dropout=0.,
droppath=0.):
super().__init__()
self.norm1 = nn.LayerNorm(dim)
self.attn = ClassAttention(dim,
num_heads=num_heads,
dim_head=dim_head,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attention_dropout=attention_dropout,
dropout=dropout)
self.drop_path = DropPath(droppath) if droppath > 0. else Identity()
self.norm2 = nn.LayerNorm(dim)
self.mlp = Mlp(in_features=dim,
hidden_features=int(dim * mlp_ratio),
dropout=dropout)
def forward(self, x):
cls_embed = x[:, :1]
h = self.norm1(x)
h = self.attn(h)
h = self.drop_path(h)
cls_embed = cls_embed + h
h = cls_embed
cls_embed = self.norm2(cls_embed)
cls_embed = self.mlp(cls_embed)
cls_embed = self.drop_path(cls_embed)
cls_embed = h + cls_embed
out = paddle.concat([cls_embed, x[:, 1:]], axis=1)
return out
def rand_bbox(size, lam, scale=1):
"""
    Get a random bounding box for token labeling (https://github.com/zihangJiang/TokenLabeling)
    return: bounding box coordinates (bbx1, bby1, bbx2, bby2)
"""
W = size[1] // scale
H = size[2] // scale
cut_rat = np.sqrt(1. - lam)
cut_w = np.int(W * cut_rat)
cut_h = np.int(H * cut_rat)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
# item() get the python native dtype
return bbx1.item(), bby1.item(), bbx2.item(), bby2.item()
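# Illustrative call (made-up numbers): rand_bbox([B, 28, 28, C], lam=0.8, scale=2) draws a
# box on the 14x14 pooled grid covering roughly (1 - lam) = 20% of the grid area.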
class VOLO(nn.Layer):
def __init__(self,
layers,
image_size=224,
in_channels=3,
num_classes=1000,
patch_size=8,
stem_hidden_dim=64,
embed_dims=None,
num_heads=None,
downsamples=None,
outlook_attention=None,
mlp_ratios=None,
qkv_bias=False,
qk_scale=None,
dropout=0.,
attention_dropout=0.,
droppath=0.,
num_post_layers=2,
return_mean=False,
return_dense=True,
mix_token=True,
pooling_scale=2,
out_kernel=3,
out_stride=2,
out_padding=1):
super().__init__()
self.num_classes = num_classes
self.patch_embed = PatchEmbedding(image_size=image_size,
stem_conv=True,
stem_stride=2,
patch_size=patch_size,
in_channels=in_channels,
hidden_dim=stem_hidden_dim,
embed_dim=embed_dims[0])
self.pos_embed = paddle.create_parameter(
shape=[1,
image_size // patch_size // pooling_scale,
image_size // patch_size // pooling_scale,
embed_dims[-1]],
dtype='float32',
default_initializer=nn.initializer.Constant(0.0))
self.pos_dropout = nn.Dropout(dropout)
layer_list = []
for i in range(len(layers)):
blocks = []
for block_idx in range(layers[i]):
block_droppath = droppath * (
block_idx + sum(layers[:i])) / (sum(layers) - 1)
if outlook_attention[i]:
blocks.append(
copy.deepcopy(
Outlooker(dim=embed_dims[i],
kernel_size=out_kernel,
padding=out_padding,
stride=out_stride,
num_heads=num_heads[i],
mlp_ratio=mlp_ratios[i],
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attention_dropout=attention_dropout,
droppath=block_droppath)))
else:
blocks.append(
copy.deepcopy(
Transformer(dim=embed_dims[i],
num_heads=num_heads[i],
mlp_ratio=mlp_ratios[i],
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attention_dropout=attention_dropout,
droppath=block_droppath))
)
stage = nn.Sequential(*blocks)
layer_list.append(stage)
if downsamples[i]:
layer_list.append(copy.deepcopy(Downsample(embed_dims[i], embed_dims[i + 1], 2)))
self.model = nn.LayerList(layer_list)
# POST Layers (from CaiT)
self.post_model = None
if num_post_layers is not None:
self.post_model = nn.LayerList([
copy.deepcopy(
ClassBlock(dim=embed_dims[-1],
num_heads=num_heads[-1],
mlp_ratio=mlp_ratios[-1],
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attention_dropout=attention_dropout,
droppath=0.)
) for i in range(num_post_layers)
])
self.cls_token = paddle.create_parameter(
shape=[1, 1, embed_dims[-1]],
dtype='float32',
default_initializer=nn.initializer.TruncatedNormal(std=.02))
# Output
self.return_mean = return_mean # if True, return mean, not use class token
self.return_dense = return_dense # if True, return class token and all feature tokens
if return_dense:
assert not return_mean, "Cannot return both mean and dense"
self.mix_token = mix_token
self.pooling_scale = pooling_scale
if mix_token:
self.beta = 1.0
assert return_dense, 'return all tokens if mix_token is enabled'
if return_dense:
self.aux_head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else Identity()
self.norm = nn.LayerNorm(embed_dims[-1])
self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else Identity()
# For training:
# TODO: set pos_embed, trunc_normal
# TODO: set init weights for linear layers and layernorm layers
# TODO: set no weight decay for pos_embed and cls_token
def forward(self, x):
# Step1: patch embedding
x = self.patch_embed(x)
x = x.transpose([0, 2, 3, 1])
if self.mix_token and self.training:
lam = np.random.beta(self.beta, self.beta)
patch_h = x.shape[1] // self.pooling_scale
patch_w = x.shape[2] // self.pooling_scale
bbx1, bby1, bbx2, bby2 = rand_bbox(x.shape, lam, scale=self.pooling_scale)
temp_x = x.clone()
sbbx1 = self.pooling_scale * bbx1
sbby1 = self.pooling_scale * bby1
sbbx2 = self.pooling_scale * bbx2
sbby2 = self.pooling_scale * bby2
temp_x[:, sbbx1: sbbx2, sbby1: sbby2, :] = x.flip(axis=[0])[:, sbbx1: sbbx2, sbby1: sbby2, :]
x = temp_x
else:
bbx1, bby1, bbx2, bby2 = 0, 0, 0, 0
# Step2: 2-stages tokens learning
for idx, block in enumerate(self.model):
if idx == 2: # add pos_embed after outlooker blocks (and a downsample layer)
x = x + self.pos_embed
x = self.pos_dropout(x)
x = block(x)
x = x.reshape([x.shape[0], -1, x.shape[-1]]) # B, H*W, C
# Step3: post layers (from CaiT)
if self.post_model is not None:
cls_token = self.cls_token.expand([x.shape[0], -1, -1])
x = paddle.concat([cls_token, x], axis=1)
for block in self.post_model:
x = block(x)
x = self.norm(x)
if self.return_mean:
return self.head(x.mean(1))
x_cls = self.head(x[:, 0])
if not self.return_dense:
return x_cls
x_aux = self.aux_head(x[:, 1:])
if not self.training:
#NOTE: pytorch Tensor.max() returns a tuple of Tensor: (values, indices), while
# paddle Tensor.max() returns a single Tensor: values
return x_cls + 0.5 * x_aux.max(1)
if self.mix_token and self.training:
x_aux = x_aux.reshape([x_aux.shape[0], patch_h, patch_w, x_aux.shape[-1]])
temp_x = x_aux.clone()
temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(axis=[0])[:, bbx1:bbx2, bby1:bby2, :]
x_aux = temp_x
x_aux = x_aux.reshape([x_aux.shape[0], patch_h*patch_w, x_aux.shape[-1]])
return x_cls, x_aux, (bbx1, bby1, bbx2, bby2)
def build_volo(config):
"""build volo model using config"""
model = VOLO(image_size=config.DATA.IMAGE_SIZE,
layers=config.MODEL.TRANS.LAYERS,
embed_dims=config.MODEL.TRANS.EMBED_DIMS,
mlp_ratios=config.MODEL.TRANS.MLP_RATIOS,
downsamples=config.MODEL.TRANS.DOWNSAMPLES,
outlook_attention=config.MODEL.TRANS.OUTLOOK_ATTENTION,
stem_hidden_dim=config.MODEL.STEM_HIDDEN_DIM,
num_heads=config.MODEL.TRANS.NUM_HEADS,
qkv_bias=config.MODEL.TRANS.QKV_BIAS,
qk_scale=config.MODEL.TRANS.QK_SCALE)
return model
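# Rough usage sketch (the values below are illustrative, resembling a small VOLO-D1-like
# setup; in this file they are normally taken from the config object):
#   model = VOLO(layers=[4, 4, 8, 2],
#                embed_dims=[192, 384, 384, 384],
#                num_heads=[6, 12, 12, 12],
#                mlp_ratios=[3, 3, 3, 3],
#                downsamples=[True, False, False, False],
#                outlook_attention=[True, False, False, False])
#   model.eval()
#   logits = model(paddle.randn([1, 3, 224, 224]))  # -> [1, 1000] with the default num_classes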
|
[
"numpy.clip",
"numpy.sqrt",
"paddle.matmul",
"paddle.nn.Sequential",
"paddle.nn.LayerNorm",
"paddle.nn.LayerList",
"fold.fold",
"paddle.nn.AvgPool2D",
"numpy.random.beta",
"paddle.nn.initializer.XavierUniform",
"paddle.nn.Softmax",
"paddle.nn.Unfold",
"paddle.nn.Linear",
"paddle.nn.initializer.Normal",
"paddle.nn.BatchNorm2D",
"paddle.nn.initializer.Constant",
"numpy.int",
"paddle.nn.Dropout",
"math.ceil",
"paddle.nn.Conv2D",
"droppath.DropPath",
"paddle.nn.ReLU",
"paddle.nn.GELU",
"numpy.random.randint",
"paddle.nn.initializer.TruncatedNormal",
"paddle.nn.functional.unfold",
"paddle.concat"
] |
[((20249, 20267), 'numpy.sqrt', 'np.sqrt', (['(1.0 - lam)'], {}), '(1.0 - lam)\n', (20256, 20267), True, 'import numpy as np\n'), ((20279, 20298), 'numpy.int', 'np.int', (['(W * cut_rat)'], {}), '(W * cut_rat)\n', (20285, 20298), True, 'import numpy as np\n'), ((20311, 20330), 'numpy.int', 'np.int', (['(H * cut_rat)'], {}), '(H * cut_rat)\n', (20317, 20330), True, 'import numpy as np\n'), ((20355, 20375), 'numpy.random.randint', 'np.random.randint', (['W'], {}), '(W)\n', (20372, 20375), True, 'import numpy as np\n'), ((20385, 20405), 'numpy.random.randint', 'np.random.randint', (['H'], {}), '(H)\n', (20402, 20405), True, 'import numpy as np\n'), ((20418, 20448), 'numpy.clip', 'np.clip', (['(cx - cut_w // 2)', '(0)', 'W'], {}), '(cx - cut_w // 2, 0, W)\n', (20425, 20448), True, 'import numpy as np\n'), ((20460, 20490), 'numpy.clip', 'np.clip', (['(cy - cut_h // 2)', '(0)', 'H'], {}), '(cy - cut_h // 2, 0, H)\n', (20467, 20490), True, 'import numpy as np\n'), ((20502, 20532), 'numpy.clip', 'np.clip', (['(cx + cut_w // 2)', '(0)', 'W'], {}), '(cx + cut_w // 2, 0, W)\n', (20509, 20532), True, 'import numpy as np\n'), ((20544, 20574), 'numpy.clip', 'np.clip', (['(cy + cut_h // 2)', '(0)', 'H'], {}), '(cy + cut_h // 2, 0, H)\n', (20551, 20574), True, 'import numpy as np\n'), ((1670, 1756), 'paddle.nn.Conv2D', 'nn.Conv2D', (['in_embed_dim', 'out_embed_dim'], {'kernel_size': 'patch_size', 'stride': 'patch_size'}), '(in_embed_dim, out_embed_dim, kernel_size=patch_size, stride=\n patch_size)\n', (1679, 1756), True, 'import paddle.nn as nn\n'), ((4290, 4399), 'paddle.nn.Conv2D', 'nn.Conv2D', (['hidden_dim', 'embed_dim'], {'kernel_size': '(patch_size // stem_stride)', 'stride': '(patch_size // stem_stride)'}), '(hidden_dim, embed_dim, kernel_size=patch_size // stem_stride,\n stride=patch_size // stem_stride)\n', (4299, 4399), True, 'import paddle.nn as nn\n'), ((5229, 5315), 'paddle.nn.Linear', 'nn.Linear', (['in_features', 'hidden_features'], {'weight_attr': 'w_attr_1', 'bias_attr': 'b_attr_1'}), '(in_features, hidden_features, weight_attr=w_attr_1, bias_attr=\n b_attr_1)\n', (5238, 5315), True, 'import paddle.nn as nn\n'), ((5472, 5558), 'paddle.nn.Linear', 'nn.Linear', (['hidden_features', 'in_features'], {'weight_attr': 'w_attr_2', 'bias_attr': 'b_attr_2'}), '(hidden_features, in_features, weight_attr=w_attr_2, bias_attr=\n b_attr_2)\n', (5481, 5558), True, 'import paddle.nn as nn\n'), ((5660, 5669), 'paddle.nn.GELU', 'nn.GELU', ([], {}), '()\n', (5667, 5669), True, 'import paddle.nn as nn\n'), ((5693, 5712), 'paddle.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (5703, 5712), True, 'import paddle.nn as nn\n'), ((7760, 7799), 'paddle.nn.Linear', 'nn.Linear', (['dim', 'dim'], {'bias_attr': 'qkv_bias'}), '(dim, dim, bias_attr=qkv_bias)\n', (7769, 7799), True, 'import paddle.nn as nn\n'), ((7820, 7864), 'paddle.nn.Linear', 'nn.Linear', (['dim', '(kernel_size ** 4 * num_heads)'], {}), '(dim, kernel_size ** 4 * num_heads)\n', (7829, 7864), True, 'import paddle.nn as nn\n'), ((7895, 7924), 'paddle.nn.Dropout', 'nn.Dropout', (['attention_dropout'], {}), '(attention_dropout)\n', (7905, 7924), True, 'import paddle.nn as nn\n'), ((7946, 7965), 'paddle.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (7955, 7965), True, 'import paddle.nn as nn\n'), ((7994, 8013), 'paddle.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (8004, 8013), True, 'import paddle.nn as nn\n'), ((8037, 8056), 'paddle.nn.Softmax', 'nn.Softmax', ([], {'axis': '(-1)'}), '(axis=-1)\n', (8047, 8056), 
True, 'import paddle.nn as nn\n'), ((8078, 8141), 'paddle.nn.AvgPool2D', 'nn.AvgPool2D', ([], {'kernel_size': 'stride', 'stride': 'stride', 'ceil_mode': '(True)'}), '(kernel_size=stride, stride=stride, ceil_mode=True)\n', (8090, 8141), True, 'import paddle.nn as nn\n'), ((8165, 8256), 'paddle.nn.Unfold', 'paddle.nn.Unfold', ([], {'kernel_sizes': 'kernel_size', 'strides': 'self.stride', 'paddings': 'self.padding'}), '(kernel_sizes=kernel_size, strides=self.stride, paddings=\n self.padding)\n', (8181, 8256), False, 'import paddle\n'), ((8538, 8648), 'paddle.nn.functional.unfold', 'paddle.nn.functional.unfold', (['v'], {'kernel_sizes': 'self.kernel_size', 'paddings': 'self.padding', 'strides': 'self.stride'}), '(v, kernel_sizes=self.kernel_size, paddings=self\n .padding, strides=self.stride)\n', (8565, 8648), False, 'import paddle\n'), ((9626, 9648), 'paddle.matmul', 'paddle.matmul', (['attn', 'v'], {}), '(attn, v)\n', (9639, 9648), False, 'import paddle\n'), ((9905, 10009), 'fold.fold', 'fold', (['z'], {'output_size': '(H, W)', 'kernel_size': 'self.kernel_size', 'padding': 'self.padding', 'stride': 'self.stride'}), '(z, output_size=(H, W), kernel_size=self.kernel_size, padding=self.\n padding, stride=self.stride)\n', (9909, 10009), False, 'from fold import fold\n'), ((11387, 11404), 'paddle.nn.LayerNorm', 'nn.LayerNorm', (['dim'], {}), '(dim)\n', (11399, 11404), True, 'import paddle.nn as nn\n'), ((11963, 11980), 'paddle.nn.LayerNorm', 'nn.LayerNorm', (['dim'], {}), '(dim)\n', (11975, 11980), True, 'import paddle.nn as nn\n'), ((13236, 13279), 'paddle.nn.Linear', 'nn.Linear', (['dim', '(dim * 3)'], {'bias_attr': 'qkv_bias'}), '(dim, dim * 3, bias_attr=qkv_bias)\n', (13245, 13279), True, 'import paddle.nn as nn\n'), ((13308, 13337), 'paddle.nn.Dropout', 'nn.Dropout', (['attention_dropout'], {}), '(attention_dropout)\n', (13318, 13337), True, 'import paddle.nn as nn\n'), ((13361, 13380), 'paddle.nn.Softmax', 'nn.Softmax', ([], {'axis': '(-1)'}), '(axis=-1)\n', (13371, 13380), True, 'import paddle.nn as nn\n'), ((13401, 13420), 'paddle.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (13410, 13420), True, 'import paddle.nn as nn\n'), ((13449, 13468), 'paddle.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (13459, 13468), True, 'import paddle.nn as nn\n'), ((13733, 13770), 'paddle.matmul', 'paddle.matmul', (['q', 'k'], {'transpose_y': '(True)'}), '(q, k, transpose_y=True)\n', (13746, 13770), False, 'import paddle\n'), ((13890, 13912), 'paddle.matmul', 'paddle.matmul', (['attn', 'v'], {}), '(attn, v)\n', (13903, 13912), False, 'import paddle\n'), ((14938, 14955), 'paddle.nn.LayerNorm', 'nn.LayerNorm', (['dim'], {}), '(dim)\n', (14950, 14955), True, 'import paddle.nn as nn\n'), ((15305, 15322), 'paddle.nn.LayerNorm', 'nn.LayerNorm', (['dim'], {}), '(dim)\n', (15317, 15322), True, 'import paddle.nn as nn\n'), ((16774, 16844), 'paddle.nn.Linear', 'nn.Linear', (['dim', '(self.dim_head * self.num_heads * 2)'], {'bias_attr': 'qkv_bias'}), '(dim, self.dim_head * self.num_heads * 2, bias_attr=qkv_bias)\n', (16783, 16844), True, 'import paddle.nn as nn\n'), ((16918, 16984), 'paddle.nn.Linear', 'nn.Linear', (['dim', '(self.dim_head * self.num_heads)'], {'bias_attr': 'qkv_bias'}), '(dim, self.dim_head * self.num_heads, bias_attr=qkv_bias)\n', (16927, 16984), True, 'import paddle.nn as nn\n'), ((17067, 17096), 'paddle.nn.Dropout', 'nn.Dropout', (['attention_dropout'], {}), '(attention_dropout)\n', (17077, 17096), True, 'import paddle.nn as nn\n'), ((17117, 17163), 
'paddle.nn.Linear', 'nn.Linear', (['(self.dim_head * self.num_heads)', 'dim'], {}), '(self.dim_head * self.num_heads, dim)\n', (17126, 17163), True, 'import paddle.nn as nn\n'), ((17192, 17211), 'paddle.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (17202, 17211), True, 'import paddle.nn as nn\n'), ((17235, 17254), 'paddle.nn.Softmax', 'nn.Softmax', ([], {'axis': '(-1)'}), '(axis=-1)\n', (17245, 17254), True, 'import paddle.nn as nn\n'), ((17578, 17628), 'paddle.matmul', 'paddle.matmul', (['(q * self.scale)', 'k'], {'transpose_y': '(True)'}), '(q * self.scale, k, transpose_y=True)\n', (17591, 17628), False, 'import paddle\n'), ((17723, 17745), 'paddle.matmul', 'paddle.matmul', (['attn', 'v'], {}), '(attn, v)\n', (17736, 17745), False, 'import paddle\n'), ((18929, 18946), 'paddle.nn.LayerNorm', 'nn.LayerNorm', (['dim'], {}), '(dim)\n', (18941, 18946), True, 'import paddle.nn as nn\n'), ((19427, 19444), 'paddle.nn.LayerNorm', 'nn.LayerNorm', (['dim'], {}), '(dim)\n', (19439, 19444), True, 'import paddle.nn as nn\n'), ((19956, 20000), 'paddle.concat', 'paddle.concat', (['[cls_embed, x[:, 1:]]'], {'axis': '(1)'}), '([cls_embed, x[:, 1:]], axis=1)\n', (19969, 20000), False, 'import paddle\n'), ((22340, 22359), 'paddle.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (22350, 22359), True, 'import paddle.nn as nn\n'), ((24072, 24096), 'paddle.nn.LayerList', 'nn.LayerList', (['layer_list'], {}), '(layer_list)\n', (24084, 24096), True, 'import paddle.nn as nn\n'), ((25575, 25603), 'paddle.nn.LayerNorm', 'nn.LayerNorm', (['embed_dims[-1]'], {}), '(embed_dims[-1])\n', (25587, 25603), True, 'import paddle.nn as nn\n'), ((8411, 8437), 'math.ceil', 'math.ceil', (['(H / self.stride)'], {}), '(H / self.stride)\n', (8420, 8437), False, 'import math\n'), ((8439, 8465), 'math.ceil', 'math.ceil', (['(W / self.stride)'], {}), '(W / self.stride)\n', (8448, 8465), False, 'import math\n'), ((15232, 15250), 'droppath.DropPath', 'DropPath', (['droppath'], {}), '(droppath)\n', (15240, 15250), False, 'from droppath import DropPath\n'), ((19354, 19372), 'droppath.DropPath', 'DropPath', (['droppath'], {}), '(droppath)\n', (19362, 19372), False, 'from droppath import DropPath\n'), ((23860, 23882), 'paddle.nn.Sequential', 'nn.Sequential', (['*blocks'], {}), '(*blocks)\n', (23873, 23882), True, 'import paddle.nn as nn\n'), ((25625, 25663), 'paddle.nn.Linear', 'nn.Linear', (['embed_dims[-1]', 'num_classes'], {}), '(embed_dims[-1], num_classes)\n', (25634, 25663), True, 'import paddle.nn as nn\n'), ((26098, 26134), 'numpy.random.beta', 'np.random.beta', (['self.beta', 'self.beta'], {}), '(self.beta, self.beta)\n', (26112, 26134), True, 'import numpy as np\n'), ((27253, 27290), 'paddle.concat', 'paddle.concat', (['[cls_token, x]'], {'axis': '(1)'}), '([cls_token, x], axis=1)\n', (27266, 27290), False, 'import paddle\n'), ((3220, 3321), 'paddle.nn.Conv2D', 'nn.Conv2D', (['in_channels', 'hidden_dim'], {'kernel_size': '(7)', 'stride': 'stem_stride', 'padding': '(3)', 'bias_attr': '(False)'}), '(in_channels, hidden_dim, kernel_size=7, stride=stem_stride,\n padding=3, bias_attr=False)\n', (3229, 3321), True, 'import paddle.nn as nn\n'), ((3465, 3505), 'paddle.nn.BatchNorm2D', 'nn.BatchNorm2D', (['hidden_dim'], {'momentum': '(0.9)'}), '(hidden_dim, momentum=0.9)\n', (3479, 3505), True, 'import paddle.nn as nn\n'), ((3523, 3532), 'paddle.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3530, 3532), True, 'import paddle.nn as nn\n'), ((3550, 3640), 'paddle.nn.Conv2D', 'nn.Conv2D', (['hidden_dim', 'hidden_dim'], 
{'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias_attr': '(False)'}), '(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1,\n bias_attr=False)\n', (3559, 3640), True, 'import paddle.nn as nn\n'), ((3784, 3824), 'paddle.nn.BatchNorm2D', 'nn.BatchNorm2D', (['hidden_dim'], {'momentum': '(0.9)'}), '(hidden_dim, momentum=0.9)\n', (3798, 3824), True, 'import paddle.nn as nn\n'), ((3842, 3851), 'paddle.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3849, 3851), True, 'import paddle.nn as nn\n'), ((3869, 3959), 'paddle.nn.Conv2D', 'nn.Conv2D', (['hidden_dim', 'hidden_dim'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias_attr': '(False)'}), '(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1,\n bias_attr=False)\n', (3878, 3959), True, 'import paddle.nn as nn\n'), ((4103, 4143), 'paddle.nn.BatchNorm2D', 'nn.BatchNorm2D', (['hidden_dim'], {'momentum': '(0.9)'}), '(hidden_dim, momentum=0.9)\n', (4117, 4143), True, 'import paddle.nn as nn\n'), ((4161, 4170), 'paddle.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4168, 4170), True, 'import paddle.nn as nn\n'), ((5798, 5835), 'paddle.nn.initializer.XavierUniform', 'paddle.nn.initializer.XavierUniform', ([], {}), '()\n', (5833, 5835), False, 'import paddle\n'), ((5886, 5925), 'paddle.nn.initializer.Normal', 'paddle.nn.initializer.Normal', ([], {'std': '(1e-06)'}), '(std=1e-06)\n', (5914, 5925), False, 'import paddle\n'), ((22282, 22310), 'paddle.nn.initializer.Constant', 'nn.initializer.Constant', (['(0.0)'], {}), '(0.0)\n', (22305, 22310), True, 'import paddle.nn as nn\n'), ((25481, 25519), 'paddle.nn.Linear', 'nn.Linear', (['embed_dims[-1]', 'num_classes'], {}), '(embed_dims[-1], num_classes)\n', (25490, 25519), True, 'import paddle.nn as nn\n'), ((24890, 24930), 'paddle.nn.initializer.TruncatedNormal', 'nn.initializer.TruncatedNormal', ([], {'std': '(0.02)'}), '(std=0.02)\n', (24920, 24930), True, 'import paddle.nn as nn\n')]
|
# -*- coding: UTF-8 -*-
import cv2
import numpy as np
# Affine transformation (correcting image position)
def img_three(imgPath):
    # --------------------------- three points determine a transformation matrix ---------------------------
    """
    Three point pairs determine a plane: their correspondence gives the
    transformation matrix, which is then applied with warpAffine.
    """
img = cv2.imread(imgPath)
rows,cols,_ = img.shape
points1 = np.float32([[50,50],[200,50],[50,200]])
points2 = np.float32([[10,100],[200,50],[100,250]])
matrix = cv2.getAffineTransform(points1,points2)
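    # warpAffine applies the 2x3 affine matrix returned above to every pixel of the source image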
output = cv2.warpAffine(img,matrix,(cols,rows))
cv2.imshow('input1',img)
cv2.imshow('output1',output)
cv2.waitKey(0)
cv2.destroyAllWindows()
def img_four(imgPath):
    # --------------------------- four points determine a transformation matrix ---------------------------
    """
    Perspective transformation:
    four point pairs first determine a 3*3 transformation matrix (cv2.getPerspectiveTransform),
    which is then applied to the image with cv2.warpPerspective.
    """
img = cv2.imread(imgPath)
rows,cols,_ = img.shape
points1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
points2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
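    # four point correspondences determine a 3x3 perspective (homography) matrix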
matrix = cv2.getPerspectiveTransform(points1,points2)
    # map the plane defined by four points onto the plane defined by four other points
output = cv2.warpPerspective(img, matrix, (cols, rows))
    # apply the transformation with the warpPerspective function
cv2.imshow('input2',img)
cv2.imshow('output2',output)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
imgPath = 'src/python-opencv/a.jpg'
# img_three(imgPath)
img_four(imgPath)
|
[
"cv2.warpAffine",
"cv2.getPerspectiveTransform",
"cv2.imshow",
"cv2.waitKey",
"cv2.warpPerspective",
"cv2.destroyAllWindows",
"cv2.getAffineTransform",
"cv2.imread",
"numpy.float32"
] |
[((260, 279), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (270, 279), False, 'import cv2\n'), ((322, 366), 'numpy.float32', 'np.float32', (['[[50, 50], [200, 50], [50, 200]]'], {}), '([[50, 50], [200, 50], [50, 200]])\n', (332, 366), True, 'import numpy as np\n'), ((376, 422), 'numpy.float32', 'np.float32', (['[[10, 100], [200, 50], [100, 250]]'], {}), '([[10, 100], [200, 50], [100, 250]])\n', (386, 422), True, 'import numpy as np\n'), ((432, 472), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['points1', 'points2'], {}), '(points1, points2)\n', (454, 472), False, 'import cv2\n'), ((485, 526), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'matrix', '(cols, rows)'], {}), '(img, matrix, (cols, rows))\n', (499, 526), False, 'import cv2\n'), ((529, 554), 'cv2.imshow', 'cv2.imshow', (['"""input1"""', 'img'], {}), "('input1', img)\n", (539, 554), False, 'import cv2\n'), ((558, 587), 'cv2.imshow', 'cv2.imshow', (['"""output1"""', 'output'], {}), "('output1', output)\n", (568, 587), False, 'import cv2\n'), ((591, 605), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (602, 605), False, 'import cv2\n'), ((610, 633), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (631, 633), False, 'import cv2\n'), ((872, 891), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (882, 891), False, 'import cv2\n'), ((934, 990), 'numpy.float32', 'np.float32', (['[[56, 65], [368, 52], [28, 387], [389, 390]]'], {}), '([[56, 65], [368, 52], [28, 387], [389, 390]])\n', (944, 990), True, 'import numpy as np\n'), ((998, 1050), 'numpy.float32', 'np.float32', (['[[0, 0], [300, 0], [0, 300], [300, 300]]'], {}), '([[0, 0], [300, 0], [0, 300], [300, 300]])\n', (1008, 1050), True, 'import numpy as np\n'), ((1058, 1103), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['points1', 'points2'], {}), '(points1, points2)\n', (1085, 1103), False, 'import cv2\n'), ((1146, 1192), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'matrix', '(cols, rows)'], {}), '(img, matrix, (cols, rows))\n', (1165, 1192), False, 'import cv2\n'), ((1229, 1254), 'cv2.imshow', 'cv2.imshow', (['"""input2"""', 'img'], {}), "('input2', img)\n", (1239, 1254), False, 'import cv2\n'), ((1258, 1287), 'cv2.imshow', 'cv2.imshow', (['"""output2"""', 'output'], {}), "('output2', output)\n", (1268, 1287), False, 'import cv2\n'), ((1291, 1305), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1302, 1305), False, 'import cv2\n'), ((1310, 1333), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1331, 1333), False, 'import cv2\n')]
|
import matplotlib.pyplot as plt
import numpy as np
def plot(ac):
ac=np.array(ac)
ac=ac.reshape((28,28))
ac=[[int(ac2+0.48) for ac2 in ac1] for ac1 in ac]
plt.imshow(ac)
f=np.load("model.npz",allow_pickle=True)
f2=np.load("model2.npz",allow_pickle=True)
f3=np.load("model3.npz",allow_pickle=True)
f4=np.load("model4.npz",allow_pickle=True)
f5=np.load("model5.npz",allow_pickle=True)
model=f["model"]
print("exp model1")
model2=f2["model"]
print("exp model2")
model3=f3["model"]
print("exp model3")
model4=f4["model"]
print("exp model4")
model5=f5["model"]
print("exp model5")
models=[model,model2,model3,model4,model5]
t=f["t"]
t0=np.mean(t[np.where(t<0.5)])
t1=np.mean(t[np.where(t>0.5)])
def runmodel(inp,model):
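    # forward pass: multiply by each weight matrix, apply ReLU, then return the mean over the last axis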
x=inp
for l in model:
x=np.dot(x,l)
x=np.maximum(x,0)
x=np.mean(x,axis=-1)
return x
def lossbybool(inp):
"""input either 0 or 1"""
dt=t1-t0
    inp*=dt
inp+=t0
ret=(runmodel(inp,models[0])-m7[0])**2
    for zw,mea in zip(models[1:],m7[1:]):
        ret+=(runmodel(inp,zw)-mea)**2
return ret/len(m7)
m7=[np.mean(runmodel(t,m)) for m in models]
print("done loading")
|
[
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.where",
"numpy.array",
"numpy.dot",
"numpy.maximum",
"numpy.load"
] |
[((185, 224), 'numpy.load', 'np.load', (['"""model.npz"""'], {'allow_pickle': '(True)'}), "('model.npz', allow_pickle=True)\n", (192, 224), True, 'import numpy as np\n'), ((227, 267), 'numpy.load', 'np.load', (['"""model2.npz"""'], {'allow_pickle': '(True)'}), "('model2.npz', allow_pickle=True)\n", (234, 267), True, 'import numpy as np\n'), ((270, 310), 'numpy.load', 'np.load', (['"""model3.npz"""'], {'allow_pickle': '(True)'}), "('model3.npz', allow_pickle=True)\n", (277, 310), True, 'import numpy as np\n'), ((313, 353), 'numpy.load', 'np.load', (['"""model4.npz"""'], {'allow_pickle': '(True)'}), "('model4.npz', allow_pickle=True)\n", (320, 353), True, 'import numpy as np\n'), ((356, 396), 'numpy.load', 'np.load', (['"""model5.npz"""'], {'allow_pickle': '(True)'}), "('model5.npz', allow_pickle=True)\n", (363, 396), True, 'import numpy as np\n'), ((73, 85), 'numpy.array', 'np.array', (['ac'], {}), '(ac)\n', (81, 85), True, 'import numpy as np\n'), ((167, 181), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ac'], {}), '(ac)\n', (177, 181), True, 'import matplotlib.pyplot as plt\n'), ((801, 820), 'numpy.mean', 'np.mean', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (808, 820), True, 'import numpy as np\n'), ((656, 673), 'numpy.where', 'np.where', (['(t < 0.5)'], {}), '(t < 0.5)\n', (664, 673), True, 'import numpy as np\n'), ((687, 704), 'numpy.where', 'np.where', (['(t > 0.5)'], {}), '(t > 0.5)\n', (695, 704), True, 'import numpy as np\n'), ((763, 775), 'numpy.dot', 'np.dot', (['x', 'l'], {}), '(x, l)\n', (769, 775), True, 'import numpy as np\n'), ((781, 797), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (791, 797), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import time
from datetime import timedelta
import os
# Importing a helper module for the functions of the Inception model.
import inception
import cifar10
from cifar10 import num_classes
from inception import transfer_values_cache
#Importing the color map for plotting each class with different color.
import matplotlib.cm as color_map
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import confusion_matrix
cifar10.data_path = "data/CIFAR-10/"
cifar10.maybe_download_and_extract()
class_names = cifar10.load_class_names()
print(class_names)
print('Loading the training set...')
training_images, training_cls_integers, trainig_one_hot_labels = cifar10.load_training_data()
print('Loading the test set...')
testing_images, testing_cls_integers, testing_one_hot_labels = cifar10.load_test_data()
print("-Number of images in the training set:\t\t{}".format(len(training_images)))
print("-Number of images in the testing set:\t\t{}".format(len(testing_images)))
def plot_imgs(imgs, true_class, predicted_class=None):
assert len(imgs) == len(true_class)
    # Creating placeholders for 9 subplots
    fig, axes = plt.subplots(3, 3)
    # Adjusting spacing.
if predicted_class is None:
hspace = 0.3
else:
hspace = 0.6
fig.subplots_adjust(hspace=hspace, wspace=0.3)
for i, ax in enumerate(axes.flat):
# There may be less than 9 images, ensure it doesn't crash.
if i < len(imgs):
# Plot image.
ax.imshow(imgs[i],
interpolation='nearest')
# Get the actual name of the true class from the class_names array
true_class_name = class_names[true_class[i]]
# Showing labels for the predicted and true classes
if predicted_class is None:
xlabel = "True: {0}".format(true_class_name)
else:
# Name of the predicted class.
predicted_class_name = class_names[predicted_class[i]]
xlabel = "True: {0}\nPred: {1}".format(true_class_name, predicted_class_name)
ax.set_xlabel(xlabel)
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
plt.show()
# get the first 9 images in the test set
imgs = testing_images[0:9]
# Get the integer representation of the true class.
true_class = testing_cls_integers[0:9]
# Plotting the images
plot_imgs(imgs=imgs, true_class=true_class)
print('Downloading the pretrained inception v3 model')
inception.maybe_download()
# Loading the inception model so that we can initialize it with the pretrained weights and customize it for our model
inception_model = inception.Inception()
file_path_train = os.path.join(cifar10.data_path, 'inception_cifar10_train.pkl')
file_path_test = os.path.join(cifar10.data_path, 'inception_cifar10_test.pkl')
print("Processing Inception transfer-values for the training images of Cifar-10 ...")
# First we need to scale the imgs to fit the Inception model requirements as it requires all pixels to be from 0 to 255,
# while our training examples of the CIFAR-10 pixels are between 0.0 and 1.0
imgs_scaled = training_images * 255.0
# Checking if the transfer-values for our training images are already calculated and loading them; if not, calculate and save them.
transfer_values_training = transfer_values_cache(cache_path=file_path_train,
images=imgs_scaled,
model=inception_model)
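# Each transfer value is the 2048-dimensional output of Inception's layer just before its final classifier, computed once per image and cached on disk by transfer_values_cache.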
print("Processing Inception transfer-values for the testing images of Cifar-10 ...")
# First we need to scale the imgs to fit the Inception model requirements as it requires all pixels to be from 0 to 255,
# while our testing examples of the CIFAR-10 pixels are between 0.0 and 1.0
imgs_scaled = testing_images * 255.0
# Checking if the transfer-values for our testing images are already calculated and loading them; if not, calculate and save them.
transfer_values_testing = transfer_values_cache(cache_path=file_path_test,
images=imgs_scaled,
model=inception_model)
print('Shape of the training set transfer values...')
print(transfer_values_training.shape)
print('Shape of the testing set transfer values...')
print(transfer_values_testing.shape)
def plot_transferValues(ind):
print("Original input image:")
# Plot the image at index ind of the test set.
plt.imshow(testing_images[ind], interpolation='nearest')
plt.show()
print("Transfer values using Inception model:")
# Visualize the transfer values as an image.
transferValues_img = transfer_values_testing[ind]
transferValues_img = transferValues_img.reshape((32, 64))
# Plotting the transfer values image.
plt.imshow(transferValues_img, interpolation='nearest', cmap='Reds')
plt.show()
plot_transferValues(ind=15)
pca_obj = PCA(n_components=2)
subset_transferValues = transfer_values_training[0:3000]
cls_integers = testing_cls_integers[0:3000]
print('Shape of a subset form the transfer values...')
print(subset_transferValues.shape)
reduced_transferValues = pca_obj.fit_transform(subset_transferValues)
print('Shape of the reduced version of the transfer values...')
print(reduced_transferValues.shape)
def plot_reduced_transferValues(transferValues, cls_integers):
# Create a color-map with a different color for each class.
c_map = color_map.rainbow(np.linspace(0.0, 1.0, num_classes))
# Getting the color for each sample.
colors = c_map[cls_integers]
# Getting the x and y values.
x_val = transferValues[:, 0]
y_val = transferValues[:, 1]
# Plot the transfer values in a scatter plot
plt.scatter(x_val, y_val, color=colors)
plt.show()
plot_reduced_transferValues(reduced_transferValues, cls_integers)
pca_obj = PCA(n_components=50)
transferValues_50d = pca_obj.fit_transform(subset_transferValues)
tsne_obj = TSNE(n_components=2)
reduced_transferValues = tsne_obj.fit_transform(transferValues_50d)
print('Shape of the reduced version of the transfer values using t-SNE method...')
print(reduced_transferValues.shape)
plot_reduced_transferValues(reduced_transferValues, cls_integers)
transferValues_arrLength = inception_model.transfer_len
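# Placeholders for the cached transfer values and the one-hot true labels that feed the new classification layers.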
input_values = tf.placeholder(tf.float32, shape=[None, transferValues_arrLength], name='input_values')
y_actual = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_actual')
y_actual_cls = tf.argmax(y_actual, axis=1)
def new_weights(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.05))
def new_biases(length):
return tf.Variable(tf.constant(0.05, shape=[length]))
def new_fc_layer(input, # The previous layer.
num_inputs, # Num. inputs from prev. layer.
num_outputs, # Num. outputs.
use_relu=True): # Use Rectified Linear Unit (ReLU)?
# Create new weights and biases.
weights = new_weights(shape=[num_inputs, num_outputs])
biases = new_biases(length=num_outputs)
# Calculate the layer as the matrix multiplication of
# the input and weights, and then add the bias-values.
layer = tf.matmul(input, weights) + biases
# Use ReLU?
if use_relu:
layer = tf.nn.relu(layer)
return layer
# First fully-connected layer.
layer_fc1 = new_fc_layer(input=input_values,
num_inputs=2048,
num_outputs=1024,
use_relu=True)
# Second fully-connected layer.
layer_fc2 = new_fc_layer(input=layer_fc1,
num_inputs=1024,
num_outputs=num_classes,
use_relu=False)
# Predicted class-label.
y_predicted = tf.nn.softmax(layer_fc2)
# Cross-entropy for the classification of each image.
cross_entropy = \
tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,
labels=y_actual)
# Loss aka. cost-measure.
# This is the scalar value that must be minimized.
loss = tf.reduce_mean(cross_entropy)
step = tf.Variable(initial_value=0,
name='step', trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss, step)
y_predicted_cls = tf.argmax(y_predicted, axis=1)
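# Compare the predicted class indices with the true ones and average the boolean result to obtain the accuracy.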
correct_prediction = tf.equal(y_predicted_cls, y_actual_cls)
model_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session = tf.Session()
session.run(tf.global_variables_initializer())
training_batch_size = 32
def select_random_batch():
# Number of images (transfer-values) in the training-set.
num_imgs = len(transfer_values_training)
# Create a random index.
ind = np.random.choice(num_imgs,
size=training_batch_size,
replace=False)
# Use the random index to select random x and y-values.
# We use the transfer-values instead of images as x-values.
x_batch = transfer_values_training[ind]
y_batch = trainig_one_hot_labels[ind]
return x_batch, y_batch
def optimize(num_iterations):
for i in range(num_iterations):
        # Selecting a random batch of images for training
# where the transfer values of the images will be stored in input_batch
# and the actual labels of those batch of images will be stored in y_actual_batch
input_batch, y_actual_batch = select_random_batch()
# storing the batch in a dict with the proper names
# such as the input placeholder variables that we define above.
feed_dict = {input_values: input_batch,
y_actual: y_actual_batch}
# Now we call the optimizer of this batch of images
# TensorFlow will automatically feed the values of the dict we created above
# to the model input placeholder variables that we defined above.
i_global, _ = session.run([step, optimizer],
feed_dict=feed_dict)
# print the accuracy every 100 steps.
if (i_global % 100 == 0) or (i == num_iterations - 1):
# Calculate the accuracy on the training-batch.
batch_accuracy = session.run(model_accuracy,
feed_dict=feed_dict)
msg = "Step: {0:>6}, Training Accuracy: {1:>6.1%}"
print(msg.format(i_global, batch_accuracy))
def plot_errors(cls_predicted, cls_correct):
# cls_predicted is an array of the predicted class-number for
# all images in the test-set.
# cls_correct is an array with boolean values to indicate
# whether is the model predicted the correct class or not.
# Negate the boolean array.
incorrect = (cls_correct == False)
# Get the images from the test-set that have been
# incorrectly classified.
incorrectly_classified_images = testing_images[incorrect]
# Get the predicted classes for those images.
cls_predicted = cls_predicted[incorrect]
# Get the true classes for those images.
true_class = testing_cls_integers[incorrect]
n = min(9, len(incorrectly_classified_images))
# Plot the first n images.
plot_imgs(imgs=incorrectly_classified_images[0:n],
true_class=true_class[0:n],
predicted_class=cls_predicted[0:n])
def plot_confusionMatrix(cls_predicted):
# cls_predicted array of all the predicted
# classes numbers in the test.
    # Call the confusion matrix function of sklearn
cm = confusion_matrix(y_true=testing_cls_integers,
y_pred=cls_predicted)
# Printing the confusion matrix
for i in range(num_classes):
# Append the class-name to each line.
class_name = "({}) {}".format(i, class_names[i])
print(cm[i, :], class_name)
# labeling each column of the confusion matrix with the class number
cls_numbers = [" ({0})".format(i) for i in range(num_classes)]
print("".join(cls_numbers))
# Split the data-set in batches of this size to limit RAM usage.
batch_size = 128
def predict_class(transferValues, labels, cls_true):
# Number of images.
num_imgs = len(transferValues)
# Allocate an array for the predicted classes which
# will be calculated in batches and filled into this array.
cls_predicted = np.zeros(shape=num_imgs, dtype=np.int)
# Now calculate the predicted classes for the batches.
# We will just iterate through all the batches.
# There might be a more clever and Pythonic way of doing this.
# The starting index for the next batch is denoted i.
i = 0
while i < num_imgs:
# The ending index for the next batch is denoted j.
j = min(i + batch_size, num_imgs)
# Create a feed-dict with the images and labels
# between index i and j.
feed_dict = {input_values: transferValues[i:j],
y_actual: labels[i:j]}
# Calculate the predicted class using TensorFlow.
cls_predicted[i:j] = session.run(y_predicted_cls, feed_dict=feed_dict)
# Set the start-index for the next batch to the
# end-index of the current batch.
i = j
# Create a boolean array whether each image is correctly classified.
    correct = (cls_true == cls_predicted)
print(type(correct))
return correct, cls_predicted
def predict_class_test():
return predict_class(transferValues = transfer_values_testing,
                         labels = testing_one_hot_labels,
                         cls_true = testing_cls_integers)
def classification_accuracy(correct):
# When averaging a boolean array, False means 0 and True means 1.
# So we are calculating: number of True / len(correct) which is
# the same as the classification accuracy.
# Return the classification accuracy
# and the number of correct classifications.
return np.mean(correct), np.sum(correct)
def test_accuracy(show_example_errors=False,
show_confusion_matrix=False):
# For all the images in the test-set,
# calculate the predicted classes and whether they are correct.
correct, cls_pred = predict_class_test()
print(type(correct))
    # Classification accuracy and the number of correct classifications.
accuracy, num_correct = classification_accuracy(correct)
# Number of images being classified.
num_images = len(correct)
# Print the accuracy.
msg = "Test set accuracy: {0:.1%} ({1} / {2})"
print(msg.format(accuracy, num_correct, num_images))
# Plot some examples of mis-classifications, if desired.
if show_example_errors:
print("Example errors:")
plot_errors(cls_predicted=cls_pred, cls_correct=correct)
# Plot the confusion matrix, if desired.
if show_confusion_matrix:
print("Confusion Matrix:")
plot_confusionMatrix(cls_predicted=cls_pred)
test_accuracy(show_example_errors=True,
show_confusion_matrix=True)
optimize(num_iterations=1000)
test_accuracy(show_example_errors=True,
show_confusion_matrix=True)
|
[
"tensorflow.equal",
"tensorflow.nn.softmax",
"cifar10.load_training_data",
"inception.Inception",
"tensorflow.reduce_mean",
"tensorflow.cast",
"matplotlib.pyplot.imshow",
"numpy.mean",
"sklearn.decomposition.PCA",
"tensorflow.placeholder",
"tensorflow.Session",
"inception.maybe_download",
"sklearn.manifold.TSNE",
"numpy.linspace",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"matplotlib.pyplot.scatter",
"tensorflow.matmul",
"cifar10.load_test_data",
"tensorflow.train.AdamOptimizer",
"sklearn.metrics.confusion_matrix",
"inception.transfer_values_cache",
"tensorflow.Variable",
"numpy.random.choice",
"tensorflow.truncated_normal",
"matplotlib.pyplot.show",
"tensorflow.nn.relu",
"os.path.join",
"tensorflow.global_variables_initializer",
"numpy.sum",
"tensorflow.argmax",
"numpy.zeros",
"tensorflow.constant",
"cifar10.load_class_names",
"cifar10.maybe_download_and_extract",
"matplotlib.pyplot.subplots"
] |
[((574, 610), 'cifar10.maybe_download_and_extract', 'cifar10.maybe_download_and_extract', ([], {}), '()\n', (608, 610), False, 'import cifar10\n'), ((626, 652), 'cifar10.load_class_names', 'cifar10.load_class_names', ([], {}), '()\n', (650, 652), False, 'import cifar10\n'), ((775, 803), 'cifar10.load_training_data', 'cifar10.load_training_data', ([], {}), '()\n', (801, 803), False, 'import cifar10\n'), ((901, 925), 'cifar10.load_test_data', 'cifar10.load_test_data', ([], {}), '()\n', (923, 925), False, 'import cifar10\n'), ((2629, 2655), 'inception.maybe_download', 'inception.maybe_download', ([], {}), '()\n', (2653, 2655), False, 'import inception\n'), ((2789, 2810), 'inception.Inception', 'inception.Inception', ([], {}), '()\n', (2808, 2810), False, 'import inception\n'), ((2830, 2892), 'os.path.join', 'os.path.join', (['cifar10.data_path', '"""inception_cifar10_train.pkl"""'], {}), "(cifar10.data_path, 'inception_cifar10_train.pkl')\n", (2842, 2892), False, 'import os\n'), ((2910, 2971), 'os.path.join', 'os.path.join', (['cifar10.data_path', '"""inception_cifar10_test.pkl"""'], {}), "(cifar10.data_path, 'inception_cifar10_test.pkl')\n", (2922, 2971), False, 'import os\n'), ((3456, 3553), 'inception.transfer_values_cache', 'transfer_values_cache', ([], {'cache_path': 'file_path_train', 'images': 'imgs_scaled', 'model': 'inception_model'}), '(cache_path=file_path_train, images=imgs_scaled, model\n =inception_model)\n', (3477, 3553), False, 'from inception import transfer_values_cache\n'), ((4122, 4218), 'inception.transfer_values_cache', 'transfer_values_cache', ([], {'cache_path': 'file_path_test', 'images': 'imgs_scaled', 'model': 'inception_model'}), '(cache_path=file_path_test, images=imgs_scaled, model=\n inception_model)\n', (4143, 4218), False, 'from inception import transfer_values_cache\n'), ((5073, 5092), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (5076, 5092), False, 'from sklearn.decomposition import PCA\n'), ((6016, 6036), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(50)'}), '(n_components=50)\n', (6019, 6036), False, 'from sklearn.decomposition import PCA\n'), ((6114, 6134), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (6118, 6134), False, 'from sklearn.manifold import TSNE\n'), ((6463, 6555), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, transferValues_arrLength]', 'name': '"""input_values"""'}), "(tf.float32, shape=[None, transferValues_arrLength], name=\n 'input_values')\n", (6477, 6555), True, 'import tensorflow as tf\n'), ((6562, 6632), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, num_classes]', 'name': '"""y_actual"""'}), "(tf.float32, shape=[None, num_classes], name='y_actual')\n", (6576, 6632), True, 'import tensorflow as tf\n'), ((6648, 6675), 'tensorflow.argmax', 'tf.argmax', (['y_actual'], {'axis': '(1)'}), '(y_actual, axis=1)\n', (6657, 6675), True, 'import tensorflow as tf\n'), ((7953, 7977), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['layer_fc2'], {}), '(layer_fc2)\n', (7966, 7977), True, 'import tensorflow as tf\n'), ((8055, 8129), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'layer_fc2', 'labels': 'y_actual'}), '(logits=layer_fc2, labels=y_actual)\n', (8094, 8129), True, 'import tensorflow as tf\n'), ((8263, 8292), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (8277, 8292), True, 
'import tensorflow as tf\n'), ((8301, 8359), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(0)', 'name': '"""step"""', 'trainable': '(False)'}), "(initial_value=0, name='step', trainable=False)\n", (8312, 8359), True, 'import tensorflow as tf\n'), ((8482, 8512), 'tensorflow.argmax', 'tf.argmax', (['y_predicted'], {'axis': '(1)'}), '(y_predicted, axis=1)\n', (8491, 8512), True, 'import tensorflow as tf\n'), ((8534, 8573), 'tensorflow.equal', 'tf.equal', (['y_predicted_cls', 'y_actual_cls'], {}), '(y_predicted_cls, y_actual_cls)\n', (8542, 8573), True, 'import tensorflow as tf\n'), ((8659, 8671), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8669, 8671), True, 'import tensorflow as tf\n'), ((1250, 1268), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {}), '(3, 3)\n', (1262, 1268), True, 'import matplotlib.pyplot as plt\n'), ((2334, 2344), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2342, 2344), True, 'import matplotlib.pyplot as plt\n'), ((4611, 4667), 'matplotlib.pyplot.imshow', 'plt.imshow', (['testing_images[ind]'], {'interpolation': '"""nearest"""'}), "(testing_images[ind], interpolation='nearest')\n", (4621, 4667), True, 'import matplotlib.pyplot as plt\n'), ((4672, 4682), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4680, 4682), True, 'import matplotlib.pyplot as plt\n'), ((4949, 5017), 'matplotlib.pyplot.imshow', 'plt.imshow', (['transferValues_img'], {'interpolation': '"""nearest"""', 'cmap': '"""Reds"""'}), "(transferValues_img, interpolation='nearest', cmap='Reds')\n", (4959, 5017), True, 'import matplotlib.pyplot as plt\n'), ((5022, 5032), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5030, 5032), True, 'import matplotlib.pyplot as plt\n'), ((5882, 5921), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_val', 'y_val'], {'color': 'colors'}), '(x_val, y_val, color=colors)\n', (5893, 5921), True, 'import matplotlib.pyplot as plt\n'), ((5926, 5936), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5934, 5936), True, 'import matplotlib.pyplot as plt\n'), ((8607, 8646), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (8614, 8646), True, 'import tensorflow as tf\n'), ((8684, 8717), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8715, 8717), True, 'import tensorflow as tf\n'), ((8920, 8987), 'numpy.random.choice', 'np.random.choice', (['num_imgs'], {'size': 'training_batch_size', 'replace': '(False)'}), '(num_imgs, size=training_batch_size, replace=False)\n', (8936, 8987), True, 'import numpy as np\n'), ((11696, 11763), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', ([], {'y_true': 'testing_cls_integers', 'y_pred': 'cls_predicted'}), '(y_true=testing_cls_integers, y_pred=cls_predicted)\n', (11712, 11763), False, 'from sklearn.metrics import confusion_matrix\n'), ((12511, 12549), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_imgs', 'dtype': 'np.int'}), '(shape=num_imgs, dtype=np.int)\n', (12519, 12549), True, 'import numpy as np\n'), ((5616, 5650), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'num_classes'], {}), '(0.0, 1.0, num_classes)\n', (5627, 5650), True, 'import numpy as np\n'), ((6724, 6763), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.05)'}), '(shape, stddev=0.05)\n', (6743, 6763), True, 'import tensorflow as tf\n'), ((6813, 6846), 'tensorflow.constant', 'tf.constant', (['(0.05)'], {'shape': '[length]'}), '(0.05, 
shape=[length])\n', (6824, 6846), True, 'import tensorflow as tf\n'), ((7358, 7383), 'tensorflow.matmul', 'tf.matmul', (['input', 'weights'], {}), '(input, weights)\n', (7367, 7383), True, 'import tensorflow as tf\n'), ((7443, 7460), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer'], {}), '(layer)\n', (7453, 7460), True, 'import tensorflow as tf\n'), ((8399, 8443), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (8421, 8443), True, 'import tensorflow as tf\n'), ((14099, 14115), 'numpy.mean', 'np.mean', (['correct'], {}), '(correct)\n', (14106, 14115), True, 'import numpy as np\n'), ((14117, 14132), 'numpy.sum', 'np.sum', (['correct'], {}), '(correct)\n', (14123, 14132), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
from __future__ import print_function
import sys
sys.path.insert(0, "/home/liangjiang/code/keras-jl-mean/")
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.callbacks import EarlyStopping, LearningRateScheduler
from keras.regularizers import l2, activity_l1l2
from keras import backend as K
import argparse
import json
import numpy as np
import matplotlib.pyplot as plt
def argparser():
parser = argparse.ArgumentParser()
parser.add_argument("weight_path", action = 'store',
help = "Path of learned weight")
parser.add_argument("--layer", "-l", action = 'store', type = int, default = 1,
dest = 'layer', help = "Layer to be visualized")
return parser
def random_crop(X_train, size = (3, 3), times = 10):
num_samples = times * X_train.shape[0]
print("num_samples: ", num_samples)
row = X_train.shape[2]
col = X_train.shape[3]
crop_row = size[0]
crop_col = size[1]
random_sample = np.random.randint(0, X_train.shape[0], size = num_samples)
print("random_sample: ", random_sample)
random_col_index = np.random.randint(0, row - crop_row + 1, size = num_samples)
print("random_col_index: ", random_col_index)
random_row_index = np.random.randint(0, col - crop_col, size = num_samples)
print("random_row_index: ", random_row_index)
# cropped_x_cols = cropped_x.shape[2]
# cropped_x_rows = cropped_x.shape[3]
crop_x = np.zeros((num_samples, X_train.shape[1], crop_row, crop_col))
for i in range(num_samples):
crop_x[i, :, :, :] = X_train[random_sample[i], :,
random_row_index[i] : random_row_index[i] + crop_row,
random_col_index[i] : random_col_index[i] + crop_col]
# print("crop_x[0]: ", crop_x[0, :, :, :])
return crop_x
def main():
parser = argparser()
args = parser.parse_args()
weight_path = args.weight_path
layer = args.layer
img_rows, img_cols = 32, 32
# the CIFAR10 images are RGB
img_channels = 3
batch_size = 32
nb_classes = 10
model = Sequential()
print("Making model")
model.add(Convolution2D(32, 3, 3, border_mode='same',
input_shape=(img_channels, img_rows, img_cols),
W_regularizer = l2(l = 0.),
b_regularizer = l2(l = 0.)))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3,
W_regularizer = l2(l = 0.),
b_regularizer = l2(l = 0.)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, border_mode='same',
W_regularizer = l2(l = 0.),
b_regularizer = l2(l = 0.)))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3,
W_regularizer = l2(l = 0.),
b_regularizer = l2(l = 0.)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, W_regularizer = l2(l = 0.), b_regularizer = l2(l = 0.)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, W_regularizer = l2(l = 0.), b_regularizer = l2(l = 0.)))
model.add(Activation('softmax'))
# let's train the model using SGD + momentum (how original).
print("Compiling model")
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
print("Going to visualize layer ", layer)
print(model.layers[layer].get_config())
# load learned weight
print("Loading weight")
model.load_weights(weight_path)
weight = model.layers[0].get_weights()
print("shape of weight: ", weight[0].shape)
# generate function to get output at layer to be visualized
for i in range(len(model.layers)):
print(i)
input = model.layers[0].input
output = model.layers[layer].output
func = K.function([K.learning_phase()] + [input], output)
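    # func maps (learning_phase flag, input batch) to the activations of the layer selected for visualization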
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# im = X_train[100, :, :, :]
# im = np.swapaxes(im, 0, 2)
# im = np.swapaxes(im, 0, 1)
# plt.figure(1)
# plt.imshow(im)
# plt.show()
# sys.exit()
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_test.shape[0], 'test samples')
crop_x = X_test
# crop_x = random_crop(X_test, size = (9, 9), times = 10)
print("shape of crop_x: ", crop_x.shape)
im = crop_x[0, :, :, :]
# print("crop_x[0]", im)
im = im * 255
im = im.astype(np.uint8)
# print("im of uint8: ", im)
fig = plt.figure()
# plt.imshow(im)
# plt.show()
# sys.exit()
# get output from layer to be visualized
# print(X_test[50][1])
activation = func([0] + [crop_x])
print("shape of activation: ", activation.shape)
# max_sample_index = np.argmax(activation, axis = 0)
# max_sample_index = max_sample_index.squeeze()
# np.savetxt("max_sample_index", max_sample_index, fmt = "%d")
# print("shape of max_sample_index: ", max_sample_index.shape)
# # print("max_29", activation[:, 29, :, :])
# for i in range(32):
# ax = fig.add_subplot(8, 4, i + 1, frameon=False)
# ax.set_xticks([])
# ax.set_yticks([])
# ax.xaxis.set_ticks_position('none')
# ax.yaxis.set_ticks_position('none')
# im = crop_x[max_sample_index[i], :, :, :]
# im = np.swapaxes(im, 0, 2)
# im = np.swapaxes(im, 1, 0)
# # print("shape of im: ", im.shape)
# im = im * 255
# im = im.astype(np.uint8)
# ax.imshow(im)
# plt.show()
if activation.ndim == 4:
num = activation.shape[0]
print("num: ", num)
col = activation.shape[1]
print("col: ", col)
map_size = activation.shape[2] * activation.shape[3]
print("map_size: ", map_size)
# temp = np.mean(activation, axis = -1)
# matrix_activation = np.mean(temp, axis = -1)
flatten_activation = np.reshape(activation, (num, col * map_size))
print("shape of flatten_activation: ", flatten_activation.shape)
trans_activation = flatten_activation.transpose()
print("shape of trans_activation: ", trans_activation.shape)
reshape_activation = np.reshape(trans_activation, (col, num * map_size))
print("shape of reshape_activation: ", reshape_activation.shape)
matrix_activation = reshape_activation.transpose()
print("shape of matrix_activation: ", matrix_activation.shape)
mean = np.mean(matrix_activation, axis = 0, keepdims = True)
# mean_p = T.printing.Print('mean')(mean)
std = np.std(matrix_activation, axis = 0, keepdims = True)
normalized_output = (matrix_activation - mean) / std
covariance = np.dot(np.transpose(normalized_output), normalized_output) / num / map_size
else:
num = activation.shape[0]
mean = np.mean(activation, axis = 0, keepdims = True)
# mean_p = T.printing.Print('mean')(mean)
std = np.std(activation, axis = 0, keepdims = True)
normalized_output = (activation - mean) / std
covariance = np.dot(np.transpose(normalized_output), normalized_output) / num
np.savetxt("mean", mean, fmt = "%f")
np.savetxt("std", std, fmt = "%f")
np.savetxt("covariance", covariance, fmt = "%f")
if "__main__" == __name__:
main()
|
[
"sys.path.insert",
"keras.backend.learning_phase",
"keras.optimizers.SGD",
"keras.layers.Activation",
"numpy.mean",
"numpy.reshape",
"argparse.ArgumentParser",
"keras.layers.Flatten",
"keras.datasets.cifar10.load_data",
"keras.layers.MaxPooling2D",
"keras.models.Sequential",
"numpy.savetxt",
"numpy.std",
"keras.regularizers.l2",
"numpy.transpose",
"keras.layers.Dropout",
"numpy.random.randint",
"numpy.zeros",
"matplotlib.pyplot.figure"
] |
[((71, 129), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/liangjiang/code/keras-jl-mean/"""'], {}), "(0, '/home/liangjiang/code/keras-jl-mean/')\n", (86, 129), False, 'import sys\n'), ((734, 759), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (757, 759), False, 'import argparse\n'), ((1282, 1338), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X_train.shape[0]'], {'size': 'num_samples'}), '(0, X_train.shape[0], size=num_samples)\n', (1299, 1338), True, 'import numpy as np\n'), ((1408, 1466), 'numpy.random.randint', 'np.random.randint', (['(0)', '(row - crop_row + 1)'], {'size': 'num_samples'}), '(0, row - crop_row + 1, size=num_samples)\n', (1425, 1466), True, 'import numpy as np\n'), ((1543, 1597), 'numpy.random.randint', 'np.random.randint', (['(0)', '(col - crop_col)'], {'size': 'num_samples'}), '(0, col - crop_col, size=num_samples)\n', (1560, 1597), True, 'import numpy as np\n'), ((1748, 1809), 'numpy.zeros', 'np.zeros', (['(num_samples, X_train.shape[1], crop_row, crop_col)'], {}), '((num_samples, X_train.shape[1], crop_row, crop_col))\n', (1756, 1809), True, 'import numpy as np\n'), ((2375, 2387), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2385, 2387), False, 'from keras.models import Sequential\n'), ((3837, 3891), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, decay=1e-06, momentum=0.9, nesterov=True)\n', (3840, 3891), False, 'from keras.optimizers import SGD\n'), ((4587, 4606), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (4604, 4606), False, 'from keras.datasets import cifar10\n'), ((5214, 5226), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5224, 5226), True, 'import matplotlib.pyplot as plt\n'), ((7873, 7907), 'numpy.savetxt', 'np.savetxt', (['"""mean"""', 'mean'], {'fmt': '"""%f"""'}), "('mean', mean, fmt='%f')\n", (7883, 7907), True, 'import numpy as np\n'), ((7914, 7946), 'numpy.savetxt', 'np.savetxt', (['"""std"""', 'std'], {'fmt': '"""%f"""'}), "('std', std, fmt='%f')\n", (7924, 7946), True, 'import numpy as np\n'), ((7953, 7999), 'numpy.savetxt', 'np.savetxt', (['"""covariance"""', 'covariance'], {'fmt': '"""%f"""'}), "('covariance', covariance, fmt='%f')\n", (7963, 7999), True, 'import numpy as np\n'), ((2683, 2701), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2693, 2701), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((2869, 2887), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2879, 2887), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((2903, 2933), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2915, 2933), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((3153, 3171), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3163, 3171), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3339, 3357), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3349, 3357), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3373, 3403), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3385, 3403), False, 'from keras.layers import Convolution2D, MaxPooling2D\n'), ((3451, 3460), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3458, 3460), False, 'from 
keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3558, 3576), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3568, 3576), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3592, 3604), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3599, 3604), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3709, 3730), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (3719, 3730), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((6636, 6681), 'numpy.reshape', 'np.reshape', (['activation', '(num, col * map_size)'], {}), '(activation, (num, col * map_size))\n', (6646, 6681), True, 'import numpy as np\n'), ((6911, 6962), 'numpy.reshape', 'np.reshape', (['trans_activation', '(col, num * map_size)'], {}), '(trans_activation, (col, num * map_size))\n', (6921, 6962), True, 'import numpy as np\n'), ((7182, 7231), 'numpy.mean', 'np.mean', (['matrix_activation'], {'axis': '(0)', 'keepdims': '(True)'}), '(matrix_activation, axis=0, keepdims=True)\n', (7189, 7231), True, 'import numpy as np\n'), ((7300, 7348), 'numpy.std', 'np.std', (['matrix_activation'], {'axis': '(0)', 'keepdims': '(True)'}), '(matrix_activation, axis=0, keepdims=True)\n', (7306, 7348), True, 'import numpy as np\n'), ((7571, 7613), 'numpy.mean', 'np.mean', (['activation'], {'axis': '(0)', 'keepdims': '(True)'}), '(activation, axis=0, keepdims=True)\n', (7578, 7613), True, 'import numpy as np\n'), ((7682, 7723), 'numpy.std', 'np.std', (['activation'], {'axis': '(0)', 'keepdims': '(True)'}), '(activation, axis=0, keepdims=True)\n', (7688, 7723), True, 'import numpy as np\n'), ((2599, 2608), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (2601, 2608), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((2656, 2665), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (2658, 2665), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((2785, 2794), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (2787, 2794), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((2842, 2851), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (2844, 2851), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3069, 3078), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3071, 3078), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3126, 3135), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3128, 3135), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3255, 3264), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3257, 3264), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3312, 3321), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3314, 3321), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3503, 3512), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3505, 3512), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3531, 3540), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3533, 3540), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3654, 3663), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3656, 3663), False, 'from keras.regularizers import l2, activity_l1l2\n'), ((3682, 3691), 'keras.regularizers.l2', 'l2', ([], {'l': '(0.0)'}), '(l=0.0)\n', (3684, 3691), False, 'from 
keras.regularizers import l2, activity_l1l2\n'), ((4504, 4522), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (4520, 4522), True, 'from keras import backend as K\n'), ((7810, 7841), 'numpy.transpose', 'np.transpose', (['normalized_output'], {}), '(normalized_output)\n', (7822, 7841), True, 'import numpy as np\n'), ((7442, 7473), 'numpy.transpose', 'np.transpose', (['normalized_output'], {}), '(normalized_output)\n', (7454, 7473), True, 'import numpy as np\n')]
|
"""
paw_structure.ion
-----------------
Ion complex detection using geometric :ref:`algorithm<Control_ION_algorithm>`.
Main routine is :func:`.ion_find_parallel`.
Dependencies:
:py:mod:`functools`
:py:mod:`miniutils`
:py:mod:`numpy`
:py:mod:`pandas`
:mod:`.neighbor`
:mod:`.utility`
:class:`.Snap`
.. autosummary::
ion_find_parallel
ion_load
ion_save
ion_single
"""
import numpy as np
import pandas as pd
from functools import partial
import miniutils.progress_bar as progress
# MODULES WITHIN PROJECT
from . import neighbor
from . import utility
from .tra import Snap
########################################################################################################################
# FIND ION COMPLEX FOR A SINGLE SNAPSHOT
########################################################################################################################
# INPUT
# class Snap snap snapshot containing all information
# str id1 identifier for atom used as center (e.g. 'MN'); only one allowed to be in snap
# str id2 identifier for atoms as possible first neighbors (e.g. 'O_')
# str id3 identifier for atoms as possible neighbors of first neighbors (e.g. 'H_')
# float cut1 cutoff distance for first neighbor search
# float cut2 cutoff distance for second neighbor search
#####
# OUTPUT
# pandas DataFrame contains the whole complex centered around id1
########################################################################################################################
def ion_single(snap, id1, id2, id3, cut1, cut2):
"""
Find ion complex of a single snapshot of atomic positions.
Args:
snap (:class:`.Snap`): single snapshot containing the atomic information
id1 (str): identifier for atom used as center (e.g. 'MN')
id2 (str): identifier for atoms as possible first neighbors (e.g. 'O\_')
id3 (str): identifier for atoms as possible neighbors of first neighbors (e.g. 'H\_')
cut1 (float): cutoff distance for first neighbor search
cut2 (float): cutoff distance for second neighbor search
Returns:
:class:`.Snap`: snapshot containing an ion complex
Todo:
Implement possibility for more atoms of type id1 or allow selection by name.
"""
# check if only one atom is selected as ion
if len(snap.atoms[snap.atoms['id'] == id1]) != 1:
utility.err('ion_single', 0, [len(snap.atoms[snap.atoms['id'] == id1])])
# check if all three are different species
if id1 == id2 or id2 == id3 or id1 == id3:
utility.err('ion_single', 1, [id1, id2, id3])
# search first neighbors
next1 = neighbor.neighbor_name(snap, id1, id2, cut1)
# extract name lists
id1_list = [atom[0] for atom in next1]
id2_list = [y for x in [atom[1:] for atom in next1] for y in x]
# search second neighbors
next2 = neighbor.neighbor_name(snap, id2, id3, cut2, names=id2_list)
# extract name list
id3_list = [y for x in [atom[1:] for atom in next2] for y in x]
# extract correct atom information
id1_list = snap.atoms.loc[snap.atoms['name'].isin(id1_list)]
id2_list = snap.atoms.loc[snap.atoms['name'].isin(id2_list)]
id3_list = snap.atoms.loc[snap.atoms['name'].isin(id3_list)]
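    # combine the central ion, its first neighbors and their second neighbors into a single complex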
comp = pd.concat([id1_list, id2_list, id3_list])
return Snap(snap.iter, snap.time, snap.cell, None, None, dataframe=comp)
########################################################################################################################
# SAVE INFORMATION FROM ion_find TO FILE <root>.ext FOR LATER ANALYSIS
# TODO: check if snapshots is empty
########################################################################################################################
# INPUT
# str root root name for saving file
# list class Snap snapshots list with information to be saved
# str id1 identifier for atom used as center (e.g. 'MN'); only one allowed to be in snap
# str id2 identifier for atoms as possible first neighbors (e.g. 'O_')
# str id3 identifier for atoms as possible neighbors of first neighbors (e.g. 'H_')
# float cut1 cutoff distance for first neighbor search
# float cut2 cutoff distance for second neighbor search
# str ext (optional) extension for the saved file: name = root + ext
########################################################################################################################
def ion_save(root, snapshots, id1, id2, id3, cut1, cut2, ext='.ion'):
"""
Save results to file :ref:`Output_ion`.
Args:
root (str): root name for saving file
snapshots (list[:class:`.Snap`]): list of snapshots containing an ion complex
id1 (str): identifier for atom used as center (e.g. 'MN')
id2 (str): identifier for atoms as possible first neighbors (e.g. 'O\_')
id3 (str): identifier for atoms as possible neighbors of first neighbors (e.g. 'H\_')
cut1 (float): cutoff distance for first neighbor search
cut2 (float): cutoff distance for second neighbor search
ext (str, optional): default ".ion" - extension for the saved file: name = root + ext
Todo:
Check if snapshots is empty.
"""
# open file
path = root + ext
try:
f = open(path, 'w')
except IOError:
utility.err_file('ion_save', path)
# write header
f.write(utility.write_header())
f.write("ION COMPLEXES\n")
f.write("%-14s%14.8f\n" % ("T1", snapshots[0].time))
f.write("%-14s%14.8f\n" % ("T2", snapshots[-1].time))
f.write("%-14s%14d\n" % ("SNAPSHOTS", len(snapshots)))
f.write("%-14s%14s\n" % ("ID1", id1))
f.write("%-14s%14s\n" % ("ID2", id2))
f.write("%-14s%14s\n" % ("ID3", id3))
f.write("%-14s%14.8f\n" % ("CUT1", cut1))
f.write("%-14s%14.8f\n" % ("CUT2", cut2))
f.write("%-14s\n" % ("UNIT CELL"))
np.savetxt(f, snapshots[0].cell, fmt="%14.8f")
# write structure information
for i in range(len(snapshots)):
f.write("-" * 84 + "\n")
f.write("%-14s%-14.8f%-14s%-14d%-14s%-14d\n" %
("TIME", snapshots[i].time, "ITERATION", snapshots[i].iter, "ATOMS", len(snapshots[i].atoms)))
f.write("%-14s%-14s%-14s%14s%14s%14s\n" % ('NAME', 'ID', 'INDEX', 'X', 'Y', 'Z'))
np.savetxt(f, snapshots[i].atoms, fmt="%-14s%-14s%-14d%14.8f%14.8f%14.8f")
f.close()
return
########################################################################################################################
# LOAD INFORMATION PREVIOUSLY SAVED BY ion_save()
# WARNING: READING IS LINE SENSITIVE! ONLY USE ON UNCHANGED FILES WRITTEN BY ion_save()
########################################################################################################################
# INPUT
# str root root name for the file to be loaded
# str ext (optional) extension for the file to be loaded: name = root + ext
#####
# OUTPUT
# list class Snap snapshots list of all information
########################################################################################################################
def ion_load(root, ext='.ion'):
"""
Load information from the :ref:`Output_ion` file previously created by :func:`.ion_save`.
Args:
root (str): root name for the file to be loaded
ext (str, optional): default ".ion" - extension for the file to be loaded: name = root + ext
Returns:
list[:class:`.Snap`]: list of snapshots containing an ion complex
Note:
Reading is line sensitive. Do not alter the output file before loading.
"""
path = root + ext
try:
f = open(path, 'r')
except IOError:
utility.err_file('ion_load', path)
text = f.readlines() # read text as lines
for i in range(len(text)):
text[i] = text[i].split() # split each line into list with strings as elements
snapshots = [] # storage list
for i in range(len(text)):
if len(text[i]) > 1:
if text[i][0] == 'UNIT':
cell = np.array(text[i+1:i+4], dtype=float) # get unit cell
if text[i][0] == "TIME": # search for trigger of new snapshot
iter = int(text[i][3])
time = float(text[i][1])
n_atoms = int(text[i][5])
test = np.array(text[i + 2:i + 2 + n_atoms])
atoms = {}
atoms['name'] = test[:, 0]
atoms['id'] = test[:, 1]
atoms['index'] = np.array(test[:, 2], dtype=int)
df = pd.DataFrame(data=atoms)
# save information as class Snap
snapshots.append(Snap(iter, time, cell, np.array(test[:, 3:6], dtype=np.float64), df))
return snapshots
########################################################################################################################
# FIND ION COMPLEXES IN MULTIPLE SNAPSHOTS
# WARNING: NOT IN USE BECAUSE NO PARALLEL COMPUTING
########################################################################################################################
# INPUT
# str root root name for saving file
# list class Snap snapshots list with information to be saved
# str id1 identifier for atom used as center (e.g. 'MN'); only one allowed to be in snap
# str id2 identifier for atoms as possible first neighbors (e.g. 'O_')
# str id3 identifier for atoms as possible neighbors of first neighbors (e.g. 'H_')
# float cut1 (optional) cutoff distance for first neighbor search
# float cut2 (optional) cutoff distance for second neighbor search
#####
# OUTPUT
# list class Snap complex list with all ion complexes found
########################################################################################################################
# def ion_find(root, snapshots, id1, id2, id3, cut1=3.0, cut2=1.4):
# complex = []
# # loop through different snapshots
# for snap in snapshots:
# # get complex information
# comp = ion_single(snap, id1, id2, id3, cut1, cut2)
# # append Snap object for data storage
# complex.append(Snap(snap.iter, snap.time, snap.cell, None, None, dataframe=comp))
# # save information to file
# ion_save(root, complex, id1, id2, id3, cut1, cut2)
# return complex
########################################################################################################################
# ROUTINE TO FIND ION COMPLEXES FOR MULTIPLE SNAPSHOTS
# PARALLEL VERSION OF ion_find() WITH PROGRESS BAR IN CONSOLE
########################################################################################################################
# INPUT
# str root root name for saving file
# list class Snap snapshots list with information to be saved
# str id1 identifier for atom used as center (e.g. 'MN'); only one allowed to be in snap
# str id2 identifier for atoms as possible first neighbors (e.g. 'O_')
# str id3 identifier for atoms as possible neighbors of first neighbors (e.g. 'H_')
# float cut1 (optional) cutoff distance for first neighbor search
# float cut2 (optional) cutoff distance for second neighbor search
#####
# OUTPUT
# list class Snap ion_comp list of ion complexes found
########################################################################################################################
def ion_find_parallel(root, snapshots, id1, id2, id3, cut1, cut2):
"""
Find ion complexes for multiple snapshots of atomic configurations.
Args:
root (str): root name of the files
snapshots (list[:class:`.Snap`]): list of snapshots containing the atomic information
id1 (str): identifier for atom used as center (e.g. 'MN')
id2 (str): identifier for atoms as possible first neighbors (e.g. 'O\_')
id3 (str): identifier for atoms as possible neighbors of first neighbors (e.g. 'H\_')
cut1 (float): cutoff distance for first neighbor search
cut2 (float): cutoff distance for second neighbor search
Returns:
list[:class:`.Snap`]: list of snapshots containing an ion complex
Parallelization based on :py:mod:`multiprocessing`.
Note:
Only one atom of type :data:`id1` allowed to be in a snapshot at the moment.
"""
print("ION COMPLEX DETECTION IN PROGRESS")
# set other arguments (necessary for parallel computing)
multi_one = partial(ion_single, id1=id1, id2=id2, id3=id3, cut1=cut1, cut2=cut2)
# run data extraction
ion_comp = progress.parallel_progbar(multi_one, snapshots)
# create output file
ion_save(root, ion_comp, id1, id2, id3, cut1, cut2)
print("ION COMPLEX DETECTION FINISHED")
return ion_comp
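########################################################################################################################
# Hypothetical usage sketch (added for illustration, not part of the original module). `snapshots` is assumed to be a
# list of Snap objects produced by the trajectory-reading part of the package; the identifiers 'MN', 'O_', 'H_' and the
# cutoffs 3.0 / 1.4 are the examples quoted in the docstrings above.
#
#     ion_comp = ion_find_parallel('mn_complex', snapshots, 'MN', 'O_', 'H_', cut1=3.0, cut2=1.4)
#     # ion_find_parallel already writes 'mn_complex.ion'; the complexes can be reloaded later with:
#     reloaded = ion_load('mn_complex')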
|
[
"pandas.DataFrame",
"numpy.array",
"functools.partial",
"numpy.savetxt",
"miniutils.progress_bar.parallel_progbar",
"pandas.concat"
] |
[((3330, 3371), 'pandas.concat', 'pd.concat', (['[id1_list, id2_list, id3_list]'], {}), '([id1_list, id2_list, id3_list])\n', (3339, 3371), True, 'import pandas as pd\n'), ((6036, 6082), 'numpy.savetxt', 'np.savetxt', (['f', 'snapshots[0].cell'], {'fmt': '"""%14.8f"""'}), "(f, snapshots[0].cell, fmt='%14.8f')\n", (6046, 6082), True, 'import numpy as np\n'), ((12748, 12816), 'functools.partial', 'partial', (['ion_single'], {'id1': 'id1', 'id2': 'id2', 'id3': 'id3', 'cut1': 'cut1', 'cut2': 'cut2'}), '(ion_single, id1=id1, id2=id2, id3=id3, cut1=cut1, cut2=cut2)\n', (12755, 12816), False, 'from functools import partial\n'), ((12858, 12905), 'miniutils.progress_bar.parallel_progbar', 'progress.parallel_progbar', (['multi_one', 'snapshots'], {}), '(multi_one, snapshots)\n', (12883, 12905), True, 'import miniutils.progress_bar as progress\n'), ((6450, 6524), 'numpy.savetxt', 'np.savetxt', (['f', 'snapshots[i].atoms'], {'fmt': '"""%-14s%-14s%-14d%14.8f%14.8f%14.8f"""'}), "(f, snapshots[i].atoms, fmt='%-14s%-14s%-14d%14.8f%14.8f%14.8f')\n", (6460, 6524), True, 'import numpy as np\n'), ((8219, 8259), 'numpy.array', 'np.array', (['text[i + 1:i + 4]'], {'dtype': 'float'}), '(text[i + 1:i + 4], dtype=float)\n', (8227, 8259), True, 'import numpy as np\n'), ((8492, 8529), 'numpy.array', 'np.array', (['text[i + 2:i + 2 + n_atoms]'], {}), '(text[i + 2:i + 2 + n_atoms])\n', (8500, 8529), True, 'import numpy as np\n'), ((8674, 8705), 'numpy.array', 'np.array', (['test[:, 2]'], {'dtype': 'int'}), '(test[:, 2], dtype=int)\n', (8682, 8705), True, 'import numpy as np\n'), ((8727, 8751), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'atoms'}), '(data=atoms)\n', (8739, 8751), True, 'import pandas as pd\n'), ((8857, 8897), 'numpy.array', 'np.array', (['test[:, 3:6]'], {'dtype': 'np.float64'}), '(test[:, 3:6], dtype=np.float64)\n', (8865, 8897), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.colors import LinearSegmentedColormap
ms_color = [0.12156863, 0.46666667, 0.70588235, 1]
hc_color = [1., 0.49803922, 0.05490196, 1]
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# set serif font
plt.rc('font', family='serif')
def generate_transparanet_cm(base='coolwarm', name="TransCoWa"):
# copy from existing colormap
ncolors = 256
color_array = plt.get_cmap(base)(range(ncolors))
# create parabolic decrease
decr = [-1*(x**2)+1 for x in range(int(ncolors/2))]
# normalize
decr = (decr - np.min(decr))/(np.max(decr - np.min(decr)))
# use inverted parabola as increase
incr = np.copy(decr)[::-1]
alphas = np.concatenate((decr, incr))
# update alpha values
color_array[:,-1] = alphas
# create new colormap and register it
transparent_coolwarm = LinearSegmentedColormap.from_list(name, color_array)
plt.register_cmap(cmap=transparent_coolwarm)
def get_labels_dict(path):
import xmltodict
with open(path) as f:
labels_xml = xmltodict.parse(f.read())['atlas']['data']['label']
labels_dict = {}
for row in labels_xml:
labels_dict[int(row['index'])] = row['name']
return labels_dict
def heatmap_per_region(hm, atlas, positive=True, size_normalize=False, signed=False):
# get heatmap mean per region
# use only positive values
signed_hm = np.copy(hm)
if signed:
if positive:
signed_hm[signed_hm<0] = 0
else:
signed_hm[signed_hm>0] = 0
regional_hm = {}
for lbl_idx in np.unique(atlas):
# skip outside area
if lbl_idx != 0:
atlas_lbl = atlas.copy()
# get region mask for each label
atlas_lbl[lbl_idx!=atlas] = 0
atlas_lbl[lbl_idx==atlas] = 1
# multiply region mask with heatmap
region_intensity = np.mean(atlas_lbl * np.squeeze(signed_hm))
if size_normalize:
region_size = np.sum(atlas_lbl).item()
region_intensity /= region_size
regional_hm[lbl_idx] = region_intensity
return regional_hm
def aggregate_regions(regional_hm, all_areas):
# aggregate atlas regions to previously defined areas
area_hm = {}
for name, (min_idx, max_idx) in all_areas.items():
regions_fit = []
for key in regional_hm.keys():
if key in range(min_idx, max_idx+1):
regions_fit.append(regional_hm[key])
region_mean = np.mean(regions_fit)
area_hm[name] = region_mean
return area_hm
def get_area_relevance(heatmaps, atlas, area_dict, positive=True, size_normalize=True):
keys = []
values = []
for hm in heatmaps:
regional_hm = heatmap_per_region(hm, atlas, positive=positive, size_normalize=size_normalize)
area_hm = aggregate_regions(regional_hm, area_dict)
# sort by values
area_hm_sorted = sorted(area_hm.items(), key=lambda kv: kv[1])
keys_sorted = [row[0] for row in area_hm_sorted]
values_sorted = [row[1] for row in area_hm_sorted]
keys.append(keys_sorted)
values.append(values_sorted)
return keys, values
def translate_keys(keys):
names_list = []
for key_list in keys:
name_list = []
for key in key_list:
name_list.append(short_name_map[key])
names_list.append(name_list)
return names_list
def wrap_as_df(keys, values):
df_ms = pd.DataFrame({"values_ms": values[0]}, keys[0])
df_hc = pd.DataFrame({"values_hc": values[1]}, keys[1])
df = pd.merge(df_ms, df_hc, left_index=True, right_index=True, how='outer')
return df
def reduce_df(df, take=30):
# get order based on relevance sum
abs_order = (np.abs(df["values_hc"]) + np.abs(df["values_ms"])).sort_values().index
most = abs_order[-take:]
short_df = df.loc[most]
order = (short_df["values_hc"] + short_df["values_ms"]).sort_values().index
short_df = df.loc[order]
return short_df
def reduce_two_dfs(df_zero, df_one, take=30):
abs_order = (df_zero.abs().sum() + df_one.abs().sum()).sort_values().index
most = abs_order[-take:]
# columns are keys so use [:, key]
short_df_zero = df_zero.loc[:,most]
short_df_one = df_one.loc[:,most]
order = (short_df_zero.sum() + short_df_one.sum()).sort_values().index
short_df_zero = short_df_zero.reindex(order, axis=1)
short_df_one = short_df_one.reindex(order, axis=1)
return short_df_zero, short_df_one
def plot_key_value_pairs(keys, values, title, loc="center left"):
plt.figure(figsize=(10, 6))
plt.plot(keys[0], values[0], 'o', color=ms_color, label="CDMS")
plt.plot(keys[1], values[1], 'o', color=hc_color, label="HC")
plt.xticks(rotation='vertical')
plt.legend(loc=loc)
plt.title(title)
plt.show()
def plot_dataframe(df, title, loc="center left"):
plt.figure(figsize=(10, 6))
plt.plot(df["values_ms"], 'o', color=ms_color, label="CDMS")
plt.plot(df["values_hc"], 'o', color=hc_color, label="HC")
plt.xticks(rotation='vertical')
plt.legend(loc=loc)
plt.title(title)
plt.show()
# Modified areas from Visualizing evidence for AD paper by
# Boehle et al. Based on Neuromorphometrics atlas from SPM12
# Name: (min, max)
gm_areas= {
"Accumbens": (23, 30),
"Amygdala": (31, 32),
"Brain Stem": (35, 35),
"Caudate": (36, 37),
"Cerebellum": (38, 41),
"Hippocampus": (47, 48),
"Parahippocampal gyrus": (170, 171),
"Pallidum": (55, 56),
"Putamen": (57, 58),
"Thalamus": (59, 60),
"CWM": (44, 45),
"ACG": (100, 101),
"Ant. Insula": (102, 103),
"Post. Insula": (172, 173),
"AOG": (104, 105),
"AG": (106, 107),
"Cuneus": (114, 115),
"Central operculum": (112, 113),
"Frontal operculum": (118, 119),
"Frontal pole": (120, 121),
"Fusiform gyrus": (122, 123),
"Temporal pole": (202, 203),
"TrIFG": (204, 205),
"TTG": (206, 207),
"Entorh. cortex": (116, 117),
"Parietal operculum": (174, 175),
"SPL": (198, 199),
"CSF": (46, 46),
"3rd Ventricle": (4, 4),
"4th Ventricle": (11, 11),
"Lateral Ventricles": (49, 52),
"Diencephalon": (61, 62),
"Vessels": (63, 64),
"Optic Chiasm": (69, 69),
"Vermal Lobules": (71, 73),
"Basal Forebrain": (75, 76),
"Calc": (108, 109),
"GRe": (124, 125),
"IOG": (128, 129),
"ITG": (132, 133),
"LiG": (134, 135),
"LOrG": (136, 137),
"MCgG": (138, 139),
"MFC": (140, 141),
"MFG": (142, 143),
"MOG": (144, 145),
"MOrG": (146, 147),
"MPoG": (148, 149),
"MPrG": (150, 151),
"MSFG": (152, 153),
"MTG": (154, 155),
"OCP": (156, 157),
"OFuG": (160, 161),
"OpIFG": (162, 163),
"OrIFG": (164, 165),
"PCgG": (166, 167),
"PCu": (168, 169),
"PoG": (176, 177),
"POrG": (178, 179),
"PP": (180, 181),
"PrG": (182, 183),
"PT": (184, 185),
"SCA": (186, 187),
"SFG": (190, 191),
"SMC": (192, 193),
"SMG": (194, 195),
"SOG": (196, 197),
"STG": (200, 201),
}
short_name_map = {
'Accumbens': 'Accumbens',
'Amygdala': 'Amygdala',
'Brain Stem': 'Brain Stem',
'Caudate': 'Caudate',
'Cerebellum': 'Cerebellum',
'Hippocampus': 'Hippocampus',
'Parahippocampal gyrus': 'Parahippocampal gyr.',
'Pallidum': 'Pallidum',
'Putamen': 'Putamen',
'Thalamus': 'Thalamus',
'Diencephalon': 'Diencephalon',
'CWM': 'Cerebral white matter',
'ACG': 'Ant. cingulate gyr.',
'Ant. Insula': 'Ant. insula',
'Post. Insula': 'Post. insula',
'AOG': 'Ant. orbital gyr.',
'AG': 'Angular gyr.',
'Cuneus': 'Cuneus',
'Central operculum': 'Central operculum',
'Frontal operculum': 'Frontal operculum',
'Frontal pole': 'Frontal pole',
'Fusiform gyrus': 'Fusiform gyr.',
'Temporal pole': 'Temporal pole',
'TrIFG': 'Triangular part of IFG',
'TTG': 'Trans. temporal gyr.',
'Entorh. cortex': 'Entorhinal area',
'Parietal operculum': 'Parietal operculum',
'SPL': 'Sup. parietal lobule',
'CSF': 'CSF',
'3rd Ventricle': '3rd Ventricle',
'4th Ventricle': '4th Ventricle',
'Lateral Ventricles': 'Inf. Lat. Ventricles',
'Vessels': 'Vessels',
'Optic Chiasm': 'Optic Chiasm',
'Vermal Lobules': 'Cereb. Verm. Lob.',
'Basal Forebrain': 'Basal Forebrain',
'Calc': 'Calcarine cortex',
'GRe': 'Gyrus rectus',
'IOG': 'Inf. occipital gyr.',
'ITG': 'Inf. temporal gyr.',
'LiG': 'Lingual gyr.',
'LOrG': 'Lat. orbital gyr.',
'MCgG': 'Mid. cingulate gyr.',
'MFC': 'Med. frontal cortex',
'MFG': 'Mid. frontal gyr.',
'MOG': 'Mid. occipital gyr.',
'MOrG': 'Med. orbital gyr.',
'MPoG': 'Post. gyr. med. seg.',
'MPrG': 'Pre. gyr. med. seg.',
'MSFG': 'Sup. frontal gyr. med. seg.',
'MTG': 'Mid. temporal gyr.',
'OCP': 'Occipital pole',
'OFuG': 'Occipital fusiform gyr.',
'OpIFG': 'Opercular part of IFG',
'OrIFG': 'Orbital part of IFG',
'PCgG': 'Post. cingulate gyr.',
'PCu': 'Precuneus',
'PoG': 'Postcentral gyr.',
'POrG': 'Post. orbital gyr.',
'PP': 'Planum polare',
'PrG': 'Precentral gyr.',
'PT': 'Planum temporale',
'SCA': 'Subcallosal area',
'SFG': 'Sup. frontal gyr.',
'SMC': 'Supp. motor cortex',
'SMG': 'Supramarginal gyr.',
'SOG': 'Sup. occipital gyr.',
'STG': 'Sup. temporal gyr.'
}
# Aggregated white matter areas from JHU ICBM DTI atlas from FSL
# Name: (min, max)
wm_areas= {
"Middle cerebellar peduncle": (1, 2),
"Corpus callosum": (3, 5),
"Fornix": (6, 6),
"Corticospinal tract": (7, 8),
"Medial lemniscus": (9, 10),
"Inferior cerebellar peduncle": (11, 12),
"Superior cerebellar peduncle": (13, 14),
"Cerebral peduncle": (15, 16),
"Anterior limb of internal capsule": (17, 18),
"Posterior limb of internal capsule": (19, 20),
"Retrolenticular part of internal capsule": (21, 22),
"Anterior corona radiata": (23, 24),
"Superior corona radiata": (25, 26),
"Posterior corona radiata": (27, 28),
"Posterior thalamic radiation": (29, 30),
"Sagittal stratum": (31, 32),
"External capsule": (33, 34),
"Cingulum": (35, 38),
"Superior longitudinal fasciculus": (41, 42),
"Superior fronto-occipital fasciculus": (43, 44),
"Uncinate fasciculus": (45, 46),
"Tapetum": (47, 48),
}
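# Hypothetical end-to-end sketch (added for illustration, not part of the original script): how the helpers above are
# meant to chain together. `hm_ms`, `hm_hc` (relevance heatmaps) and `atlas` (a label volume with the same spatial
# shape) are not defined in this file and are assumptions here.
#
#     keys, values = get_area_relevance([hm_ms, hm_hc], atlas, gm_areas, positive=True, size_normalize=True)
#     keys = translate_keys(keys)
#     df = wrap_as_df(keys, values)
#     df = reduce_df(df, take=30)
#     plot_dataframe(df, "Mean positive relevance per region")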
|
[
"numpy.mean",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"numpy.min",
"pandas.DataFrame",
"numpy.abs",
"matplotlib.pyplot.xticks",
"pandas.merge",
"numpy.squeeze",
"matplotlib.pyplot.register_cmap",
"matplotlib.pyplot.title",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.rc",
"numpy.copy",
"matplotlib.pyplot.show",
"numpy.unique",
"numpy.sum",
"matplotlib.pyplot.figure"
] |
[((272, 303), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'SMALL_SIZE'}), "('font', size=SMALL_SIZE)\n", (278, 303), True, 'import matplotlib.pyplot as plt\n'), ((343, 380), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'BIGGER_SIZE'}), "('axes', titlesize=BIGGER_SIZE)\n", (349, 380), True, 'import matplotlib.pyplot as plt\n'), ((414, 451), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'MEDIUM_SIZE'}), "('axes', labelsize=MEDIUM_SIZE)\n", (420, 451), True, 'import matplotlib.pyplot as plt\n'), ((488, 525), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'SMALL_SIZE'}), "('xtick', labelsize=SMALL_SIZE)\n", (494, 525), True, 'import matplotlib.pyplot as plt\n'), ((559, 596), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'SMALL_SIZE'}), "('ytick', labelsize=SMALL_SIZE)\n", (565, 596), True, 'import matplotlib.pyplot as plt\n'), ((630, 667), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'SMALL_SIZE'}), "('legend', fontsize=SMALL_SIZE)\n", (636, 667), True, 'import matplotlib.pyplot as plt\n'), ((689, 728), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'BIGGER_SIZE'}), "('figure', titlesize=BIGGER_SIZE)\n", (695, 728), True, 'import matplotlib.pyplot as plt\n'), ((779, 809), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (785, 809), True, 'import matplotlib.pyplot as plt\n'), ((1235, 1263), 'numpy.concatenate', 'np.concatenate', (['(decr, incr)'], {}), '((decr, incr))\n', (1249, 1263), True, 'import numpy as np\n'), ((1391, 1443), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['name', 'color_array'], {}), '(name, color_array)\n', (1424, 1443), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((1448, 1492), 'matplotlib.pyplot.register_cmap', 'plt.register_cmap', ([], {'cmap': 'transparent_coolwarm'}), '(cmap=transparent_coolwarm)\n', (1465, 1492), True, 'import matplotlib.pyplot as plt\n'), ((1934, 1945), 'numpy.copy', 'np.copy', (['hm'], {}), '(hm)\n', (1941, 1945), True, 'import numpy as np\n'), ((2123, 2139), 'numpy.unique', 'np.unique', (['atlas'], {}), '(atlas)\n', (2132, 2139), True, 'import numpy as np\n'), ((4030, 4077), 'pandas.DataFrame', 'pd.DataFrame', (["{'values_ms': values[0]}", 'keys[0]'], {}), "({'values_ms': values[0]}, keys[0])\n", (4042, 4077), True, 'import pandas as pd\n'), ((4090, 4137), 'pandas.DataFrame', 'pd.DataFrame', (["{'values_hc': values[1]}", 'keys[1]'], {}), "({'values_hc': values[1]}, keys[1])\n", (4102, 4137), True, 'import pandas as pd\n'), ((4148, 4218), 'pandas.merge', 'pd.merge', (['df_ms', 'df_hc'], {'left_index': '(True)', 'right_index': '(True)', 'how': '"""outer"""'}), "(df_ms, df_hc, left_index=True, right_index=True, how='outer')\n", (4156, 4218), True, 'import pandas as pd\n'), ((5155, 5182), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (5165, 5182), True, 'import matplotlib.pyplot as plt\n'), ((5187, 5250), 'matplotlib.pyplot.plot', 'plt.plot', (['keys[0]', 'values[0]', '"""o"""'], {'color': 'ms_color', 'label': '"""CDMS"""'}), "(keys[0], values[0], 'o', color=ms_color, label='CDMS')\n", (5195, 5250), True, 'import matplotlib.pyplot as plt\n'), ((5255, 5316), 'matplotlib.pyplot.plot', 'plt.plot', (['keys[1]', 'values[1]', '"""o"""'], {'color': 'hc_color', 'label': '"""HC"""'}), "(keys[1], values[1], 'o', color=hc_color, label='HC')\n", (5263, 
5316), True, 'import matplotlib.pyplot as plt\n'), ((5321, 5352), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""'}), "(rotation='vertical')\n", (5331, 5352), True, 'import matplotlib.pyplot as plt\n'), ((5357, 5376), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'loc'}), '(loc=loc)\n', (5367, 5376), True, 'import matplotlib.pyplot as plt\n'), ((5381, 5397), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5390, 5397), True, 'import matplotlib.pyplot as plt\n'), ((5402, 5412), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5410, 5412), True, 'import matplotlib.pyplot as plt\n'), ((5468, 5495), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (5478, 5495), True, 'import matplotlib.pyplot as plt\n'), ((5500, 5560), 'matplotlib.pyplot.plot', 'plt.plot', (["df['values_ms']", '"""o"""'], {'color': 'ms_color', 'label': '"""CDMS"""'}), "(df['values_ms'], 'o', color=ms_color, label='CDMS')\n", (5508, 5560), True, 'import matplotlib.pyplot as plt\n'), ((5565, 5623), 'matplotlib.pyplot.plot', 'plt.plot', (["df['values_hc']", '"""o"""'], {'color': 'hc_color', 'label': '"""HC"""'}), "(df['values_hc'], 'o', color=hc_color, label='HC')\n", (5573, 5623), True, 'import matplotlib.pyplot as plt\n'), ((5628, 5659), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '"""vertical"""'}), "(rotation='vertical')\n", (5638, 5659), True, 'import matplotlib.pyplot as plt\n'), ((5664, 5683), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'loc'}), '(loc=loc)\n', (5674, 5683), True, 'import matplotlib.pyplot as plt\n'), ((5688, 5704), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5697, 5704), True, 'import matplotlib.pyplot as plt\n'), ((5709, 5719), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5717, 5719), True, 'import matplotlib.pyplot as plt\n'), ((946, 964), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['base'], {}), '(base)\n', (958, 964), True, 'import matplotlib.pyplot as plt\n'), ((1202, 1215), 'numpy.copy', 'np.copy', (['decr'], {}), '(decr)\n', (1209, 1215), True, 'import numpy as np\n'), ((3057, 3077), 'numpy.mean', 'np.mean', (['regions_fit'], {}), '(regions_fit)\n', (3064, 3077), True, 'import numpy as np\n'), ((1106, 1118), 'numpy.min', 'np.min', (['decr'], {}), '(decr)\n', (1112, 1118), True, 'import numpy as np\n'), ((1135, 1147), 'numpy.min', 'np.min', (['decr'], {}), '(decr)\n', (1141, 1147), True, 'import numpy as np\n'), ((2459, 2480), 'numpy.squeeze', 'np.squeeze', (['signed_hm'], {}), '(signed_hm)\n', (2469, 2480), True, 'import numpy as np\n'), ((4318, 4341), 'numpy.abs', 'np.abs', (["df['values_hc']"], {}), "(df['values_hc'])\n", (4324, 4341), True, 'import numpy as np\n'), ((4344, 4367), 'numpy.abs', 'np.abs', (["df['values_ms']"], {}), "(df['values_ms'])\n", (4350, 4367), True, 'import numpy as np\n'), ((2543, 2560), 'numpy.sum', 'np.sum', (['atlas_lbl'], {}), '(atlas_lbl)\n', (2549, 2560), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Provides functions for data transformation (currently only LLS) and
normalization.
"""
import numpy as np
def transform(raw_data, mode, direction='direct', **kwargs):
"""
Apply mathematical transformations to data.
Parameters
----------
raw_data : ndarray
2D numpy array with the shape (N, M) containing N data rows to be
        transformed. Each data row is represented by a row in the numpy array and
contains M values. If only one data row is present, raw_data has the
shape (1, M).
mode : str
        Maths used for transformation. The only allowed mode at the moment is
        'log_log_sqrt', which first takes the square root and then applies the
        logarithm twice.
direction : str, optional
        Gives the direction of the transformation. If 'direct', the data is
        transformed; if 'inverse', the inverse of the transformation is
calculated. The default is 'direct'.
**kwargs for the different modes
mode is 'log_log_sqrt' and direction is 'inverse':
min_value : float
Original minimum value of the data before transformation. Has
to be known because it is lost upon transformation. Default is
1.
Raises
------
ValueError
If the value passed as mode or direction is not understood.
Returns
-------
raw_data : ndarray
Transformed data with the same shape as raw_data.
"""
# list of allowed modes for data transformation
transform_modes = ['log_log_sqrt']
if direction == 'direct':
if mode == transform_modes[0]:
minimum_value = np.min(raw_data)
raw_data -= minimum_value
raw_data = np.log(np.log(np.sqrt(raw_data + 1) + 1) + 1)
else:
raise ValueError('No valid transform mode entered. Allowed modes '
'are {0}'.format(transform_modes))
elif direction == 'inverse':
if mode == transform_modes[0]:
minimum_value = kwargs.get('min_value', 1)
raw_data = (np.exp(np.exp(raw_data) - 1) - 1)**2 - 1
raw_data += minimum_value
else:
raise ValueError('No valid transform mode entered. Allowed modes '
'are {0}'.format(transform_modes))
else:
raise ValueError('No valid transform direction entered. Allowed '
'directions are [\'direct\', \'inverse\']')
return raw_data
def normalize(raw_data, mode, factor=1, **kwargs):
    """
    Normalize each data row. Currently the only mode is 'total_intensity', which
    divides every row by its integral (np.trapz over x_data, or over the point
    index if no x_data keyword argument is given) and multiplies by factor.
    """
    raw_data = np.asarray(raw_data)
# list of allowed modes for normalization
normalize_modes = ['total_intensity']
if mode == normalize_modes[0]:
x_data_points = raw_data.shape[1]
x_data = kwargs.get('x_data', np.arange(x_data_points))
conversion_factor = 1/np.repeat(np.trapz(raw_data, x=x_data, axis=1),
x_data_points).reshape(
(-1, x_data_points))
normalized_data = raw_data * conversion_factor * factor
else:
raise ValueError('No valid normalization mode entered. Allowed modes '
'are {0}'.format(normalize_modes))
return normalized_data
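# Minimal self-test sketch (added for illustration, not part of the original module): it round-trips the
# 'log_log_sqrt' transform and checks that 'total_intensity' normalization yields unit area under the curve.
# The demo values are arbitrary.
if __name__ == '__main__':
    demo = np.array([[1.0, 4.0, 9.0, 16.0]])
    original_min = np.min(demo)
    transformed = transform(demo.copy(), 'log_log_sqrt', direction='direct')
    restored = transform(transformed, 'log_log_sqrt', direction='inverse', min_value=original_min)
    print(np.allclose(restored, demo))   # expected: True
    normalized = normalize(demo, 'total_intensity')
    print(np.trapz(normalized[0]))       # expected: 1.0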
|
[
"numpy.trapz",
"numpy.sqrt",
"numpy.asarray",
"numpy.exp",
"numpy.min",
"numpy.arange"
] |
[((2588, 2608), 'numpy.asarray', 'np.asarray', (['raw_data'], {}), '(raw_data)\n', (2598, 2608), True, 'import numpy as np\n'), ((1677, 1693), 'numpy.min', 'np.min', (['raw_data'], {}), '(raw_data)\n', (1683, 1693), True, 'import numpy as np\n'), ((2814, 2838), 'numpy.arange', 'np.arange', (['x_data_points'], {}), '(x_data_points)\n', (2823, 2838), True, 'import numpy as np\n'), ((2880, 2916), 'numpy.trapz', 'np.trapz', (['raw_data'], {'x': 'x_data', 'axis': '(1)'}), '(raw_data, x=x_data, axis=1)\n', (2888, 2916), True, 'import numpy as np\n'), ((1769, 1790), 'numpy.sqrt', 'np.sqrt', (['(raw_data + 1)'], {}), '(raw_data + 1)\n', (1776, 1790), True, 'import numpy as np\n'), ((2117, 2133), 'numpy.exp', 'np.exp', (['raw_data'], {}), '(raw_data)\n', (2123, 2133), True, 'import numpy as np\n')]
|
#Predictions performed by this module
#dependencies
import base64
import numpy as np
import io
from PIL import Image
import keras
from keras import backend as K
from keras.models import Sequential
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator, img_to_array
from model import Model, DecoderType
from main import infer2
from flask import request
from flask import jsonify
from flask import Flask
from imageio import imread
app = Flask(__name__)
"""
def get_model():
This function loads the already-built keras model
global model
model = load_model('model.h5')
print("Model loaded!")"""
def preprocess_image(image, target_size):
if image.mode != "RGB":
image = image.convert("RGB")
image = image.resize(target_size)
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
return image
"""print(" * Loading Keras model ... ")
get_model()"""
@app.route("/predict", methods=["POST"])
def predict():
"""
whenever something is posted from /predict,
this function will process the info posted through POST http method
message: json from POST method
encoded: key is 'image', value is base64encoded image sent from client
decoded: as it says
image: decoded is bytes in a file, not an actual image,
image.open converts those bytes into PIL file
"""
message = request.get_json(force=True)
encoded = message['image']
encoded = encoded.replace("data:image/jpeg;base64,", "")
print(encoded)
decoded = base64.b64decode(encoded)
image = imread(io.BytesIO(decoded))
"""
processed_image = preprocess_image(image, target_size=(224,224))"""
"""prediction = model.predict(processed_image).tolist()"""
model = Model(list(open("/home/shikhar/Desktop/simpleHTR/SimpleHTR/model/charList.txt").read()), decoder_type=0, must_restore=True, dump=True)
response = infer2(model, image)
response = {
'text': response['text'],
'probability': str(response['probability'])
}
return jsonify(response)
@app.route("/", methods=["GET"])
def hello():
return 'Hello'
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000)
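# Hypothetical client-side sketch (added for illustration, not part of this service): one way a caller could
# exercise /predict. The file name 'sample.jpg' and the host/port are placeholders; the "data:image/jpeg;base64,"
# prefix is stripped by the handler above, so including it is optional.
#
#     import base64, requests
#     with open('sample.jpg', 'rb') as fh:
#         payload = {'image': 'data:image/jpeg;base64,' + base64.b64encode(fh.read()).decode('ascii')}
#     print(requests.post('http://localhost:5000/predict', json=payload).json())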
|
[
"keras.preprocessing.image.img_to_array",
"flask.Flask",
"io.BytesIO",
"base64.b64decode",
"flask.request.get_json",
"numpy.expand_dims",
"main.infer2",
"flask.jsonify"
] |
[((478, 493), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (483, 493), False, 'from flask import Flask\n'), ((815, 834), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (827, 834), False, 'from keras.preprocessing.image import ImageDataGenerator, img_to_array\n'), ((847, 876), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (861, 876), True, 'import numpy as np\n'), ((1412, 1440), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (1428, 1440), False, 'from flask import request\n'), ((1566, 1591), 'base64.b64decode', 'base64.b64decode', (['encoded'], {}), '(encoded)\n', (1582, 1591), False, 'import base64\n'), ((1938, 1958), 'main.infer2', 'infer2', (['model', 'image'], {}), '(model, image)\n', (1944, 1958), False, 'from main import infer2\n'), ((2085, 2102), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (2092, 2102), False, 'from flask import jsonify\n'), ((1611, 1630), 'io.BytesIO', 'io.BytesIO', (['decoded'], {}), '(decoded)\n', (1621, 1630), False, 'import io\n')]
|
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
from scipy import stats
class QuantizeLayer:
def __init__(self, name="None", num_bin=2001):
self.name = name
self.min = 0.0
self.max = 0.0
self.edge = 0.0
self.num_bins = num_bin
self.distribution_interval = 0.0
self.data_distribution = []
@staticmethod
def get_max_min_edge(blob_data):
max_val = np.max(blob_data)
min_val = np.min(blob_data)
data_edge = max(abs(max_val), abs(min_val))
return max_val, min_val, data_edge
def initial_histograms(self, blob_data):
max_val, min_val, data_edge = self.get_max_min_edge(blob_data)
hist, hist_edges = np.histogram(blob_data, bins=self.num_bins, range=(-data_edge, data_edge))
self.distribution_interval = 2 * data_edge / len(hist)
self.data_distribution = hist
self.edge = data_edge
self.min = min_val
self.max = max_val
def combine_histograms(self, blob_data):
"""
:param blob_data:
:return:
"""
        # hist holds the count in each bin; every bin interval is half-open [)
max_val, min_val, data_edge = self.get_max_min_edge(blob_data)
if data_edge <= self.edge:
hist, _ = np.histogram(blob_data, bins=len(self.data_distribution), range=(-self.edge, self.edge))
self.data_distribution += hist
else:
old_num_bins = len(self.data_distribution)
old_step = 2 * self.edge / old_num_bins
half_increased_bins = int((data_edge - self.edge) // old_step + 1)
new_num_bins = half_increased_bins * 2 + old_num_bins
data_edge = half_increased_bins * old_step + self.edge
hist, hist_edges = np.histogram(blob_data, bins=new_num_bins, range=(-data_edge, data_edge))
hist[half_increased_bins:new_num_bins - half_increased_bins] += self.data_distribution
self.data_distribution = hist
self.edge = data_edge
self.min = min(min_val, self.min)
self.max = max(max_val, self.max)
self.distribution_interval = 2 * self.edge / len(self.data_distribution)
@staticmethod
def smooth_distribution(p, eps=0.0001):
is_zeros = (p == 0).astype(np.float32)
is_nonzeros = (p != 0).astype(np.float32)
n_zeros = is_zeros.sum()
n_nonzeros = p.size - n_zeros
if not n_nonzeros:
raise ValueError('The discrete probability distribution is malformed. All entries are 0.')
eps1 = eps * float(n_zeros) / float(n_nonzeros)
assert eps1 < 1.0, 'n_zeros=%d, n_nonzeros=%d, eps1=%f' % (n_zeros, n_nonzeros, eps1)
hist = p.astype(np.float32)
hist += eps * is_zeros + (-eps1) * is_nonzeros
assert (hist <= 0).sum() == 0
return hist
@property
def threshold_distribution(self, target_bin=256):
"""
:param quantized_dtype:
:param target_bin:
:return:
"""
num_bins = len(self.data_distribution)
distribution = self.data_distribution
assert (num_bins % 2 == 1)
# if min_val >= 0 and quantized_dtype in ['auto', 'uint8']:
# target_bin = 128
threshold_sum = sum(distribution[target_bin:])
kl_divergence = np.zeros(num_bins - target_bin)
for threshold in range(target_bin, num_bins):
sliced_nd_hist = copy.deepcopy(distribution[:threshold])
# generate reference distribution p
p = sliced_nd_hist.copy()
p[threshold - 1] += threshold_sum
threshold_sum = threshold_sum - distribution[threshold]
# is_nonzeros[k] indicates whether hist[k] is nonzero
p = np.array(p)
nonzero_loc = (p != 0).astype(np.int64)
#
quantized_bins = np.zeros(target_bin, dtype=np.int64)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = len(sliced_nd_hist) // target_bin
# merge hist into num_quantized_bins bins
for j in range(target_bin):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[target_bin * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float64)
for j in range(target_bin):
start = j * num_merged_bins
if j == target_bin - 1:
stop = -1
else:
stop = start + num_merged_bins
norm = nonzero_loc[start:stop].sum()
if norm != 0:
q[start:stop] = quantized_bins[j] / norm
q[p == 0] = 0.0001
p = self.smooth_distribution(p)
# calculate kl_divergence between q and p
kl_divergence[threshold - target_bin] = stats.entropy(p, q)
min_kl_divergence = np.argmin(kl_divergence)
threshold_bin = min_kl_divergence + target_bin
threshold_value = (threshold_bin + 0.5) * self.distribution_interval + (-self.edge)
return threshold_value
@staticmethod
def max_slide_window(seq, m):
num = len(seq)
seq = seq.tolist()
assert isinstance(seq, (list, tuple, set)) and isinstance(m, int), "seq array"
assert len(seq) > m, "len(seq) must >m"
max_seq = 0
loc = 0
for i in range(0, num):
if (i + m) <= num:
temp_seq = seq[i:i + m]
temp_sum = sum(temp_seq)
if max_seq <= temp_sum:
max_seq = temp_sum
loc = i
else:
return max_seq, loc
@property
    def distribution_min_max(self, target_bin=256):
        """
        Variant of threshold_distribution: for every candidate width it locates the densest
        window of the histogram (max_slide_window), folds the outside mass into the window
        edges, and returns the KL-optimal (min, max) range instead of a symmetric threshold.
        """
        num_bins = len(self.data_distribution)
distribution = self.data_distribution
assert (num_bins % 2 == 1)
kl_divergence = np.zeros(num_bins - target_bin)
kl_loc = np.zeros(num_bins - target_bin)
for threshold in range(target_bin, num_bins):
#print("num:", threshold)
_, loc = self.max_slide_window(distribution, threshold)
sliced_nd_hist = copy.deepcopy(distribution[loc:loc + threshold])
# generate reference distribution p
p = sliced_nd_hist.copy()
right_sum = sum(distribution[loc + threshold:])
left_sum = sum(distribution[:loc])
p[threshold - 1] += right_sum
p[0] += left_sum
# is_nonzeros[k] indicates whether hist[k] is nonzero
p = np.array(p)
nonzero_loc = (p != 0).astype(np.int64)
#
quantized_bins = np.zeros(target_bin, dtype=np.int64)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = len(sliced_nd_hist) // target_bin
# merge hist into num_quantized_bins bins
for j in range(target_bin):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[target_bin * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float64)
for j in range(target_bin):
start = j * num_merged_bins
if j == target_bin - 1:
stop = -1
else:
stop = start + num_merged_bins
norm = nonzero_loc[start:stop].sum()
if norm != 0:
q[start:stop] = quantized_bins[j] / norm
q[p == 0] = 0.0001
p = self.smooth_distribution(p)
# calculate kl_divergence between q and p
kl_divergence[threshold - target_bin] = stats.entropy(p, q)
kl_loc[threshold - target_bin] = loc
min_kl_divergence = np.argmin(kl_divergence)
min = kl_loc[min_kl_divergence]
max = min + target_bin + min_kl_divergence
min = (min + 0.5) * self.distribution_interval + (-self.edge)
max = (max + 0.5) * self.distribution_interval + (-self.edge)
return min, max
@property
def distribution_test(self, target_bin=256):
num_bins = len(self.data_distribution)
distribution = self.data_distribution
assert (num_bins % 2 == 1)
kl_divergence = np.zeros(num_bins - target_bin)
kl_loc = np.zeros(num_bins - target_bin)
for threshold in range(target_bin, num_bins):
#print("num:", threshold)
_, loc = self.max_slide_window(distribution, threshold)
sliced_nd_hist = copy.deepcopy(distribution[loc:loc + threshold])
# generate reference distribution p
p = sliced_nd_hist.copy()
right_sum = sum(distribution[loc + threshold:])
left_sum = sum(distribution[:loc])
p[threshold - 1] += right_sum
p[0] += left_sum
# is_nonzeros[k] indicates whether hist[k] is nonzero
p = np.array(p)
nonzero_loc = (p != 0).astype(np.int64)
#
quantized_bins = np.zeros(target_bin, dtype=np.int64)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = len(sliced_nd_hist) // target_bin
# merge hist into num_quantized_bins bins
for j in range(target_bin):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[target_bin * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float64)
for j in range(target_bin):
start = j * num_merged_bins
if j == target_bin - 1:
stop = -1
else:
stop = start + num_merged_bins
norm = nonzero_loc[start:stop].sum()
if norm != 0:
q[start:stop] = quantized_bins[j] / norm
q[p == 0] = 0.0001
p = self.smooth_distribution(p)
# calculate kl_divergence between q and p
kl_divergence[threshold - target_bin] = stats.wasserstein_distance(p, q)
kl_loc[threshold - target_bin] = loc
min_kl_divergence = np.argmin(kl_divergence)
min = kl_loc[min_kl_divergence]
max = min + target_bin + min_kl_divergence
min = (min + 0.5) * self.distribution_interval + (-self.edge)
max = (max + 0.5) * self.distribution_interval + (-self.edge)
return min, max
data = np.random.randn(10000,)
print(data)
layer = QuantizeLayer(name="con_1")
layer.initial_histograms(data)
print("min:", layer.min)
print("max:", layer.max)
print("edge:", layer.edge)
print("distribution_interval:", layer.distribution_interval)
print("bins:", len(layer.data_distribution))
data = np.random.randn(10000,)
layer.combine_histograms(data)
print("min:", layer.min)
print("max:", layer.max)
print("edge:", layer.edge)
print("distribution_interval:", layer.distribution_interval)
print("bins:", len(layer.data_distribution))
data = np.random.randn(10000,)
data[9999] = 20
layer.combine_histograms(data)
print("min:", layer.min)
print("max:", layer.max)
print("edge:", layer.edge)
print("distribution_interval:", layer.distribution_interval)
print("bins:", len(layer.data_distribution))
import matplotlib.pyplot as plt
plt.plot(layer.data_distribution)
plt.show()
print(layer.threshold_distribution)
print(layer.distribution_min_max)
#print(layer.distribution_test)
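# Hypothetical follow-up sketch (added for illustration, not part of the original module): one common use of a
# calibrated clipping threshold is to derive a symmetric int8 scale and quantize new data with it. Treating the
# returned value as a clipping magnitude via abs() and using 127 levels are assumptions, not something this
# module defines.
threshold = abs(layer.threshold_distribution)
scale = threshold / 127.0
quantized = np.clip(np.round(data / scale), -127, 127).astype(np.int8)
print("scale:", scale, "quantized range:", quantized.min(), "to", quantized.max())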
|
[
"numpy.histogram",
"scipy.stats.entropy",
"copy.deepcopy",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.array",
"numpy.zeros",
"scipy.stats.wasserstein_distance",
"numpy.min",
"numpy.argmin",
"numpy.random.randn",
"matplotlib.pyplot.show"
] |
[((11155, 11177), 'numpy.random.randn', 'np.random.randn', (['(10000)'], {}), '(10000)\n', (11170, 11177), True, 'import numpy as np\n'), ((11703, 11725), 'numpy.random.randn', 'np.random.randn', (['(10000)'], {}), '(10000)\n', (11718, 11725), True, 'import numpy as np\n'), ((11990, 12023), 'matplotlib.pyplot.plot', 'plt.plot', (['layer.data_distribution'], {}), '(layer.data_distribution)\n', (11998, 12023), True, 'import matplotlib.pyplot as plt\n'), ((12024, 12034), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12032, 12034), True, 'import matplotlib.pyplot as plt\n'), ((477, 494), 'numpy.max', 'np.max', (['blob_data'], {}), '(blob_data)\n', (483, 494), True, 'import numpy as np\n'), ((513, 530), 'numpy.min', 'np.min', (['blob_data'], {}), '(blob_data)\n', (519, 530), True, 'import numpy as np\n'), ((770, 844), 'numpy.histogram', 'np.histogram', (['blob_data'], {'bins': 'self.num_bins', 'range': '(-data_edge, data_edge)'}), '(blob_data, bins=self.num_bins, range=(-data_edge, data_edge))\n', (782, 844), True, 'import numpy as np\n'), ((3386, 3417), 'numpy.zeros', 'np.zeros', (['(num_bins - target_bin)'], {}), '(num_bins - target_bin)\n', (3394, 3417), True, 'import numpy as np\n'), ((5188, 5212), 'numpy.argmin', 'np.argmin', (['kl_divergence'], {}), '(kl_divergence)\n', (5197, 5212), True, 'import numpy as np\n'), ((6192, 6223), 'numpy.zeros', 'np.zeros', (['(num_bins - target_bin)'], {}), '(num_bins - target_bin)\n', (6200, 6223), True, 'import numpy as np\n'), ((6241, 6272), 'numpy.zeros', 'np.zeros', (['(num_bins - target_bin)'], {}), '(num_bins - target_bin)\n', (6249, 6272), True, 'import numpy as np\n'), ((8274, 8298), 'numpy.argmin', 'np.argmin', (['kl_divergence'], {}), '(kl_divergence)\n', (8283, 8298), True, 'import numpy as np\n'), ((8772, 8803), 'numpy.zeros', 'np.zeros', (['(num_bins - target_bin)'], {}), '(num_bins - target_bin)\n', (8780, 8803), True, 'import numpy as np\n'), ((8821, 8852), 'numpy.zeros', 'np.zeros', (['(num_bins - target_bin)'], {}), '(num_bins - target_bin)\n', (8829, 8852), True, 'import numpy as np\n'), ((10867, 10891), 'numpy.argmin', 'np.argmin', (['kl_divergence'], {}), '(kl_divergence)\n', (10876, 10891), True, 'import numpy as np\n'), ((11448, 11470), 'numpy.random.randn', 'np.random.randn', (['(10000)'], {}), '(10000)\n', (11463, 11470), True, 'import numpy as np\n'), ((1834, 1907), 'numpy.histogram', 'np.histogram', (['blob_data'], {'bins': 'new_num_bins', 'range': '(-data_edge, data_edge)'}), '(blob_data, bins=new_num_bins, range=(-data_edge, data_edge))\n', (1846, 1907), True, 'import numpy as np\n'), ((3502, 3541), 'copy.deepcopy', 'copy.deepcopy', (['distribution[:threshold]'], {}), '(distribution[:threshold])\n', (3515, 3541), False, 'import copy\n'), ((3826, 3837), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (3834, 3837), True, 'import numpy as np\n'), ((3933, 3969), 'numpy.zeros', 'np.zeros', (['target_bin'], {'dtype': 'np.int64'}), '(target_bin, dtype=np.int64)\n', (3941, 3969), True, 'import numpy as np\n'), ((4537, 4584), 'numpy.zeros', 'np.zeros', (['sliced_nd_hist.size'], {'dtype': 'np.float64'}), '(sliced_nd_hist.size, dtype=np.float64)\n', (4545, 4584), True, 'import numpy as np\n'), ((5139, 5158), 'scipy.stats.entropy', 'stats.entropy', (['p', 'q'], {}), '(p, q)\n', (5152, 5158), False, 'from scipy import stats\n'), ((6465, 6513), 'copy.deepcopy', 'copy.deepcopy', (['distribution[loc:loc + threshold]'], {}), '(distribution[loc:loc + threshold])\n', (6478, 6513), False, 'import copy\n'), ((6863, 6874), 'numpy.array', 
'np.array', (['p'], {}), '(p)\n', (6871, 6874), True, 'import numpy as np\n'), ((6970, 7006), 'numpy.zeros', 'np.zeros', (['target_bin'], {'dtype': 'np.int64'}), '(target_bin, dtype=np.int64)\n', (6978, 7006), True, 'import numpy as np\n'), ((7574, 7621), 'numpy.zeros', 'np.zeros', (['sliced_nd_hist.size'], {'dtype': 'np.float64'}), '(sliced_nd_hist.size, dtype=np.float64)\n', (7582, 7621), True, 'import numpy as np\n'), ((8176, 8195), 'scipy.stats.entropy', 'stats.entropy', (['p', 'q'], {}), '(p, q)\n', (8189, 8195), False, 'from scipy import stats\n'), ((9045, 9093), 'copy.deepcopy', 'copy.deepcopy', (['distribution[loc:loc + threshold]'], {}), '(distribution[loc:loc + threshold])\n', (9058, 9093), False, 'import copy\n'), ((9443, 9454), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (9451, 9454), True, 'import numpy as np\n'), ((9550, 9586), 'numpy.zeros', 'np.zeros', (['target_bin'], {'dtype': 'np.int64'}), '(target_bin, dtype=np.int64)\n', (9558, 9586), True, 'import numpy as np\n'), ((10154, 10201), 'numpy.zeros', 'np.zeros', (['sliced_nd_hist.size'], {'dtype': 'np.float64'}), '(sliced_nd_hist.size, dtype=np.float64)\n', (10162, 10201), True, 'import numpy as np\n'), ((10756, 10788), 'scipy.stats.wasserstein_distance', 'stats.wasserstein_distance', (['p', 'q'], {}), '(p, q)\n', (10782, 10788), False, 'from scipy import stats\n')]
|
import numpy as np
import torch
from scipy.stats import truncnorm
from pymoo.factory import get_sampling, get_crossover, get_mutation
from pymoo.operators.mixed_variable_operator import MixedVariableSampling, MixedVariableMutation, MixedVariableCrossover
from pymoo.model.sampling import Sampling
class TruncatedNormalRandomSampling(Sampling):
def __init__(self, var_type=np.float):
super().__init__()
self.var_type = var_type
def _do(self, problem, n_samples, **kwargs):
return truncnorm.rvs(-2, 2, size=(n_samples, problem.n_var)).astype(np.float32)
class NormalRandomSampling(Sampling):
def __init__(self, mu=0, std=1, var_type=np.float):
super().__init__()
self.mu = mu
self.std = std
self.var_type = var_type
def _do(self, problem, n_samples, **kwargs):
return np.random.normal(self.mu, self.std, size=(n_samples, problem.n_var))
class BinaryRandomSampling(Sampling):
def __init__(self, prob=0.5):
super().__init__()
self.prob = prob
def _do(self, problem, n_samples, **kwargs):
val = np.random.random((n_samples, problem.n_var))
return (val < self.prob).astype(np.bool)
def get_operators(config):
if config.config == "DeepMindBigGAN256" or config.config == "DeepMindBigGAN512":
mask = ["real"]*config.dim_z + ["bool"]*config.num_classes
real_sampling = None
if config.config == "DeepMindBigGAN256" or config.config == "DeepMindBigGAN512":
real_sampling = TruncatedNormalRandomSampling()
sampling = MixedVariableSampling(mask, {
"real": real_sampling,
"bool": BinaryRandomSampling(prob=5/1000)
})
crossover = MixedVariableCrossover(mask, {
"real": get_crossover("real_sbx", prob=1.0, eta=3.0),
"bool": get_crossover("bin_hux", prob=0.2)
})
mutation = MixedVariableMutation(mask, {
"real": get_mutation("real_pm", prob=0.5, eta=3.0),
"bool": get_mutation("bin_bitflip", prob=10/1000)
})
return dict(
sampling=sampling,
crossover=crossover,
mutation=mutation
)
elif config.config.split("_")[0] == "StyleGAN2":
return dict(
sampling=NormalRandomSampling(),
crossover=get_crossover("real_sbx", prob=1.0, eta=3.0),
mutation=get_mutation("real_pm", prob=0.5, eta=3.0)
)
elif config.config.split("_")[0] == "Adaily":
return dict(
sampling=NormalRandomSampling(),
crossover=get_crossover("real_sbx", prob=1.0, eta=3.0),
mutation=get_mutation("real_pm", prob=0.5, eta=3.0)
)
elif config.config == "GPT2":
return dict(
sampling=get_sampling("int_random"),
crossover=get_crossover("int_sbx", prob=1.0, eta=3.0),
mutation=get_mutation("int_pm", prob=0.5, eta=3.0)
)
else:
raise Exception("Unknown config")
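# Hypothetical downstream sketch (added for illustration, not part of this module): the dict returned by
# get_operators() is presumably unpacked into a pymoo algorithm. `config` and `problem` are assumed to be
# defined elsewhere, and the import paths follow the pymoo 0.4.x layout used above.
#
#     from pymoo.algorithms.so_genetic_algorithm import GA
#     from pymoo.optimize import minimize
#
#     operators = get_operators(config)
#     algorithm = GA(pop_size=100, **operators)
#     result = minimize(problem, algorithm, ('n_gen', 50))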
|
[
"numpy.random.normal",
"numpy.random.random",
"pymoo.factory.get_mutation",
"pymoo.factory.get_sampling",
"pymoo.factory.get_crossover",
"scipy.stats.truncnorm.rvs"
] |
[((851, 919), 'numpy.random.normal', 'np.random.normal', (['self.mu', 'self.std'], {'size': '(n_samples, problem.n_var)'}), '(self.mu, self.std, size=(n_samples, problem.n_var))\n', (867, 919), True, 'import numpy as np\n'), ((1109, 1153), 'numpy.random.random', 'np.random.random', (['(n_samples, problem.n_var)'], {}), '((n_samples, problem.n_var))\n', (1125, 1153), True, 'import numpy as np\n'), ((514, 567), 'scipy.stats.truncnorm.rvs', 'truncnorm.rvs', (['(-2)', '(2)'], {'size': '(n_samples, problem.n_var)'}), '(-2, 2, size=(n_samples, problem.n_var))\n', (527, 567), False, 'from scipy.stats import truncnorm\n'), ((1785, 1829), 'pymoo.factory.get_crossover', 'get_crossover', (['"""real_sbx"""'], {'prob': '(1.0)', 'eta': '(3.0)'}), "('real_sbx', prob=1.0, eta=3.0)\n", (1798, 1829), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((1851, 1885), 'pymoo.factory.get_crossover', 'get_crossover', (['"""bin_hux"""'], {'prob': '(0.2)'}), "('bin_hux', prob=0.2)\n", (1864, 1885), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((1967, 2009), 'pymoo.factory.get_mutation', 'get_mutation', (['"""real_pm"""'], {'prob': '(0.5)', 'eta': '(3.0)'}), "('real_pm', prob=0.5, eta=3.0)\n", (1979, 2009), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2031, 2074), 'pymoo.factory.get_mutation', 'get_mutation', (['"""bin_bitflip"""'], {'prob': '(10 / 1000)'}), "('bin_bitflip', prob=10 / 1000)\n", (2043, 2074), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2352, 2396), 'pymoo.factory.get_crossover', 'get_crossover', (['"""real_sbx"""'], {'prob': '(1.0)', 'eta': '(3.0)'}), "('real_sbx', prob=1.0, eta=3.0)\n", (2365, 2396), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2419, 2461), 'pymoo.factory.get_mutation', 'get_mutation', (['"""real_pm"""'], {'prob': '(0.5)', 'eta': '(3.0)'}), "('real_pm', prob=0.5, eta=3.0)\n", (2431, 2461), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2611, 2655), 'pymoo.factory.get_crossover', 'get_crossover', (['"""real_sbx"""'], {'prob': '(1.0)', 'eta': '(3.0)'}), "('real_sbx', prob=1.0, eta=3.0)\n", (2624, 2655), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2678, 2720), 'pymoo.factory.get_mutation', 'get_mutation', (['"""real_pm"""'], {'prob': '(0.5)', 'eta': '(3.0)'}), "('real_pm', prob=0.5, eta=3.0)\n", (2690, 2720), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2808, 2834), 'pymoo.factory.get_sampling', 'get_sampling', (['"""int_random"""'], {}), "('int_random')\n", (2820, 2834), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2858, 2901), 'pymoo.factory.get_crossover', 'get_crossover', (['"""int_sbx"""'], {'prob': '(1.0)', 'eta': '(3.0)'}), "('int_sbx', prob=1.0, eta=3.0)\n", (2871, 2901), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n'), ((2924, 2965), 'pymoo.factory.get_mutation', 'get_mutation', (['"""int_pm"""'], {'prob': '(0.5)', 'eta': '(3.0)'}), "('int_pm', prob=0.5, eta=3.0)\n", (2936, 2965), False, 'from pymoo.factory import get_sampling, get_crossover, get_mutation\n')]
|
import config as cfg
import cv2
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import img_to_array
from keras import backend as K
import tensorflow as tf
import keras
'''
this is necessary so that there are no errors when exposing the service with flask
info --> https://github.com/tensorflow/tensorflow/issues/28287#issuecomment-495005162
'''
from keras.backend import set_session
sess = tf.Session()
graph = tf.get_default_graph()
set_session(sess)
model_emotions = load_model(cfg.path_model)
class predict_emotions():
'''
def __init__(self):
        # load the emotion detection model
global graph
self.graph = tf.get_default_graph()
self.model_emotions = load_model(cfg.path_model)
'''
def preprocess_img(self,face_image,rgb=True,w=48,h=48):
face_image = cv2.resize(face_image, (w,h))
if rgb == False:
face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
face_image = face_image.astype("float") / 255.0
face_image= img_to_array(face_image)
face_image = np.expand_dims(face_image, axis=0)
return face_image
def get_emotion(self,img,boxes_face):
emotions = []
if len(boxes_face)!=0:
for box in boxes_face:
y0,x0,y1,x1 = box
face_image = img[x0:x1,y0:y1]
                # preprocess the face crop
face_image = self.preprocess_img(face_image ,cfg.rgb, cfg.w, cfg.h)
                # predict the emotion for the face image
global sess
global graph
with graph.as_default():
set_session(sess)
prediction = model_emotions.predict(face_image)
emotion = cfg.labels[prediction.argmax()]
emotions.append(emotion)
else:
emotions = []
boxes_face = []
return boxes_face,emotions
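# Hypothetical usage sketch (added for illustration, not part of the original module). Face boxes are expected in
# (y0, x0, y1, x1) order, matching the slicing in get_emotion; 'face.jpg' and the single whole-frame box are
# placeholders standing in for a real face detector.
#
#     frame = cv2.imread('face.jpg')
#     detector_boxes = [(0, 0, frame.shape[1], frame.shape[0])]
#     boxes, emotions = predict_emotions().get_emotion(frame, detector_boxes)
#     print(list(zip(boxes, emotions)))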
|
[
"keras.preprocessing.image.img_to_array",
"keras.models.load_model",
"tensorflow.Session",
"keras.backend.set_session",
"numpy.expand_dims",
"cv2.cvtColor",
"cv2.resize",
"tensorflow.get_default_graph"
] |
[((432, 444), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (442, 444), True, 'import tensorflow as tf\n'), ((453, 475), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (473, 475), True, 'import tensorflow as tf\n'), ((477, 494), 'keras.backend.set_session', 'set_session', (['sess'], {}), '(sess)\n', (488, 494), False, 'from keras.backend import set_session\n'), ((512, 538), 'keras.models.load_model', 'load_model', (['cfg.path_model'], {}), '(cfg.path_model)\n', (522, 538), False, 'from keras.models import load_model\n'), ((860, 890), 'cv2.resize', 'cv2.resize', (['face_image', '(w, h)'], {}), '(face_image, (w, h))\n', (870, 890), False, 'import cv2\n'), ((1061, 1085), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['face_image'], {}), '(face_image)\n', (1073, 1085), False, 'from keras.preprocessing.image import img_to_array\n'), ((1107, 1141), 'numpy.expand_dims', 'np.expand_dims', (['face_image'], {'axis': '(0)'}), '(face_image, axis=0)\n', (1121, 1141), True, 'import numpy as np\n'), ((940, 984), 'cv2.cvtColor', 'cv2.cvtColor', (['face_image', 'cv2.COLOR_BGR2GRAY'], {}), '(face_image, cv2.COLOR_BGR2GRAY)\n', (952, 984), False, 'import cv2\n'), ((1650, 1667), 'keras.backend.set_session', 'set_session', (['sess'], {}), '(sess)\n', (1661, 1667), False, 'from keras.backend import set_session\n')]
|
import rclpy
import json,numpy
from numpy import clip
from rclpy.node import Node
from std_msgs.msg import Float64MultiArray
from sensor_msgs.msg import JointState
from diagnostic_msgs.msg import DiagnosticStatus, KeyValue
import can
from tinymovr import Tinymovr
from tinymovr.iface.can import CAN
from tinymovr.units import get_registry
from math import pi
ureg = get_registry()
amps = ureg.ampere
s = ureg.second
minute = ureg.minute
tick = ureg.tick
rad = ureg.radian
turn = ureg.turn
deg = ureg.degree
class HardwareAbstractionLayer(Node):
def __init__(self):
super().__init__('HardwareAbstractionLayer')
        # Read the motor configuration file
f = open("/home/vanille/ros2_ws/src/hal/config.json","r")
self.config = json.load(f)
f.close()
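        # Illustrative sketch of the expected config.json layout (keys inferred from how
        # self.config is used below; values are placeholders):
        # {"motors": {"<motor_name>": {"id_can": 1, "max_speed": ..., "max_current": ...,
        #                              "gain_integrator": ..., "joint_name": "...",
        #                              "orientation": "direct" or "indirect", "ratio": ...,
        #                              "limit_lower": ..., "limit_upper": ...}, ...}}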
self.can_bus = can.Bus(bustype='slcan',channel='/dev/ttyACM0',bitrate=1000000)
self.iface = CAN(self.can_bus)
for kmotor,motor in self.config['motors'].items():
if "id_can" in motor :
motor["tm"]=Tinymovr(node_id=int(motor["id_can"]), iface=self.iface)
assert(motor["tm"].motor_config.flags == 1)
motor["offset"] = motor["tm"].encoder_estimates.position
self.declare_parameter(kmotor+"_max_speed",motor["max_speed"])
self.declare_parameter(kmotor+"_max_current",motor["max_current"])
motor["tm"].set_limits(motor["max_speed"]*turn/minute,motor["max_current"]*amps)
self.declare_parameter(kmotor+"_gain_integrator",motor["gain_integrator"])
motor["tm"].set_integrator_gains(motor["gain_integrator"])
self.publisherJoint_ = self.create_publisher(JointState, '/vanille/joint_states', 1)
self.publisherDiag_ = self.create_publisher(DiagnosticStatus, 'diagnostic',1)
self.subscription = self.create_subscription(
JointState,
'/vanille/joint_position_cmd',
self.update_position_cmd,
1)
timer_period = 0.01 # seconds
timer_period_diag = 2 # seconds
self.timer = self.create_timer(timer_period, self.routine)
self.timerDiag = self.create_timer(timer_period_diag, self.updateDiagnostic)
def update_position_cmd(self, msg : JointState):
for imotor in range(len(msg.name)):
kmotor = msg.name[imotor]
if kmotor in self.config['motors']:
motor = self.config['motors'][kmotor]
position_target = msg.position[imotor]*rad
if numpy.isnan(position_target) :
motor["tm"].current_control()
motor["tm"].set_cur_setpoint(0.0*amps)
else:
position_target = clip(position_target,motor["limit_lower"]*deg, motor["limit_upper"]*deg)
if motor["orientation"] == "direct":
motor["tm"].position_control()
# motor["tm"].set_pos_setpoint(motor["offset"]+position_target*float(motor["ratio"]))
motor["tm"].set_pos_setpoint(motor["offset"]+position_target*motor["ratio"])
elif motor["orientation"] == "indirect":
motor["tm"].position_control()
# motor["tm"].set_pos_setpoint(motor["offset"]-position_target*float(motor["ratio"]))
motor["tm"].set_pos_setpoint(motor["offset"]-position_target*motor["ratio"])
def read_positions(self):
msg = JointState()
msg.header.stamp = super().get_clock().now().to_msg()
msg.name = []
msg.position = []
msg.velocity = []
msg.effort = []
for kmotor,motor in self.config['motors'].items():
msg.name.append(motor["joint_name"])
if motor["orientation"] == "direct":
msg.position.append(float((motor["tm"].encoder_estimates.position-motor["offset"])/float(motor["ratio"])))
msg.velocity.append(motor["tm"].encoder_estimates.velocity.to(rad/s).m/float(motor["ratio"]))
msg.effort.append(motor["tm"].Iq.estimate.m*float(motor["ratio"]))
elif motor["orientation"] == "indirect":
msg.position.append(float(-(motor["tm"].encoder_estimates.position-motor["offset"])/float(motor["ratio"])))
msg.velocity.append(-motor["tm"].encoder_estimates.velocity.to(rad/s).m/float(motor["ratio"]))
msg.effort.append(-motor["tm"].Iq.estimate.m*float(motor["ratio"]))
self.publisherJoint_.publish(msg)
def updateDiagnostic(self):
# tmx.device_info = {"device_id": 99999, "fw_major": 0, "fw_minor": 7, "fw_patch": 1, "temp": 45}
# tmx.motor_config = {"flags": 1, "R": 200, "pole_pairs": 11, "L": 100}
msg = DiagnosticStatus()
msg1 = KeyValue()
for kmotor,motor in self.config['motors'].items():
msg.values= []
msg.hardware_id = kmotor
msg.name = kmotor
msg.message = "device_info motor_config"
for kinfo,info in motor["tm"].device_info.items():
msg1 = KeyValue()
msg1.key=kinfo
msg1.value=str(info)
msg.values.append(msg1)
for kinfo,info in motor["tm"].motor_config.items():
msg1 = KeyValue()
msg1.key=kinfo
msg1.value=str(info)
msg.values.append(msg1)
self.publisherDiag_.publish(msg)
def routine(self):
self.read_positions()
def stop(self):
self.get_logger().info(f'Stopping HAL Node')
for kmotor,motor in self.config['motors'].items():
motor["tm"].idle()
def main(args=None):
print('Hi from hal.')
rclpy.init(args=args)
hal_node = HardwareAbstractionLayer()
try:
rclpy.spin(hal_node)
except KeyboardInterrupt:
pass
hal_node.stop()
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
hal_node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
[
"diagnostic_msgs.msg.KeyValue",
"numpy.clip",
"tinymovr.units.get_registry",
"rclpy.spin",
"diagnostic_msgs.msg.DiagnosticStatus",
"sensor_msgs.msg.JointState",
"can.Bus",
"numpy.isnan",
"json.load",
"rclpy.init",
"rclpy.shutdown",
"tinymovr.iface.can.CAN"
] |
[((370, 384), 'tinymovr.units.get_registry', 'get_registry', ([], {}), '()\n', (382, 384), False, 'from tinymovr.units import get_registry\n'), ((5874, 5895), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (5884, 5895), False, 'import rclpy\n'), ((6223, 6239), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (6237, 6239), False, 'import rclpy\n'), ((783, 795), 'json.load', 'json.load', (['f'], {}), '(f)\n', (792, 795), False, 'import json, numpy\n'), ((846, 911), 'can.Bus', 'can.Bus', ([], {'bustype': '"""slcan"""', 'channel': '"""/dev/ttyACM0"""', 'bitrate': '(1000000)'}), "(bustype='slcan', channel='/dev/ttyACM0', bitrate=1000000)\n", (853, 911), False, 'import can\n'), ((931, 948), 'tinymovr.iface.can.CAN', 'CAN', (['self.can_bus'], {}), '(self.can_bus)\n', (934, 948), False, 'from tinymovr.iface.can import CAN\n'), ((3582, 3594), 'sensor_msgs.msg.JointState', 'JointState', ([], {}), '()\n', (3592, 3594), False, 'from sensor_msgs.msg import JointState\n'), ((4888, 4906), 'diagnostic_msgs.msg.DiagnosticStatus', 'DiagnosticStatus', ([], {}), '()\n', (4904, 4906), False, 'from diagnostic_msgs.msg import DiagnosticStatus, KeyValue\n'), ((4922, 4932), 'diagnostic_msgs.msg.KeyValue', 'KeyValue', ([], {}), '()\n', (4930, 4932), False, 'from diagnostic_msgs.msg import DiagnosticStatus, KeyValue\n'), ((5956, 5976), 'rclpy.spin', 'rclpy.spin', (['hal_node'], {}), '(hal_node)\n', (5966, 5976), False, 'import rclpy\n'), ((2605, 2633), 'numpy.isnan', 'numpy.isnan', (['position_target'], {}), '(position_target)\n', (2616, 2633), False, 'import json, numpy\n'), ((5225, 5235), 'diagnostic_msgs.msg.KeyValue', 'KeyValue', ([], {}), '()\n', (5233, 5235), False, 'from diagnostic_msgs.msg import DiagnosticStatus, KeyValue\n'), ((5431, 5441), 'diagnostic_msgs.msg.KeyValue', 'KeyValue', ([], {}), '()\n', (5439, 5441), False, 'from diagnostic_msgs.msg import DiagnosticStatus, KeyValue\n'), ((2805, 2882), 'numpy.clip', 'clip', (['position_target', "(motor['limit_lower'] * deg)", "(motor['limit_upper'] * deg)"], {}), "(position_target, motor['limit_lower'] * deg, motor['limit_upper'] * deg)\n", (2809, 2882), False, 'from numpy import clip\n')]
|
#!/usr/bin/python
from __future__ import print_function
from __future__ import absolute_import
from past.builtins import basestring
import sys
import numpy as np
import moby2
trace = moby2.util.log.logger.trace
# transitional...
_fp_formats = {
'det_uid': '%4d',
'ok': '%1d',
'x0': '%9.6f',
'x0_err': '%9.6f',
'y0': '%9.6f',
'y0_err': '%9.6f',
'tau': '%8.5f',
'tau_err': '%8.5f',
'h': '%.4e',
'w': '%9.6f',
'sn': '%9.1f',
'base': '%.5e',
'n_obs': '%3d',
}
_fp_fields = ['ok', 'x0', 'x0_err', 'y0', 'y0_err', 'tau', 'tau_err',
'h', 'w', 'sn', 'base', 'n_obs']
_fp_columns_format_str = ' '.join(['{%s:%s}'%(k, _fp_formats[k][1:])
for k in _fp_fields]) + '\n'
class FPFitFile(moby2.detectors._SimpleDetData):
fields = _fp_fields
dtypes = {'ok': bool, 'n_obs': int}
columns_format_str = _fp_columns_format_str
xcfs = '{det_uid:4d} {ok:1d} '\
'{x0:9.6f} {x0_err:9.6f} {y0:9.6f} {y0_err:9.6f} '\
'{tau:8.5f} {tau_err:8.5f} '\
'{h:.4e} {w:9.6f} {sn:9.1f} {n_obs:3d}\n'
header = '# det_uid ok x0 x0_err y0 y0_err '\
'tau tau_err h w sn n_obs'
def __init__(self, det_uid=None):
if det_uid is not None:
self.det_uid = np.array(det_uid, dtype='int64')
n = len(det_uid)
for f in self.fields:
setattr(self, f, np.zeros(n, self.dtypes.get(f, 'float64')))
def __repr__(self):
name = repr(self.__class__)
return '%s with %i det_uid for fields ' % (name, len(self.det_uid)) + \
','.join(self.fields)
def update_row(self, row, data):
for k in self.fields:
if k in data:
getattr(self, k)[row] = data[k]
@classmethod
def from_columns_file(cls, filename):
data = np.loadtxt(filename, unpack=1)
det_uid = data[0].astype('int')
self = cls(det_uid)
self.ok = data[1].astype('int').astype('bool')
if len(data[2:]) == 11:
self.x0, self.x0_err, self.y0, self.y0_err, self.tau, self.tau_err, self.h, self.w, self.sn, self.base, self.n_obs = data[2:]
elif len(data[2:-1]) == 9:
self.x0, self.x0_err, self.y0, self.y0_err, self.tau, self.tau_err, self.h, self.w, self.sn = data[2:-1]
self.base = 0 * self.w
elif len(data[2:-1]) == 8:
self.x0, self.x0_err, self.y0, self.y0_err, self.tau, self.tau_err, self.h, self.sn = data[2:-1]
self.w = 0 * self.x0
self.base = 0 * self.x0
elif len(data[2:-1]) == 4:
self.x0, self.x0_err, self.y0, self.y0_err = data[2:-1]
self.base = 0
else:
raise ValueError("Strange number of columns in %s" % filename)
self.n_obs = data[-1].astype('int')
return self
@classmethod
def from_file(cls, filename):
if filename.endswith('fits') or filename.endswith('fits.gz'):
return cls.from_fits_table(filename)
return cls.from_columns_file(filename)
# This supercedes _SimpleDetData.write
def write(self, filename, format=None):
if format is None:
if filename.endswith('fits') or filename.endswith('fits.gz'):
format = 'fits'
else:
format = 'txt'
data = [('det_uid', self.det_uid)]
for k in self.fields:
v = getattr(self, k)
if v.dtype == bool:
v = v.astype('int8')
data.append((k, v))
odb = moby2.util.StructDB.from_data(data,formats=_fp_formats)
if format == 'fits':
odb.to_fits_table(filename)
elif format == 'txt':
odb.to_column_file(filename)
else:
raise ValueError("Unknown format request, %s." % format)
def write_reduced(self, filename, scale_amp=1.):
format = 'txt'
if filename.endswith('.fits') or filename.endswith('.fits.gz'):
format = 'fits'
s = self.ok.astype(bool)
# det_uid peak_DAC SN tau
data = [('det_uid', self.det_uid[s]),
('peak_dac', self.h[s] * scale_amp),
('time_const', self.tau[s]),
('sn', self.sn[s]),
]
odb = moby2.util.StructDB.from_data(
data, formats={'peak_dac': '%12.3f',
'time_const': '%12.5f',
'sn': '%12.3f'})
if format == 'txt':
odb.to_column_file(filename)
elif format == 'fits':
odb.to_fits_table(filename)
@classmethod
def from_focal_plane(cls, fp):
"""
Initialize from a FocalPlane object.
"""
self = cls(fp.det_uid)
self.x0 = fp.x.copy()
self.y0 = fp.y.copy()
self.ok = fp.mask.copy()
zeros = np.zeros(self.ok.shape)
self.tau, self.h, self.w = zeros.copy(), zeros.copy(), zeros.copy()
self.base = zeros
return self
@classmethod
def combine_fits(cls, fits, template=None, params={}):
"""
Combine fits by shifting each one to match a template, and
averaging the good fits for each detector.
If a template is not provided, match to the first one.
"""
trace(1, 'Fitting and averaging %i fits' % len(fits))
if template is None:
template = fits[0]
# Start by shifting each fit to match the template.
orig_fits, fits = fits, []
fitter = FPTemplateFitter()
fitter.set_template(template)
fit_params = {'shift': True,
'rotation': False}
fit_params.update(params)
fit_results = [None for fi in range(len(orig_fits))]
for fi,f0 in enumerate(orig_fits):
if f0.ok.sum() < params.get('min_dets', 50):
trace(2, 'Discarding fit with only %i good fits' % f0.ok.sum())
continue
ok, result = fitter.fit(f0, fit_params)
if not ok:
trace(2, 'Discarding fit due to failed template match')
continue
f1 = f0.copy()
f1.x0 += result[0]
f1.y0 += result[1]
fits.append(f1)
fit_results[fi] = result
trace(1, 'Cut %i of %i fits (increase verbosity to see why).' % \
(len(orig_fits) - len(fits), len(orig_fits)))
if len(fits) == 0:
return None, None
print([len(f.det_uid) for f in fits])
n_det_uid = max([f.det_uid.max() for f in fits]) + 1
output = cls(np.arange(n_det_uid))
output.ok[:] = False
ARCMIN = np.pi/180/60
trace(1, 'Combining data for %i detectors' % n_det_uid)
for uid in output.det_uid:
ok = np.array([f.get_property('ok', det_uid=uid)[1]
for f in fits])
x, y, tau = np.transpose([f.get_property(['x0','y0','tau'], det_uid=uid)[1]
for f in fits])
for _x in [x, y, tau]:
# Yes, this happens...
ok *= ~np.isnan(_x) * ~np.isinf(_x)
x, y, tau = [_x[ok] for _x in [x,y,tau]]
if ok.sum() < params.get('min_obs', 1):
trace(2, 'Discarding det_uid=%i due to only %i contributors'
% (uid, ok.sum()))
continue
# Majority rules.
x0, y0 = np.median(x), np.median(y)
for iteration in [0,1,2]:
d0 = ((x - x0)**2 + (y-y0)**2)**.5
s0 = d0 < params.get('max_separation', 1)*ARCMIN
if s0.sum() == 0:
break
x0, y0 = x[s0].mean(), y[s0].mean()
if s0.sum() <= 0:
trace(2, 'Discarding det_uid=%i due to only %i items in '\
' combination' % (uid, s0.sum()))
continue
vals = {
'x0': x0, 'y0': y0,
'x0_err': x[s0].std(),
'y0_err': y[s0].std(),
'tau': tau[s0].mean(),
'tau_err': tau[s0].std(),
'n_obs': s0.sum(),
'ok': s0.sum() >= params.get('min_obs', 1) }
output.update_row(uid, vals)
trace(2, 'Result for det_uid=%i' % uid)
for k in ['x0', 'y0', 'tau']:
trace(2, ' %s = %10.5f +- %10.5f' % (k, vals[k], vals[k+'_err']))
return output, fit_results
def plot_positions(self, filename, auto_zoom=True, params={},
title='', fig=None):
import pylab as pl
if fig is None:
pl.figure()
pl.gcf().set_size_inches(6., 6.)
else:
pl.figure(fig.number)
s = self.ok
if s.sum() == 0:
pl.title(title + ' - no good fits')
pl.savefig(filename)
            pl.clf()
            return # nothing more to plot when there are no good fits
units = params.get('units', 'deg')
scale = {'rad': 1., 'deg': 180/np.pi, 'arcmin': 60*180/np.pi}[units]
x, y = self.x0[s]*scale, self.y0[s]*scale
x0, y0 = np.median(x), np.median(y)
r = ((x-x0)**2 + (y-y0)**2)**.5
window = np.median(r)*3
inside = r < params.get('zoom', scale*window)
pl.scatter(x, y, alpha=0.5)
if params.get('limits') is None:
if np.any(inside):
for vect,limiter in [(x,pl.xlim), (y,pl.ylim)]:
lo, hi = limiter()
lo = min(lo, vect[inside].min())
hi = max(hi, vect[inside].max())
limiter(lo, hi)
else:
xlims, ylims = params['limits']
pl.xlim(*xlims), pl.ylim(*ylims)
pl.title(title + ' - %i dets outside window' % (~inside).sum())
pl.xlabel('X (%s)' % units)
pl.ylabel('Y (%s)' % units)
def smart_locate(ax, n_max, bases=[1,2,5]):
x0, x1 = ax.get_view_interval()
if x1 == x0:
return
delta = (x1-x0) / (n_max-1)
# Find smallest base and p such delta < base*10^p
log_spacing = min([
np.ceil(np.log10(delta) - np.log10(b)) + np.log10(b)
for b in bases])
loc = pl.MultipleLocator(10**log_spacing)
ax.set_major_locator(loc)
smart_locate(pl.gca().xaxis, 6)
smart_locate(pl.gca().yaxis, 9)
pl.savefig(filename)
pl.clf()
pl.figure()
def plot_rowcol_summaries(self, filename, array_data):
import pylab as pl
def x_eyes(bads=None):
# Mark bad fits with an x.
if bads is None:
bads = ~s
pl.scatter(cols[bads], rows[bads], marker='x', edgecolor='gray')
def limit_args(data, kw={}):
lo, hi = data.min(), data.max()
if s.sum() > 1:
lo, hi = data[s].min(), data[s].max()
if hi == lo:
hi = lo + 1
kw.update({'vmin': lo, 'vmax': hi})
return kw
def bin(data, dtype='float'):
out = np.zeros((n_rows, n_cols), dtype)
out[rows, cols] = data
return out
def imshow_reformat():
# Tighten boundaries, add labels...
pl.xlabel('Column')
pl.ylabel('Row')
pl.xlim(-0.5, n_cols-0.5)
pl.ylim(-0.5, n_rows-0.5)
s = self.ok
rows, cols = array_data.get_property(['row', 'col'], det_uid=self.det_uid)
n_rows, n_cols = rows.max()+1, cols.max()+1
# Init plotting
pl.figure()
pl.gcf().set_size_inches(6., 6.)
pl.subplots_adjust(left=.1, right=.95, top=.95, bottom=.1,
hspace=.2, wspace=.3)
title_fs = 12
# Time constants...
#
pl.subplot(2,2,1)
z = self.tau * 1e3
pl.imshow(bin(z), interpolation='nearest', **limit_args(z))
pl.colorbar()
x_eyes()
pl.title('Time constants (ms)', fontsize=title_fs)
imshow_reformat()
pl.subplot(2,2,2)
z = self.tau_err * 1e3
pl.imshow(bin(z), interpolation='nearest', **limit_args(z))
pl.colorbar()
x_eyes()
pl.title('Time constant errors (ms)', fontsize=title_fs)
imshow_reformat()
if self.ok.sum() > 10:
pl.subplot(2,2,3)
pl.hist(self.tau[self.ok]*1e3, bins=20) #min(20,self.ok.sum()//10)
pl.xlabel('Time constant (ms)')
pl.ylabel('N_dets')
pl.subplot(2,2,4)
pl.hist(self.tau_err[self.ok]*1e3, bins=self.ok.sum()//10)
pl.xlabel('Time constant errors (ms)')
pl.ylabel('N_dets')
pl.savefig(filename+'time_const.png')
pl.clf()
# Positions and stuff
#
for i in [0,1]:
pl.subplot(2,2,1+i)
z = {0: self.x0_err, 1:self.y0_err}[i]
z = z * 180*3600/np.pi # to arcseconds
pl.imshow(bin(z), interpolation='nearest', **limit_args(z))
pl.colorbar()
x_eyes()
imshow_reformat()
pl.title('%s position RMS' % {0: 'X', 1: 'Y'}[i],
fontsize=title_fs)
pl.subplot(2,2,3)
z = self.n_obs
pl.imshow(bin(z), interpolation='nearest')
pl.colorbar()
imshow_reformat()
pl.title('N_obs', fontsize=title_fs)
pl.savefig(filename+'positions.png')
pl.clf()
# Destroy our subplot adjustments
pl.figure()
class FPTemplateFitter:
"""
Class for shift/rotate/shearing a template FPFitFile to match a
target FPFitFile.
After initializing, set the template to use:
fitter = FPTemplateFitter()
fitter.set_template(my_template_fp)
ok, params = fitter.fit(my_target_fp)
Those params are stored internally, so you can get the model FP:
model_for_target = fitter.get_modeled(my_target_fp)
"""
param_names = ['dx', 'dy', 'theta', 'scale', 'shear_theta', 'shear_scale']
formats = {'dx': '%9.6f',
'dy': '%9.6f',
'scale': '%11.4e',
'n_dets': '%4i',
'theta': '%9.6f',
'shear_scale': '%11.4e',
'shear_theta': '%9.6f',
}
@classmethod
def from_params(cls, opts, tod_info=None):
if '_execcfg' in opts:
tod_id = moby2.scripting.products.get_tod_id(tod_info=tod_info)
ic = moby2.scripting.execcfg.InputChooser()
opts1 = ic.get_config(opts['_execcfg'], tod_id=tod_id)
for k,v in list(opts1.items()):
if not k in opts:
opts[k] = v
if 'depot' in opts:
depot = moby2.scripting.get_depot(opts['depot'])
if not 'structure' in opts:
opts['structure'] = '{tag}'
filename = depot.get_full_path(**opts)
else:
filename = opts['filename']
trace(2, 'Loading as template: %s' % filename)
load_args = opts['column_def']
pos_data = moby2.util.StructDB.from_column_file(filename, load_args)
r = opts.get('template_rescale', (1.,1.))
if 'ok' in pos_data.dtype.names:
mask = (pos_data['ok'].astype(int) != 0)
else:
mask = np.ones(pos_data['x'].shape, bool)
template_fits = FPFitFile(det_uid=pos_data['det_uid'][mask])
template_fits.x0[:] = pos_data['x'][mask] * r[0]
template_fits.y0[:] = pos_data['y'][mask] * r[1]
template_fits.ok[:] = True
self = cls()
self.set_template(template_fits)
return self
def set_template(self, template):
self.template = template
self.pivot = self.template.x0[self.template.ok].mean(), \
self.template.y0[self.template.ok].mean()
@staticmethod
def _rotate(theta, x, y):
c, s = np.cos(theta), np.sin(theta)
return x*c - y*s, y*c + x*s
def model(self, params, x=None, y=None):
"""
Shift, rotate, shear the current template according to params
dict. Return the resulting offsets (x, y).
"""
dx, dy, theta, scale, sh_theta, sh_scale = params
scale, sh_scale = np.exp(scale), np.exp(sh_scale)
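        # Note: 'scale' and 'shear_scale' are parametrized in log space (exponentiated
        # here), so a stored parameter value of 0 corresponds to a scale factor of 1.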
# Shift away array center and rescale
if x is None:
tp = self.template
x, y = tp.x0, tp.y0
out_x, out_y = scale*(x - self.pivot[0]), scale*(y - self.pivot[1])
# Shear
out_x, out_y = self._rotate(+sh_theta, out_x, out_y)
out_x *= sh_scale
out_x, out_y = self._rotate(-sh_theta, out_x, out_y)
# Rotate
out_x, out_y = self._rotate(theta, out_x, out_y)
# Restore array center and apply additional shift.
return out_x + self.pivot[0] - dx, out_y + self.pivot[1] - dy
def model_inverse(self, params, out_x, out_y):
"""
Inverse of self.model. Keep it up to date!
"""
dx, dy, theta, scale, sh_theta, sh_scale = params
scale, sh_scale = np.exp(scale), np.exp(sh_scale)
# Remove additional shift.
x, y = out_x - self.pivot[0] + dx, out_y - self.pivot[1] + dy
# Unrotate
x, y = self._rotate(-theta, x, y)
# Unshear
x, y = self._rotate(+sh_theta, x, y)
x /= sh_scale
x, y = self._rotate(-sh_theta, x, y)
x, y = x/scale + self.pivot[0], y/scale + self.pivot[1]
return x, y
def fit(self, fp, params, trace_level=0):
"""
Fit positions to a template, which is also an FPFitFile but
may represent different det_uid. 'params' should be a dict
like this one:
params = {
'shift': True,
'rotation': True,
'scale': True,
'shear': True,
}
Returns (ok, params). The fitted_template has the same
det_uid as self.
"""
template = self.template
# Get mask of items that are ok in both the template and fits
fp_ok = fp.ok.astype('bool').copy()
_, temp_ok = template.get_property('ok', fp.det_uid)
fp_ok *= temp_ok
# Get the template and fits positions for those ok items
_, x0 = template.get_property('x0', fp.det_uid[fp_ok])
_, y0 = template.get_property('y0', fp.det_uid[fp_ok])
x1, y1 = fp.x0[fp_ok], fp.y0[fp_ok]
self.A = x0,y0
self.B = x1,y1
# Identify parameters we want to vary
free_params = [params.get('shift', True)]*2
free_params.append(params.get('rotation', True))
free_params.append(params.get('scale', False))
free_params.extend([params.get('shear', False)]*2)
if fp.ok.sum() == 0:
trace(trace_level+0, 'No items for template fit')
self.result = False, [0. for f in free_params]
return self.result
trace(trace_level+0, 'Fitting template using %i items' % fp_ok.sum())
# Start fit with shift based on mean displacement
params0 = [x1.mean()-self.pivot[0], y1.mean()-self.pivot[1],
0., 0., 0., 0.]
trace(trace_level+1, 'Starting parameters: %s' % str(params0))
trace(trace_level+1, 'Free parameters: %s' % str(free_params))
def fit_chi2(params):
x_model, y_model = self.model(params, x0, y0)
var = (x1 - x_model)**2 + (y1 - y_model)**2
#return var.sum()
# Attenuate contribution of outliers? Not clear this works...
mvar = np.median(var)
var_roll = var * (10*mvar / (10*mvar + var))
return var_roll.sum()
# Minimize... start with position or all is lost.
params1 = params0
for iters in [0,1]:
for free_mask in [
# Fit position only...
[True , True , False, False, False, False],
# Fit rotation and scale
[False, False, True , True , False, False],
# Fit skew
[False, False, False, False, True , True ],
# Fit skew and position
[True , True , False, False, True , True ],
# Let everything float
[True , True , True , True , True , True ]]:
free = np.array(free_params) * free_mask
if free.sum() > 0:
params1 = moby2.util.fitting.multi_fmin(
fit_chi2, params1, free=free, disp=0,
xtol=1e-6, ftol=1e-6)
trace(trace_level+2, 'params snapshot: %s' % str(params1))
trace(trace_level+1, 'Final parameters: %s' % str(params1))
self.result = True, params1
return self.result
def check_result(self, opts):
"""
Check self.result against ranges passed in by user. User
passes in a dict with keys like "<name>_range", where <name>
is one of self.param_names. The values are the range (lo, hi) of
acceptable values. If any range checks fail, the function
returns false.
"""
ok, params = self.result
if not ok:
return False
for k, v in zip(self.param_names, params):
k = '%s_range' % k
if not k in opts: continue
if not ((opts[k][0] <= v) and (v < opts[k][1])):
return False
return True
def get_modeled(self, det_uid=None):
"""
Return a FPFitFile with the modeled detector positions. Pass
in the desired det_uid, or the template det_uid will be
used.
"""
if det_uid is None:
det_uid = self.det_uid
matched = FPFitFile(det_uid=det_uid)
_, ok = self.template.get_property('ok', matched.det_uid)
_, x0 = self.template.get_property('x0', matched.det_uid)
_, y0 = self.template.get_property('y0', matched.det_uid)
matched.ok = ok
params = self.result[1]
matched.x0, matched.y0 = self.model(params, x0, y0)
return matched
def make_plots(self, fp, modeled, plot_prefix='./',
title=None):
"""
Show fit quality in a few plots.
"""
import pylab as pl
def sane_axes():
fig.gca().xaxis.set_major_locator(pl.MaxNLocator(4))
fig.gca().yaxis.set_major_locator(pl.MaxNLocator(5))
fig.gca().set_aspect('equal', 'datalim')
DEG = 180./np.pi
fig = pl.figure()
fig.set_size_inches(8., 4.)
pl.subplots_adjust(left=.1, right=.98, top=.85, bottom=.1,
hspace=.2, wspace=.3)
pl.subplot(121)
tp = self.template
s, x, y = tp.ok, tp.x0, tp.y0
pl.scatter(x[s], y[s], marker='o', s=4, alpha=.5)
pl.xlabel('X')
pl.ylabel('Y')
pl.title('Input template')
sane_axes()
# The model positions
pl.subplot(122)
s, x, y = modeled.ok, modeled.x0 * DEG, modeled.y0 * DEG
pl.scatter(x[s], y[s], alpha=.2)
# And the fit positions
s, x, y = fp.ok, fp.x0 * DEG, fp.y0 * DEG
pl.scatter(x[s], y[s], marker='x')
# Now connect them with lines...
u = fp.det_uid[s]
ok1, (x1, y1) = modeled.get_property(['x0','y0'], det_uid=u)
x, y = x[s], y[s]
for i in ok1.nonzero()[0]:
pl.plot([x1[i]*DEG, x[i]], [y1[i]*DEG, y[i]], color='k', alpha=.4)
pl.xlabel('X (deg)')
pl.ylabel('Y (deg)')
pl.title('Fitted result')
sane_axes()
if title != None:
pl.figtext(0.5, 0.93, title, va='bottom', ha='center')
pl.savefig(plot_prefix + 'fit.png')
pl.figure() # destroy our settings...
def old_make_plots(self, fp, modeled, plot_prefix='./',
title=None):
"""
Show fit quality in a few plots.
"""
import pylab as pl
DEG = 180./np.pi
pl.figure()
pl.gcf().set_size_inches(6., 6.)
pl.subplots_adjust(left=.15, right=.95, top=.90, bottom=.1,
hspace=.2, wspace=.3)
tp = self.template
s, x, y = tp.ok, tp.x0, tp.y0
pl.scatter(x[s], y[s], marker='x')
pl.savefig(plot_prefix + '0template.png')
pl.clf()
s, x, y = modeled.ok, modeled.x0 * DEG, modeled.y0 * DEG
pl.scatter(x[s], y[s], alpha=.2)
pl.xlabel('X (deg)')
pl.ylabel('Y (deg)')
pl.savefig(plot_prefix + '1model.png')
pl.clf()
# The model positions
s, x, y = modeled.ok, modeled.x0 * DEG, modeled.y0 * DEG
pl.scatter(x[s], y[s], alpha=.2)
# And the fit positions
s, x, y = fp.ok, fp.x0 * DEG, fp.y0 * DEG
pl.scatter(x[s], y[s], marker='x')
# Now connect them with lines...
u = fp.det_uid[s]
ok1, (x1, y1) = modeled.get_property(['x0','y0'], det_uid=u)
x, y = x[s], y[s]
for i in ok1.nonzero()[0]:
pl.plot([x1[i]*DEG, x[i]], [y1[i]*DEG, y[i]], color='k', alpha=.4)
pl.xlabel('X (deg)')
pl.ylabel('Y (deg)')
if title is not None:
pl.title(title)
pl.savefig(plot_prefix + '2fit.png')
pl.figure() # destroy our settings...
# Formatted output...
def get_ascii(self, names=None, params=None):
if names is None:
names = self.param_names
if params is None:
params = self.result[1]
idx = [self.param_names.index(f) for f in names]
text = [ self.formats.get(n, '%11.4e') % params[i]
for n,i in zip(names,idx) ]
return ' '.join(text)
@staticmethod
def write_fit_list(filename, keys, fits, format=None):
if format == 'fits':
columns = list(zip(*[f.result[1] for f in fits]))
col_defs = ([('id', keys), ('ok', [int(f.result[0]) for f in fits])] +
list(zip(fits[0].param_names, columns)))
db_out = moby2.util.StructDB.from_data(
col_defs, formats=fits[0].formats)
db_out.to_fits_table(filename)
else:
if isinstance(filename, basestring):
filename = open(filename, 'w')
names = fits[0].param_names
filename.write('# %s\n' % ' '.join(names))
for key, fit in zip(keys, fits):
text = fit.get_ascii(names=names)
filename.write('%s %s\n' % (key, text))
|
[
"pylab.title",
"pylab.scatter",
"numpy.log10",
"pylab.subplots_adjust",
"moby2.scripting.get_depot",
"pylab.MaxNLocator",
"pylab.savefig",
"pylab.xlabel",
"moby2.scripting.execcfg.InputChooser",
"numpy.array",
"numpy.sin",
"moby2.util.StructDB.from_column_file",
"pylab.gca",
"numpy.arange",
"pylab.ylabel",
"pylab.ylim",
"pylab.plot",
"pylab.figtext",
"numpy.exp",
"pylab.xlim",
"numpy.isinf",
"numpy.ones",
"pylab.subplot",
"numpy.any",
"pylab.figure",
"numpy.isnan",
"numpy.cos",
"moby2.util.StructDB.from_data",
"pylab.gcf",
"numpy.median",
"pylab.hist",
"pylab.MultipleLocator",
"moby2.util.fitting.multi_fmin",
"numpy.zeros",
"pylab.colorbar",
"moby2.scripting.products.get_tod_id",
"numpy.loadtxt",
"pylab.clf"
] |
[((1905, 1935), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'unpack': '(1)'}), '(filename, unpack=1)\n', (1915, 1935), True, 'import numpy as np\n'), ((3620, 3676), 'moby2.util.StructDB.from_data', 'moby2.util.StructDB.from_data', (['data'], {'formats': '_fp_formats'}), '(data, formats=_fp_formats)\n', (3649, 3676), False, 'import moby2\n'), ((4358, 4469), 'moby2.util.StructDB.from_data', 'moby2.util.StructDB.from_data', (['data'], {'formats': "{'peak_dac': '%12.3f', 'time_const': '%12.5f', 'sn': '%12.3f'}"}), "(data, formats={'peak_dac': '%12.3f',\n 'time_const': '%12.5f', 'sn': '%12.3f'})\n", (4387, 4469), False, 'import moby2\n'), ((4935, 4958), 'numpy.zeros', 'np.zeros', (['self.ok.shape'], {}), '(self.ok.shape)\n', (4943, 4958), True, 'import numpy as np\n'), ((9370, 9397), 'pylab.scatter', 'pl.scatter', (['x', 'y'], {'alpha': '(0.5)'}), '(x, y, alpha=0.5)\n', (9380, 9397), True, 'import pylab as pl\n'), ((9894, 9921), 'pylab.xlabel', 'pl.xlabel', (["('X (%s)' % units)"], {}), "('X (%s)' % units)\n", (9903, 9921), True, 'import pylab as pl\n'), ((9930, 9957), 'pylab.ylabel', 'pl.ylabel', (["('Y (%s)' % units)"], {}), "('Y (%s)' % units)\n", (9939, 9957), True, 'import pylab as pl\n'), ((10528, 10548), 'pylab.savefig', 'pl.savefig', (['filename'], {}), '(filename)\n', (10538, 10548), True, 'import pylab as pl\n'), ((10557, 10565), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (10563, 10565), True, 'import pylab as pl\n'), ((10574, 10585), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (10583, 10585), True, 'import pylab as pl\n'), ((11714, 11725), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (11723, 11725), True, 'import pylab as pl\n'), ((11775, 11865), 'pylab.subplots_adjust', 'pl.subplots_adjust', ([], {'left': '(0.1)', 'right': '(0.95)', 'top': '(0.95)', 'bottom': '(0.1)', 'hspace': '(0.2)', 'wspace': '(0.3)'}), '(left=0.1, right=0.95, top=0.95, bottom=0.1, hspace=0.2,\n wspace=0.3)\n', (11793, 11865), True, 'import pylab as pl\n'), ((11952, 11971), 'pylab.subplot', 'pl.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (11962, 11971), True, 'import pylab as pl\n'), ((12073, 12086), 'pylab.colorbar', 'pl.colorbar', ([], {}), '()\n', (12084, 12086), True, 'import pylab as pl\n'), ((12112, 12162), 'pylab.title', 'pl.title', (['"""Time constants (ms)"""'], {'fontsize': 'title_fs'}), "('Time constants (ms)', fontsize=title_fs)\n", (12120, 12162), True, 'import pylab as pl\n'), ((12198, 12217), 'pylab.subplot', 'pl.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (12208, 12217), True, 'import pylab as pl\n'), ((12323, 12336), 'pylab.colorbar', 'pl.colorbar', ([], {}), '()\n', (12334, 12336), True, 'import pylab as pl\n'), ((12362, 12418), 'pylab.title', 'pl.title', (['"""Time constant errors (ms)"""'], {'fontsize': 'title_fs'}), "('Time constant errors (ms)', fontsize=title_fs)\n", (12370, 12418), True, 'import pylab as pl\n'), ((12855, 12894), 'pylab.savefig', 'pl.savefig', (["(filename + 'time_const.png')"], {}), "(filename + 'time_const.png')\n", (12865, 12894), True, 'import pylab as pl\n'), ((12901, 12909), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (12907, 12909), True, 'import pylab as pl\n'), ((13370, 13389), 'pylab.subplot', 'pl.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (13380, 13389), True, 'import pylab as pl\n'), ((13470, 13483), 'pylab.colorbar', 'pl.colorbar', ([], {}), '()\n', (13481, 13483), True, 'import pylab as pl\n'), ((13518, 13554), 'pylab.title', 'pl.title', (['"""N_obs"""'], {'fontsize': 'title_fs'}), "('N_obs', fontsize=title_fs)\n", (13526, 
13554), True, 'import pylab as pl\n'), ((13564, 13602), 'pylab.savefig', 'pl.savefig', (["(filename + 'positions.png')"], {}), "(filename + 'positions.png')\n", (13574, 13602), True, 'import pylab as pl\n'), ((13609, 13617), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (13615, 13617), True, 'import pylab as pl\n'), ((13669, 13680), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (13678, 13680), True, 'import pylab as pl\n'), ((15252, 15309), 'moby2.util.StructDB.from_column_file', 'moby2.util.StructDB.from_column_file', (['filename', 'load_args'], {}), '(filename, load_args)\n', (15288, 15309), False, 'import moby2\n'), ((22738, 22749), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (22747, 22749), True, 'import pylab as pl\n'), ((22794, 22884), 'pylab.subplots_adjust', 'pl.subplots_adjust', ([], {'left': '(0.1)', 'right': '(0.98)', 'top': '(0.85)', 'bottom': '(0.1)', 'hspace': '(0.2)', 'wspace': '(0.3)'}), '(left=0.1, right=0.98, top=0.85, bottom=0.1, hspace=0.2,\n wspace=0.3)\n', (22812, 22884), True, 'import pylab as pl\n'), ((22910, 22925), 'pylab.subplot', 'pl.subplot', (['(121)'], {}), '(121)\n', (22920, 22925), True, 'import pylab as pl\n'), ((22999, 23049), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'marker': '"""o"""', 's': '(4)', 'alpha': '(0.5)'}), "(x[s], y[s], marker='o', s=4, alpha=0.5)\n", (23009, 23049), True, 'import pylab as pl\n'), ((23057, 23071), 'pylab.xlabel', 'pl.xlabel', (['"""X"""'], {}), "('X')\n", (23066, 23071), True, 'import pylab as pl\n'), ((23080, 23094), 'pylab.ylabel', 'pl.ylabel', (['"""Y"""'], {}), "('Y')\n", (23089, 23094), True, 'import pylab as pl\n'), ((23103, 23129), 'pylab.title', 'pl.title', (['"""Input template"""'], {}), "('Input template')\n", (23111, 23129), True, 'import pylab as pl\n'), ((23189, 23204), 'pylab.subplot', 'pl.subplot', (['(122)'], {}), '(122)\n', (23199, 23204), True, 'import pylab as pl\n'), ((23278, 23311), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'alpha': '(0.2)'}), '(x[s], y[s], alpha=0.2)\n', (23288, 23311), True, 'import pylab as pl\n'), ((23401, 23435), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'marker': '"""x"""'}), "(x[s], y[s], marker='x')\n", (23411, 23435), True, 'import pylab as pl\n'), ((23720, 23740), 'pylab.xlabel', 'pl.xlabel', (['"""X (deg)"""'], {}), "('X (deg)')\n", (23729, 23740), True, 'import pylab as pl\n'), ((23749, 23769), 'pylab.ylabel', 'pl.ylabel', (['"""Y (deg)"""'], {}), "('Y (deg)')\n", (23758, 23769), True, 'import pylab as pl\n'), ((23778, 23803), 'pylab.title', 'pl.title', (['"""Fitted result"""'], {}), "('Fitted result')\n", (23786, 23803), True, 'import pylab as pl\n'), ((23927, 23962), 'pylab.savefig', 'pl.savefig', (["(plot_prefix + 'fit.png')"], {}), "(plot_prefix + 'fit.png')\n", (23937, 23962), True, 'import pylab as pl\n'), ((23971, 23982), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (23980, 23982), True, 'import pylab as pl\n'), ((24227, 24238), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (24236, 24238), True, 'import pylab as pl\n'), ((24288, 24378), 'pylab.subplots_adjust', 'pl.subplots_adjust', ([], {'left': '(0.15)', 'right': '(0.95)', 'top': '(0.9)', 'bottom': '(0.1)', 'hspace': '(0.2)', 'wspace': '(0.3)'}), '(left=0.15, right=0.95, top=0.9, bottom=0.1, hspace=0.2,\n wspace=0.3)\n', (24306, 24378), True, 'import pylab as pl\n'), ((24470, 24504), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'marker': '"""x"""'}), "(x[s], y[s], marker='x')\n", (24480, 24504), True, 'import pylab as pl\n'), ((24513, 24554), 'pylab.savefig', 'pl.savefig', 
(["(plot_prefix + '0template.png')"], {}), "(plot_prefix + '0template.png')\n", (24523, 24554), True, 'import pylab as pl\n'), ((24563, 24571), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (24569, 24571), True, 'import pylab as pl\n'), ((24646, 24679), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'alpha': '(0.2)'}), '(x[s], y[s], alpha=0.2)\n', (24656, 24679), True, 'import pylab as pl\n'), ((24687, 24707), 'pylab.xlabel', 'pl.xlabel', (['"""X (deg)"""'], {}), "('X (deg)')\n", (24696, 24707), True, 'import pylab as pl\n'), ((24716, 24736), 'pylab.ylabel', 'pl.ylabel', (['"""Y (deg)"""'], {}), "('Y (deg)')\n", (24725, 24736), True, 'import pylab as pl\n'), ((24745, 24783), 'pylab.savefig', 'pl.savefig', (["(plot_prefix + '1model.png')"], {}), "(plot_prefix + '1model.png')\n", (24755, 24783), True, 'import pylab as pl\n'), ((24792, 24800), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (24798, 24800), True, 'import pylab as pl\n'), ((24905, 24938), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'alpha': '(0.2)'}), '(x[s], y[s], alpha=0.2)\n', (24915, 24938), True, 'import pylab as pl\n'), ((25028, 25062), 'pylab.scatter', 'pl.scatter', (['x[s]', 'y[s]'], {'marker': '"""x"""'}), "(x[s], y[s], marker='x')\n", (25038, 25062), True, 'import pylab as pl\n'), ((25347, 25367), 'pylab.xlabel', 'pl.xlabel', (['"""X (deg)"""'], {}), "('X (deg)')\n", (25356, 25367), True, 'import pylab as pl\n'), ((25376, 25396), 'pylab.ylabel', 'pl.ylabel', (['"""Y (deg)"""'], {}), "('Y (deg)')\n", (25385, 25396), True, 'import pylab as pl\n'), ((25463, 25499), 'pylab.savefig', 'pl.savefig', (["(plot_prefix + '2fit.png')"], {}), "(plot_prefix + '2fit.png')\n", (25473, 25499), True, 'import pylab as pl\n'), ((25508, 25519), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (25517, 25519), True, 'import pylab as pl\n'), ((1340, 1372), 'numpy.array', 'np.array', (['det_uid'], {'dtype': '"""int64"""'}), "(det_uid, dtype='int64')\n", (1348, 1372), True, 'import numpy as np\n'), ((6684, 6704), 'numpy.arange', 'np.arange', (['n_det_uid'], {}), '(n_det_uid)\n', (6693, 6704), True, 'import numpy as np\n'), ((8769, 8780), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (8778, 8780), True, 'import pylab as pl\n'), ((8852, 8873), 'pylab.figure', 'pl.figure', (['fig.number'], {}), '(fig.number)\n', (8861, 8873), True, 'import pylab as pl\n'), ((8932, 8967), 'pylab.title', 'pl.title', (["(title + ' - no good fits')"], {}), "(title + ' - no good fits')\n", (8940, 8967), True, 'import pylab as pl\n'), ((8980, 9000), 'pylab.savefig', 'pl.savefig', (['filename'], {}), '(filename)\n', (8990, 9000), True, 'import pylab as pl\n'), ((9013, 9021), 'pylab.clf', 'pl.clf', ([], {}), '()\n', (9019, 9021), True, 'import pylab as pl\n'), ((9209, 9221), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (9218, 9221), True, 'import numpy as np\n'), ((9223, 9235), 'numpy.median', 'np.median', (['y'], {}), '(y)\n', (9232, 9235), True, 'import numpy as np\n'), ((9293, 9305), 'numpy.median', 'np.median', (['r'], {}), '(r)\n', (9302, 9305), True, 'import numpy as np\n'), ((9454, 9468), 'numpy.any', 'np.any', (['inside'], {}), '(inside)\n', (9460, 9468), True, 'import numpy as np\n'), ((10365, 10402), 'pylab.MultipleLocator', 'pl.MultipleLocator', (['(10 ** log_spacing)'], {}), '(10 ** log_spacing)\n', (10383, 10402), True, 'import pylab as pl\n'), ((10810, 10874), 'pylab.scatter', 'pl.scatter', (['cols[bads]', 'rows[bads]'], {'marker': '"""x"""', 'edgecolor': '"""gray"""'}), "(cols[bads], rows[bads], marker='x', edgecolor='gray')\n", (10820, 10874), True, 
'import pylab as pl\n'), ((11217, 11250), 'numpy.zeros', 'np.zeros', (['(n_rows, n_cols)', 'dtype'], {}), '((n_rows, n_cols), dtype)\n', (11225, 11250), True, 'import numpy as np\n'), ((11400, 11419), 'pylab.xlabel', 'pl.xlabel', (['"""Column"""'], {}), "('Column')\n", (11409, 11419), True, 'import pylab as pl\n'), ((11432, 11448), 'pylab.ylabel', 'pl.ylabel', (['"""Row"""'], {}), "('Row')\n", (11441, 11448), True, 'import pylab as pl\n'), ((11461, 11488), 'pylab.xlim', 'pl.xlim', (['(-0.5)', '(n_cols - 0.5)'], {}), '(-0.5, n_cols - 0.5)\n', (11468, 11488), True, 'import pylab as pl\n'), ((11499, 11526), 'pylab.ylim', 'pl.ylim', (['(-0.5)', '(n_rows - 0.5)'], {}), '(-0.5, n_rows - 0.5)\n', (11506, 11526), True, 'import pylab as pl\n'), ((12489, 12508), 'pylab.subplot', 'pl.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (12499, 12508), True, 'import pylab as pl\n'), ((12519, 12563), 'pylab.hist', 'pl.hist', (['(self.tau[self.ok] * 1000.0)'], {'bins': '(20)'}), '(self.tau[self.ok] * 1000.0, bins=20)\n', (12526, 12563), True, 'import pylab as pl\n'), ((12598, 12629), 'pylab.xlabel', 'pl.xlabel', (['"""Time constant (ms)"""'], {}), "('Time constant (ms)')\n", (12607, 12629), True, 'import pylab as pl\n'), ((12642, 12661), 'pylab.ylabel', 'pl.ylabel', (['"""N_dets"""'], {}), "('N_dets')\n", (12651, 12661), True, 'import pylab as pl\n'), ((12674, 12693), 'pylab.subplot', 'pl.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (12684, 12693), True, 'import pylab as pl\n'), ((12775, 12813), 'pylab.xlabel', 'pl.xlabel', (['"""Time constant errors (ms)"""'], {}), "('Time constant errors (ms)')\n", (12784, 12813), True, 'import pylab as pl\n'), ((12826, 12845), 'pylab.ylabel', 'pl.ylabel', (['"""N_dets"""'], {}), "('N_dets')\n", (12835, 12845), True, 'import pylab as pl\n'), ((12988, 13011), 'pylab.subplot', 'pl.subplot', (['(2)', '(2)', '(1 + i)'], {}), '(2, 2, 1 + i)\n', (12998, 13011), True, 'import pylab as pl\n'), ((13194, 13207), 'pylab.colorbar', 'pl.colorbar', ([], {}), '()\n', (13205, 13207), True, 'import pylab as pl\n'), ((13271, 13343), 'pylab.title', 'pl.title', (["('%s position RMS' % {(0): 'X', (1): 'Y'}[i])"], {'fontsize': 'title_fs'}), "('%s position RMS' % {(0): 'X', (1): 'Y'}[i], fontsize=title_fs)\n", (13279, 13343), True, 'import pylab as pl\n'), ((14570, 14624), 'moby2.scripting.products.get_tod_id', 'moby2.scripting.products.get_tod_id', ([], {'tod_info': 'tod_info'}), '(tod_info=tod_info)\n', (14605, 14624), False, 'import moby2\n'), ((14642, 14680), 'moby2.scripting.execcfg.InputChooser', 'moby2.scripting.execcfg.InputChooser', ([], {}), '()\n', (14678, 14680), False, 'import moby2\n'), ((14907, 14947), 'moby2.scripting.get_depot', 'moby2.scripting.get_depot', (["opts['depot']"], {}), "(opts['depot'])\n", (14932, 14947), False, 'import moby2\n'), ((15488, 15522), 'numpy.ones', 'np.ones', (["pos_data['x'].shape", 'bool'], {}), "(pos_data['x'].shape, bool)\n", (15495, 15522), True, 'import numpy as np\n'), ((16080, 16093), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (16086, 16093), True, 'import numpy as np\n'), ((16095, 16108), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (16101, 16108), True, 'import numpy as np\n'), ((16425, 16438), 'numpy.exp', 'np.exp', (['scale'], {}), '(scale)\n', (16431, 16438), True, 'import numpy as np\n'), ((16440, 16456), 'numpy.exp', 'np.exp', (['sh_scale'], {}), '(sh_scale)\n', (16446, 16456), True, 'import numpy as np\n'), ((17243, 17256), 'numpy.exp', 'np.exp', (['scale'], {}), '(scale)\n', (17249, 17256), True, 'import 
numpy as np\n'), ((17258, 17274), 'numpy.exp', 'np.exp', (['sh_scale'], {}), '(sh_scale)\n', (17264, 17274), True, 'import numpy as np\n'), ((19773, 19787), 'numpy.median', 'np.median', (['var'], {}), '(var)\n', (19782, 19787), True, 'import numpy as np\n'), ((23645, 23716), 'pylab.plot', 'pl.plot', (['[x1[i] * DEG, x[i]]', '[y1[i] * DEG, y[i]]'], {'color': '"""k"""', 'alpha': '(0.4)'}), "([x1[i] * DEG, x[i]], [y1[i] * DEG, y[i]], color='k', alpha=0.4)\n", (23652, 23716), True, 'import pylab as pl\n'), ((23863, 23917), 'pylab.figtext', 'pl.figtext', (['(0.5)', '(0.93)', 'title'], {'va': '"""bottom"""', 'ha': '"""center"""'}), "(0.5, 0.93, title, va='bottom', ha='center')\n", (23873, 23917), True, 'import pylab as pl\n'), ((25272, 25343), 'pylab.plot', 'pl.plot', (['[x1[i] * DEG, x[i]]', '[y1[i] * DEG, y[i]]'], {'color': '"""k"""', 'alpha': '(0.4)'}), "([x1[i] * DEG, x[i]], [y1[i] * DEG, y[i]], color='k', alpha=0.4)\n", (25279, 25343), True, 'import pylab as pl\n'), ((25439, 25454), 'pylab.title', 'pl.title', (['title'], {}), '(title)\n', (25447, 25454), True, 'import pylab as pl\n'), ((26281, 26345), 'moby2.util.StructDB.from_data', 'moby2.util.StructDB.from_data', (['col_defs'], {'formats': 'fits[0].formats'}), '(col_defs, formats=fits[0].formats)\n', (26310, 26345), False, 'import moby2\n'), ((7539, 7551), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (7548, 7551), True, 'import numpy as np\n'), ((7553, 7565), 'numpy.median', 'np.median', (['y'], {}), '(y)\n', (7562, 7565), True, 'import numpy as np\n'), ((9781, 9796), 'pylab.xlim', 'pl.xlim', (['*xlims'], {}), '(*xlims)\n', (9788, 9796), True, 'import pylab as pl\n'), ((9798, 9813), 'pylab.ylim', 'pl.ylim', (['*ylims'], {}), '(*ylims)\n', (9805, 9813), True, 'import pylab as pl\n'), ((10461, 10469), 'pylab.gca', 'pl.gca', ([], {}), '()\n', (10467, 10469), True, 'import pylab as pl\n'), ((10501, 10509), 'pylab.gca', 'pl.gca', ([], {}), '()\n', (10507, 10509), True, 'import pylab as pl\n'), ((11734, 11742), 'pylab.gcf', 'pl.gcf', ([], {}), '()\n', (11740, 11742), True, 'import pylab as pl\n'), ((22561, 22578), 'pylab.MaxNLocator', 'pl.MaxNLocator', (['(4)'], {}), '(4)\n', (22575, 22578), True, 'import pylab as pl\n'), ((22626, 22643), 'pylab.MaxNLocator', 'pl.MaxNLocator', (['(5)'], {}), '(5)\n', (22640, 22643), True, 'import pylab as pl\n'), ((24247, 24255), 'pylab.gcf', 'pl.gcf', ([], {}), '()\n', (24253, 24255), True, 'import pylab as pl\n'), ((8793, 8801), 'pylab.gcf', 'pl.gcf', ([], {}), '()\n', (8799, 8801), True, 'import pylab as pl\n'), ((20533, 20554), 'numpy.array', 'np.array', (['free_params'], {}), '(free_params)\n', (20541, 20554), True, 'import numpy as np\n'), ((20632, 20728), 'moby2.util.fitting.multi_fmin', 'moby2.util.fitting.multi_fmin', (['fit_chi2', 'params1'], {'free': 'free', 'disp': '(0)', 'xtol': '(1e-06)', 'ftol': '(1e-06)'}), '(fit_chi2, params1, free=free, disp=0, xtol=\n 1e-06, ftol=1e-06)\n', (20661, 20728), False, 'import moby2\n'), ((7211, 7223), 'numpy.isnan', 'np.isnan', (['_x'], {}), '(_x)\n', (7219, 7223), True, 'import numpy as np\n'), ((7227, 7239), 'numpy.isinf', 'np.isinf', (['_x'], {}), '(_x)\n', (7235, 7239), True, 'import numpy as np\n'), ((10298, 10309), 'numpy.log10', 'np.log10', (['b'], {}), '(b)\n', (10306, 10309), True, 'import numpy as np\n'), ((10265, 10280), 'numpy.log10', 'np.log10', (['delta'], {}), '(delta)\n', (10273, 10280), True, 'import numpy as np\n'), ((10283, 10294), 'numpy.log10', 'np.log10', (['b'], {}), '(b)\n', (10291, 10294), True, 'import numpy as np\n')]
|
from numpy.random import seed
seed(5393)
from tensorflow import set_random_seed
set_random_seed(12011)
import os
import numpy as np
import pandas as pd
from scipy import sparse
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split, StratifiedKFold
from joblib import Parallel, delayed
from tqdm import tqdm
import logging
logging.basicConfig(level = logging.INFO)
EMBED_DIM = 300
VOCAB_SIZE = 5000
max_len = 1000
batch_size = 16
n_folds = 5
fold_dir = "/data/victor/violence-workshop/batches/reversefolds"
data_pkl = "../../data/dataframe_with_scores_withdoc2vec.pkl"
def pad_csr(a, newshape):
""" Pads csr_matrix with zeros. Modifies a inplace. """
n, m = a.shape
a._shape = newshape
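    # 'edge' padding repeats the last indptr value, which appends empty rows to the
    # CSR matrix without touching its data or indices arrays.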
a.indptr = np.pad(a.indptr, (0, newshape[0] - n), 'edge')
def filter_nans(seq):
""" Filters out floats (np.nan) from list """
return np.array([x for x in seq if not isinstance(x, float)])
def pad_or_trim(seq, max_len=1000):
""" Pads or trims seq to have max_len rows """
n, m = seq.shape
if n > max_len:
seq = seq[-max_len:, :]
elif n < max_len:
if sparse.issparse(seq):
pad_csr(seq, (max_len, m))
else:
seq = np.r_[seq, np.zeros((max_len - n, m))]
return seq
def process_ngrams(batch_features, ngram_features):
""" Transform batch_features into tensor of dims:
(n, max_len, #features) where n is len(batch_features)"""
n = batch_features.shape[0]
batch_features = batch_features.apply(ngram_features.transform)\
.apply(pad_or_trim)
batch_features = sparse.vstack(batch_features)
batch_features = batch_features.toarray()\
.reshape(n, max_len, -1)
return batch_features
def process_scores(X):
""" Transforms X into tensor of dims:
(n, max_len, #features) where n is len(X).
This is a special case of process for lists of scores"""
batch_scores = X.apply(np.array)\
.apply(lambda x: x.reshape(-1, 1))\
.apply(pad_or_trim)
batch_scores = np.concatenate(batch_scores.values, axis = 0)\
.reshape(-1, max_len, 1)
return batch_scores
############################################################
# Load Data
############################################################
data = pd.read_pickle(data_pkl)
# Encode genre
lb_genre = LabelEncoder()
data['genre'] = lb_genre.fit_transform(data['genre'])
############################################################
# 3 to 5 chars w/ spaces
# unigrams + bigrams
############################################################
# This defines the analyzer to be used with Countvectorizer
def char_ngram_tokenizer(text, ngram_range):
def aux(text, ngram_size):
        for i in range(len(text) - ngram_size + 1): # include the final n-gram of the text
yield text[i : i + ngram_size]
for n in range(*ngram_range):
for ngram in aux(text, n):
yield ngram
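# Note: with ngram_range=(3, 6) the tokenizer above yields character 3-, 4- and 5-grams,
# since the upper bound of the range is exclusive (as with Python's range()).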
ngram_features = FeatureUnion([
("char_ngrams", CountVectorizer(analyzer = lambda text: char_ngram_tokenizer(text, ngram_range=(3, 6)),
max_features = VOCAB_SIZE)),
("token_ngrams", CountVectorizer(ngram_range=(1, 2),
max_features=VOCAB_SIZE))
])
tfidf_ = TfidfVectorizer(ngram_range=(1, 2), max_features=VOCAB_SIZE)
############################################################
# Batch generation
############################################################
def process(X, Y, i, ngram_features, batch_dir, tfidf_transformer = None):
# Features
## ngrams
#logging.info("ngrams")
#batch_ngrams = process_ngrams(X['sentences'].iloc[i : i + batch_size], ngram_features)
#np.savez(os.path.join(batch_dir, "{}_ngrams".format(i)),
# features = batch_ngrams)
#batch_ngrams = None
## tfidf
#logging.info("tfidf")
#batch_tfidf = process_ngrams(X['sentences'].iloc[i : i + batch_size], tfidf_transformer)
#np.savez(os.path.join(batch_dir, "{}_tfidf".format(i)),
# features = batch_tfidf)
#batch_tfidf = None
# ## Word2vec
#logging.info("word2vec")
#batch_word2vec = X['word2vec_sent_mean_vec'].iloc[i : i + batch_size]\
# .apply(filter_nans)\
# .apply(pad_or_trim)
#np.savez(os.path.join(batch_dir, "{}_word2vec".format(i)),
# features = batch_word2vec)
#batch_word2vec = None
# paragraph2vec
logging.info("paragraph2vec")
batch_paragraph2vec = X['doc2vec_vectors'].iloc[i : i + batch_size]\
.apply(filter_nans)\
.apply(pad_or_trim)
np.savez(os.path.join(batch_dir, "{}_doc2vec".format(i)),
features = batch_paragraph2vec)
batch_paragraph2vec = None
# ## Lexicons
#logging.info("Empath")
#batch_empath = X['empath_sentence'].iloc[i : i + batch_size]\
# .apply(np.array)\
# .apply(pad_or_trim)
#np.savez(os.path.join(batch_dir, "{}_empath".format(i)),
# empath = batch_empath)
#logging.info("Lexicons")
#batch_lexicon = process_scores(X['abusive_scores'].iloc[i : i + batch_size])
#batch_vader = process_scores(X['vader_scores'].iloc[i : i + batch_size])
#batch_afinn = process_scores(X['afinn_scores'].iloc[i : i + batch_size])
#batch_hatebase = X['hatebase_sentence'].iloc[i : i + batch_size].apply(pad_or_trim)
#np.savez(os.path.join(batch_dir, "{}_lexicon".format(i)),
# abusive_scores = batch_lexicon,
# vader = batch_vader,
# afinn = batch_afinn,
# hatebase = batch_hatebase)
# batch_lexicon = None
#batch_vader = None
#batch_afinn = None
#batch_hatebase = None
## Save labels
#logging.info("Labels")
#batch_labels = Y[i : i + batch_size]
#np.savez(os.path.join(batch_dir, "{}_labels".format(i)),
# labels = batch_labels)
## Save metadata
#logging.info("Metadata")
#batch_genre = X['genre'][i : i + batch_size]
#np.savez(os.path.join(batch_dir, "{}_meta".format(i)),
# genre = batch_genre)
logging.info("Done for {}".format(i))
skf = StratifiedKFold(n_splits = n_folds, random_state = 42)
lb = LabelBinarizer()
Y = lb.fit_transform(data['violence_rating'])
for k, (train, test) in enumerate(skf.split(data.violence_rating, data.violence_rating)):
train_dir = os.path.join(fold_dir, str(k), "train")
test_dir = os.path.join(fold_dir, str(k), "test")
eval_dir = os.path.join(fold_dir, str(k), "eval")
for t in [train_dir, test_dir, eval_dir]:
os.makedirs(t, exist_ok = True)
X_train, X_test = data.iloc[train], data.iloc[test]
Y_train, Y_test = Y[train], Y[test]
X_train, X_eval, Y_train, Y_eval = train_test_split(X_train, Y_train, test_size = 64, random_state = 666)
# Fit vocab
ngram_features.fit(data.iloc[train]['text'], Y_train)
tfidf_.fit(data.iloc[train]['text'], Y_train)
# Create batches
for i in tqdm(range(0, X_train.shape[0], batch_size)):
process(X_train, Y_train, i, ngram_features = ngram_features, batch_dir = train_dir, tfidf_transformer = tfidf_)
for i in tqdm(range(0, X_eval.shape[0], batch_size)):
process(X_eval, Y_eval, i, ngram_features = ngram_features, batch_dir = eval_dir, tfidf_transformer = tfidf_)
for i in tqdm(range(0, X_test.shape[0], batch_size)):
process(X_test, Y_test, i, ngram_features = ngram_features, batch_dir = test_dir, tfidf_transformer = tfidf_)
|
[
"logging.basicConfig",
"pandas.read_pickle",
"sklearn.preprocessing.LabelEncoder",
"sklearn.preprocessing.LabelBinarizer",
"scipy.sparse.vstack",
"os.makedirs",
"sklearn.model_selection.train_test_split",
"sklearn.feature_extraction.text.CountVectorizer",
"scipy.sparse.issparse",
"sklearn.model_selection.StratifiedKFold",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.zeros",
"numpy.random.seed",
"numpy.concatenate",
"numpy.pad",
"tensorflow.set_random_seed",
"logging.info"
] |
[((30, 40), 'numpy.random.seed', 'seed', (['(5393)'], {}), '(5393)\n', (34, 40), False, 'from numpy.random import seed\n'), ((80, 102), 'tensorflow.set_random_seed', 'set_random_seed', (['(12011)'], {}), '(12011)\n', (95, 102), False, 'from tensorflow import set_random_seed\n'), ((509, 548), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (528, 548), False, 'import logging\n'), ((2543, 2567), 'pandas.read_pickle', 'pd.read_pickle', (['data_pkl'], {}), '(data_pkl)\n', (2557, 2567), True, 'import pandas as pd\n'), ((2595, 2609), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2607, 2609), False, 'from sklearn.preprocessing import LabelEncoder, LabelBinarizer\n'), ((3525, 3585), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 2)', 'max_features': 'VOCAB_SIZE'}), '(ngram_range=(1, 2), max_features=VOCAB_SIZE)\n', (3540, 3585), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((6470, 6520), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'n_folds', 'random_state': '(42)'}), '(n_splits=n_folds, random_state=42)\n', (6485, 6520), False, 'from sklearn.model_selection import train_test_split, StratifiedKFold\n'), ((6530, 6546), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (6544, 6546), False, 'from sklearn.preprocessing import LabelEncoder, LabelBinarizer\n'), ((901, 947), 'numpy.pad', 'np.pad', (['a.indptr', '(0, newshape[0] - n)', '"""edge"""'], {}), "(a.indptr, (0, newshape[0] - n), 'edge')\n", (907, 947), True, 'import numpy as np\n'), ((1781, 1810), 'scipy.sparse.vstack', 'sparse.vstack', (['batch_features'], {}), '(batch_features)\n', (1794, 1810), False, 'from scipy import sparse\n'), ((4746, 4775), 'logging.info', 'logging.info', (['"""paragraph2vec"""'], {}), "('paragraph2vec')\n", (4758, 4775), False, 'import logging\n'), ((7076, 7142), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'Y_train'], {'test_size': '(64)', 'random_state': '(666)'}), '(X_train, Y_train, test_size=64, random_state=666)\n', (7092, 7142), False, 'from sklearn.model_selection import train_test_split, StratifiedKFold\n'), ((6908, 6937), 'os.makedirs', 'os.makedirs', (['t'], {'exist_ok': '(True)'}), '(t, exist_ok=True)\n', (6919, 6937), False, 'import os\n'), ((1285, 1305), 'scipy.sparse.issparse', 'sparse.issparse', (['seq'], {}), '(seq)\n', (1300, 1305), False, 'from scipy import sparse\n'), ((2282, 2325), 'numpy.concatenate', 'np.concatenate', (['batch_scores.values'], {'axis': '(0)'}), '(batch_scores.values, axis=0)\n', (2296, 2325), True, 'import numpy as np\n'), ((3409, 3469), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'ngram_range': '(1, 2)', 'max_features': 'VOCAB_SIZE'}), '(ngram_range=(1, 2), max_features=VOCAB_SIZE)\n', (3424, 3469), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((1389, 1415), 'numpy.zeros', 'np.zeros', (['(max_len - n, m)'], {}), '((max_len - n, m))\n', (1397, 1415), True, 'import numpy as np\n')]
|
""" 参考自https://github.com/bojone/crf/ """
import tensorflow as tf
k = tf.keras
kl = tf.keras.layers
K = tf.keras.backend
from sklearn.model_selection import train_test_split
import numpy as np
import re
from tqdm import tqdm
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
class CRF(kl.Layer):
"""
    The CRF layer is essentially a loss-computation layer with trainable parameters,
    so it is only used to train the model; prediction requires building a separate model.
"""
def __init__(self, ignore_last_label=False, lr_mult=1., **kwargs):
"""ignore_last_label:定义要不要忽略最后一个标签,起到mask的效果
"""
super().__init__(**kwargs)
self.ignore_last_label = 1 if ignore_last_label else 0
self.lr_mult = lr_mult
def build(self, input_shape):
self.num_labels = input_shape[-1] - self.ignore_last_label
self._trans: tf.Variable = self.add_weight(name='crf_trans',
shape=(self.num_labels, self.num_labels),
initializer='glorot_uniform',
trainable=True)
self._trans.assign(self._trans / self.lr_mult)
self.trans = lambda: self._trans * self.lr_mult
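        # keeping _trans stored scaled down by lr_mult and multiplying it back on use is what
        # implements the enlarged CRF learning rate mentioned where the layer is instantiated below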
def get_weights(self):
weights = super().get_weights()
return [w * self.lr_mult for w in weights]
def log_norm_step(self, inputs, states):
"""递归计算归一化因子
要点:1、递归计算;2、用logsumexp避免溢出。
技巧:通过expand_dims来对齐张量。
"""
inputs, mask = inputs[:, :-1], inputs[:, -1:]
states = K.expand_dims(states[0], 2) # (batch_size, output_dim, 1)
trans = K.expand_dims(self.trans(), 0) # (1, output_dim, output_dim)
outputs = tf.math.reduce_logsumexp(states + trans, 1) # (batch_size, output_dim)
outputs = outputs + inputs
outputs = mask * outputs + (1 - mask) * states[:, :, 0]
return outputs, [outputs]
def path_score(self, inputs, labels):
"""计算目标路径的相对概率(还没有归一化)
要点:逐标签得分,加上转移概率得分。
技巧:用“预测”点乘“目标”的方法抽取出目标路径的得分。
"""
        point_score = K.sum(K.sum(inputs * labels, 2), 1, keepdims=True)  # per-label score
labels1 = K.expand_dims(labels[:, :-1], 3)
labels2 = K.expand_dims(labels[:, 1:], 2)
        labels = labels1 * labels2  # two shifted label tensors, used to pick the target transition scores out of the transition matrix
trans = K.expand_dims(K.expand_dims(self.trans(), 0), 0)
trans_score = K.sum(K.sum(trans * labels, [2, 3]), 1, keepdims=True)
        return point_score + trans_score  # sum of the two score parts
    def call(self, inputs):  # the CRF layer itself does not change its inputs; it only defines a loss
        return inputs
    def loss(self, y_true, y_pred):  # y_true needs to be one-hot encoded
if self.ignore_last_label:
mask = 1 - y_true[:, :, -1:]
else:
mask = K.ones_like(y_pred[:, :, :1])
y_true, y_pred = y_true[:, :, :self.num_labels], y_pred[:, :, :self.num_labels]
        path_score = self.path_score(y_pred, y_true)  # numerator (in log space)
        init_states = [y_pred[:, 0]]  # initial state
        y_pred = K.concatenate([y_pred, mask])
        log_norm, _, _ = K.rnn(self.log_norm_step, y_pred[:, 1:], init_states)  # Z vector (in log space)
        log_norm = tf.math.reduce_logsumexp(log_norm, 1, keepdims=True)  # Z (in log space)
        return log_norm - path_score  # i.e. -log(numerator / denominator)
    def accuracy(self, y_true, y_pred):  # per-token accuracy shown during training, with masked positions excluded
mask = 1 - y_true[:, :, -1] if self.ignore_last_label else None
y_true, y_pred = y_true[:, :, :self.num_labels], y_pred[:, :, :self.num_labels]
isequal = K.equal(K.argmax(y_true, 2), K.argmax(y_pred, 2))
isequal = K.cast(isequal, 'float32')
        if mask is None:
return K.mean(isequal)
else:
return K.sum(isequal * mask) / K.sum(mask)
def max_in_dict(d):  # return the (key, value) pair with the largest value in a dict
dict_items = list(d.items())
key, value = dict_items[0]
for i, j in dict_items[1:]:
if j > value:
key, value = i, j
return key, value
def viterbi(nodes, trans):  # Viterbi algorithm, same as in the earlier HMM section
    paths = nodes[0]  # initialize the starting paths
    for l in range(1, len(nodes)):  # iterate over the remaining positions
        paths_old, paths = paths, {}
        for n, ns in nodes[l].items():  # all nodes at the current position
            max_path, max_score = '', -1e10
            for p, ps in paths_old.items():  # best paths up to the previous position
                score = ns + ps + trans[p[-1] + n]  # new score
                if score > max_score:  # if the new score beats the current best
                    max_path, max_score = p + n, score  # update the path
            paths[max_path] = max_score  # keep the best path ending at each current node
return max_in_dict(paths)
def cut(s, trans, char2id):  # segmentation function, basically the same as the earlier HMM version
    if not s:  # return immediately on empty input
        return []
    # Convert the character sequence into an id sequence. Note that after the earlier corpus
    # preprocessing the character set contains no spaces, so a space simply reuses the id of '。'.
sent_ids = np.array([[char2id.get(c, 0) if c != ' ' else char2id[u'。']
for c in s]])
probas = model.predict(sent_ids)[0] # [n,5]
    nodes = [dict(zip('sbme', i)) for i in probas[:, :4]]  # keep only the first 4 labels; the last one is the mask
    nodes[0] = {i: j for i, j in nodes[0].items() if i in 'bs'}  # the first character can only be tagged b or s
    nodes[-1] = {i: j for i, j in nodes[-1].items() if i in 'es'}  # the last character can only be tagged e or s
tags = viterbi(nodes, trans)[0]
result = [s[0]]
for i, j in zip(s[1:], tags[1:]):
        if j in 'bs':  # start of a new word
            result.append(i)
        else:  # continuation of the current word
result[-1] += i
return result
class Evaluate(k.callbacks.Callback):
def __init__(self, tag2id, char2id):
self.highest = 0.
self.tag2id = tag2id
self.char2id = char2id
self.history = []
def on_train_batch_end(self, batch, logs=None):
        A = self.model.get_layer('crf').get_weights()[0][:4, :4]  # fetch the latest transition matrix from the model being trained
self.history.append(A)
# def on_epoch_end(self, epoch, logs=None):
    #     A = self.model.get_weights()[-1][:4, :4]  # fetch the latest transition matrix from the model being trained
# trans = {}
# for i in 'sbme':
# for j in 'sbme':
# trans[i + j] = A[self.tag2id[i], self.tag2id[j]]
# right = 0.
# total = 0.
    #     for s in tqdm(iter(valid_sents), desc=u'validating model'):
# result = cut(''.join(s), trans, self.char2id)
# total += len(set(s))
    #         right += len(set(s) & set(result))  # use the word-set intersection as the correct count. This metric is
    #         # rather crude and may overestimate accuracy; readers may want to define their own metric.
# acc = right / total
# if acc > self.highest:
# self.highest = acc
# print('val acc: %s, highest: %s' % (acc, self.highest))
def show_anime(self, save_path='gif/crf.gif'):
fig, ax = plt.subplots()
fig.set_tight_layout(True)
ax: plt.Axes
A = self.history[0]
c = ax.pcolor(A, cmap='RdBu_r', vmin=A.min(), vmax=A.max(),
edgecolors='w', linewidths=30)
ax.set_xticks(np.arange(4) + 0.5)
ax.set_yticks(np.arange(4) + 0.5)
ax.set_xticklabels(list('sbme'))
ax.set_yticklabels(list('sbme'))
for i in range(4):
for j in range(4):
text = ax.text(j + 0.5, i + 0.5,
f'{A[i, j]:^4.2f}',
ha="center", va="center", color="w")
def update(t):
ax.cla()
ax.set_title(f'iter {t}')
ax.set_xticks(np.arange(4) + 0.5)
ax.set_yticks(np.arange(4) + 0.5)
ax.set_xticklabels(list('sbme'))
ax.set_yticklabels(list('sbme'))
A = self.history[t]
c = ax.pcolor(A, cmap='RdBu_r', vmin=A.min(), vmax=A.max(),
edgecolors='w', linewidths=30)
for i in range(4):
for j in range(4):
text = ax.text(j + 0.5, i + 0.5,
f'{A[i, j]:^4.2f}',
ha="center", va="center", color="w")
anim = FuncAnimation(fig, update, frames=len(self.history), interval=100)
anim.save(save_path, writer='imagemagick', fps=5)
plt.show()
if __name__ == "__main__":
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
sents = []
with open('CRF/msr_training.utf8', 'r') as f:
for line in f.readlines():
sents.append(line.strip())
    sents = [re.split(' +', s) for s in sents]  # words are separated by spaces
    sents = [[w for w in s if w] for s in sents]  # drop empty strings
    np.random.shuffle(sents)  # shuffle the corpus so a validation split can be taken later
    chars = {}  # character frequency table
for s in sents:
for c in ''.join(s):
if c in chars:
chars[c] += 1
else:
chars[c] = 1
    # filter out low-frequency characters
min_count = 2
chars = {i: j for i, j in chars.items() if j >= min_count}
    id2char = {i + 1: j for i, j in enumerate(chars)}  # id-to-character mapping
    char2id = {j: i for i, j in id2char.items()}  # character-to-id mapping
    id2tag = {0: 's', 1: 'b', 2: 'm', 3: 'e'}  # mapping between the (sbme) tags and ids
tag2id = {j: i for i, j in id2tag.items()}
train_sents, valid_sents = train_test_split(sents, test_size=0.05)
batch_size = 128
def train_generator():
while True:
X, Y = [], []
            for i, s in enumerate(train_sents):  # iterate over the sentences
                sx, sy = [], []
                for w in s:  # iterate over the words in the sentence
                    sx.extend([char2id.get(c, 0) for c in w])  # iterate over the characters in the word
                    if len(w) == 1:
                        sy.append(0)  # tag for a single-character word
                    elif len(w) == 2:
                        sy.extend([1, 3])  # tags for a two-character word
                    else:
                        sy.extend([1] + [2] * (len(w) - 2) + [3])  # tags for a word longer than two characters
                X.append(sx)
                Y.append(sy)
                if len(X) == batch_size or i == len(train_sents) - 1:  # once a batch is full (or the corpus is exhausted)
                    maxlen = max([len(x) for x in X])  # longest sentence in the batch
                    X = [x + [0] * (maxlen - len(x)) for x in X]  # pad shorter ones with zeros
                    Y = [y + [4] * (maxlen - len(y)) for y in Y]  # pad shorter ones with the fifth label
yield np.array(X), tf.keras.utils.to_categorical(Y, 5)
X, Y = [], []
embedding_size = 128
    sequence = kl.Input(shape=(None,), dtype='int32')  # input layer; the sequence length is left as None
    embedding = kl.Embedding(len(chars) + 1, embedding_size)(sequence)  # mask_zero=True removed
    cnn = kl.Conv1D(128, 3, activation='relu', padding='same')(embedding)
    cnn = kl.Conv1D(128, 3, activation='relu', padding='same')(cnn)
    cnn = kl.Conv1D(128, 3, activation='relu', padding='same')(cnn)  # three stacked CNN layers
    crf = CRF(True, lr_mult=100.)  # CRF layer; True auto-masks the last label, and the CRF learning rate is scaled up 100x
    tag_score = kl.Dense(5)(cnn)  # now 5 classes; the fifth label is used for masking
    tag_score = crf(tag_score)  # wrap the original tag_score
model = k.Model(inputs=sequence, outputs=tag_score)
model.summary()
    model.compile(loss=crf.loss,  # use the CRF's own loss
                  optimizer=k.optimizers.Adam(0.001),
                  metrics=[crf.accuracy]  # use the CRF's own accuracy
)
evaluator = Evaluate(tag2id, char2id)
model.fit_generator(train_generator(),
steps_per_epoch=100,
epochs=1,
                        callbacks=[evaluator])  # train, with the evaluator added to the training loop
    A = model.get_layer('crf').get_weights()[0][:4, :4]  # [:4, :4] drops the mask's transition probabilities
trans = {}
for i in 'sbme':
for j in 'sbme':
trans[i + j] = A[tag2id[i], tag2id[j]]
right = 0.
total = 0.
for s in range(5):
s = valid_sents[s]
result = cut(''.join(s), trans, char2id)
print(''.join(s), '\n', result)
evaluator.show_anime()
|
[
"re.split",
"tensorflow.keras.utils.to_categorical",
"matplotlib.pyplot.show",
"tensorflow.config.experimental.set_memory_growth",
"sklearn.model_selection.train_test_split",
"numpy.array",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.math.reduce_logsumexp",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.random.shuffle"
] |
[((7282, 7333), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (7326, 7333), True, 'import tensorflow as tf\n'), ((7416, 7483), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (7456, 7483), True, 'import tensorflow as tf\n'), ((7726, 7750), 'numpy.random.shuffle', 'np.random.shuffle', (['sents'], {}), '(sents)\n', (7743, 7750), True, 'import numpy as np\n'), ((8258, 8297), 'sklearn.model_selection.train_test_split', 'train_test_split', (['sents'], {'test_size': '(0.05)'}), '(sents, test_size=0.05)\n', (8274, 8297), False, 'from sklearn.model_selection import train_test_split\n'), ((1581, 1624), 'tensorflow.math.reduce_logsumexp', 'tf.math.reduce_logsumexp', (['(states + trans)', '(1)'], {}), '(states + trans, 1)\n', (1605, 1624), True, 'import tensorflow as tf\n'), ((2900, 2952), 'tensorflow.math.reduce_logsumexp', 'tf.math.reduce_logsumexp', (['log_norm', '(1)'], {'keepdims': '(True)'}), '(log_norm, 1, keepdims=True)\n', (2924, 2952), True, 'import tensorflow as tf\n'), ((5971, 5985), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5983, 5985), True, 'import matplotlib.pyplot as plt\n'), ((7221, 7231), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7229, 7231), True, 'import matplotlib.pyplot as plt\n'), ((7621, 7638), 're.split', 're.split', (['""" +"""', 's'], {}), "(' +', s)\n", (7629, 7638), False, 'import re\n'), ((6189, 6201), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6198, 6201), True, 'import numpy as np\n'), ((6227, 6239), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6236, 6239), True, 'import numpy as np\n'), ((6600, 6612), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6609, 6612), True, 'import numpy as np\n'), ((6640, 6652), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6649, 6652), True, 'import numpy as np\n'), ((9086, 9097), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (9094, 9097), True, 'import numpy as np\n'), ((9099, 9134), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['Y', '(5)'], {}), '(Y, 5)\n', (9128, 9134), True, 'import tensorflow as tf\n')]
|
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
import re
layer_keys = sorted(state_dict.keys())
for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
if not stage_with_dcn:
continue
for old_key in layer_keys:
pattern = ".*block{}.*conv2.*".format(ix)
r = re.match(pattern, old_key)
if r is None:
continue
for param in ["weight", "bias"]:
                if old_key.find(param) == -1:
continue
if 'unit01' in old_key:
continue
new_key = old_key.replace(
"conv2.{}".format(param), "conv2.conv.{}".format(param)
)
print("pattern: {}, old_key: {}, new_key: {}".format(
pattern, old_key, new_key
))
# Calculate SD conv weight
w = state_dict[old_key]
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
state_dict[new_key] = w
del state_dict[old_key]
return state_dict
def load_big_format(cfg, f):
model = OrderedDict()
weights = np.load(f)
cmap = {'a':1, 'b':2, 'c':3}
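    # the 'a'/'b'/'c' suffixes in the TF variable names correspond to the three convs / group norms
    # of a unit and map to conv1/gn1, conv2/gn2, conv3/gn3 in the torch naming used below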
for key, val in weights.items():
old_key = key.replace('resnet/', '')
if 'root_block' in old_key:
new_key = 'root.conv.weight'
elif '/proj/standardized_conv2d/kernel' in old_key:
key_pattern = old_key.replace('/proj/standardized_conv2d/kernel', '').replace('resnet/', '')
bname, uname, cidx = key_pattern.split('/')
new_key = '{}.downsample.{}.conv{}.weight'.format(bname,uname,cmap[cidx])
elif '/standardized_conv2d/kernel' in old_key:
key_pattern = old_key.replace('/standardized_conv2d/kernel', '').replace('resnet/', '')
bname, uname, cidx = key_pattern.split('/')
new_key = '{}.{}.conv{}.weight'.format(bname,uname,cmap[cidx])
elif '/group_norm/gamma' in old_key:
key_pattern = old_key.replace('/group_norm/gamma', '').replace('resnet/', '')
bname, uname, cidx = key_pattern.split('/')
new_key = '{}.{}.gn{}.weight'.format(bname,uname,cmap[cidx])
elif '/group_norm/beta' in old_key:
key_pattern = old_key.replace('/group_norm/beta', '').replace('resnet/', '')
bname, uname, cidx = key_pattern.split('/')
new_key = '{}.{}.gn{}.bias'.format(bname,uname,cmap[cidx])
else:
print('Unknown key {}'.format(old_key))
continue
print('Map {} -> {}'.format(key, new_key))
model[new_key] = tf2th(val)
model = _rename_conv_weights_for_deformable_conv_layers(model, cfg)
return dict(model=model)
|
[
"collections.OrderedDict",
"re.match",
"torch.sqrt",
"torch.from_numpy",
"torch.var_mean",
"numpy.load"
] |
[((272, 302), 'torch.from_numpy', 'torch.from_numpy', (['conv_weights'], {}), '(conv_weights)\n', (288, 302), False, 'import torch\n'), ((1609, 1622), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1620, 1622), False, 'from collections import OrderedDict\n'), ((1638, 1648), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1645, 1648), True, 'import numpy as np\n'), ((678, 704), 're.match', 're.match', (['pattern', 'old_key'], {}), '(pattern, old_key)\n', (686, 704), False, 'import re\n'), ((1339, 1401), 'torch.var_mean', 'torch.var_mean', (['w'], {'dim': '[1, 2, 3]', 'keepdim': '(True)', 'unbiased': '(False)'}), '(w, dim=[1, 2, 3], keepdim=True, unbiased=False)\n', (1353, 1401), False, 'import torch\n'), ((1433, 1454), 'torch.sqrt', 'torch.sqrt', (['(v + 1e-10)'], {}), '(v + 1e-10)\n', (1443, 1454), False, 'import torch\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""WCS related utility functions."""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from astropy.wcs import WCS
from astropy.coordinates import Angle
__all__ = [
'linear_wcs_to_arrays',
'linear_arrays_to_wcs',
'get_wcs_ctype',
'get_resampled_wcs'
]
def get_wcs_ctype(wcs):
"""
Get celestial coordinate type of WCS instance.
Parameters
----------
wcs : `~astropy.wcs.WCS`
WCS transformation instance.
Returns
-------
    ctype : {'galactic', 'icrs'}
String specifying the coordinate type, that can be used with
`~astropy.coordinates.SkyCoord`
"""
ctype = wcs.wcs.ctype
if 'GLON' in ctype[0] or 'GLON' in ctype[1]:
return 'galactic'
elif 'RA' in ctype[0] or 'RA' in ctype[1]:
return 'icrs'
else:
raise TypeError("Can't determine WCS coordinate type.")
def get_resampled_wcs(wcs, factor, downsampled):
"""
Get resampled WCS object.
"""
wcs = wcs.deepcopy()
if not downsampled:
factor = 1. / factor
wcs.wcs.cdelt *= factor
wcs.wcs.crpix = (wcs.wcs.crpix - 0.5) / factor + 0.5
return wcs
def linear_wcs_to_arrays(wcs, nbins_x, nbins_y):
"""Make a 2D linear binning from a WCS object.
This method gives the correct answer only for linear X, Y binning.
The method expects angular quantities in the WCS object.
X is identified with WCS axis 1, Y is identified with WCS axis 2.
The method needs the number of bins as input, since it is not in
the WCS object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
WCS object describing the bin coordinates
nbins_x : int
number of bins in X coordinate
nbins_y : int
number of bins in Y coordinate
Returns
-------
bin_edges_x : `~astropy.coordinates.Angle`
array with the bin edges for the X coordinate
bin_edges_y : `~astropy.coordinates.Angle`
array with the bin edges for the Y coordinate
"""
# check number of dimensions
if wcs.wcs.naxis != 2:
raise ValueError("Expected exactly 2 dimensions, got {}"
.format(wcs.wcs.naxis))
# check that wcs axes are linear
# TODO: is there an easy way to do this?
# set bins
unit_x, unit_y = wcs.wcs.cunit
delta_x, delta_y = wcs.wcs.cdelt
delta_x = Angle(delta_x, unit_x)
delta_y = Angle(delta_y, unit_y)
bin_edges_x = np.arange(nbins_x + 1) * delta_x
bin_edges_y = np.arange(nbins_y + 1) * delta_y
# translate bins to correct values according to WCS reference
# In FITS, the edge of the image is at pixel coordinate +0.5.
refpix_x, refpix_y = wcs.wcs.crpix
refval_x, refval_y = wcs.wcs.crval
refval_x = Angle(refval_x, unit_x)
refval_y = Angle(refval_y, unit_y)
bin_edges_x += refval_x - (refpix_x - 0.5) * delta_x
bin_edges_y += refval_y - (refpix_y - 0.5) * delta_y
# set small values (compared to delta (i.e. step)) to 0
for i in np.arange(len(bin_edges_x)):
if np.abs(bin_edges_x[i] / delta_x) < 1.e-10:
bin_edges_x[i] = Angle(0., unit_x)
for i in np.arange(len(bin_edges_y)):
if np.abs(bin_edges_y[i] / delta_y) < 1.e-10:
bin_edges_y[i] = Angle(0., unit_y)
return bin_edges_x, bin_edges_y
def linear_arrays_to_wcs(name_x, name_y, bin_edges_x, bin_edges_y):
"""Make a 2D linear WCS object from arrays of bin edges.
This method gives the correct answer only for linear X, Y binning.
X is identified with WCS axis 1, Y is identified with WCS axis 2.
Parameters
----------
name_x : str
name of X coordinate, to be used as 'CTYPE' value
name_y : str
name of Y coordinate, to be used as 'CTYPE' value
bin_edges_x : `~astropy.coordinates.Angle`
array with the bin edges for the X coordinate
bin_edges_y : `~astropy.coordinates.Angle`
array with the bin edges for the Y coordinate
Returns
-------
wcs : `~astropy.wcs.WCS`
WCS object describing the bin coordinates
"""
# check units
unit_x = bin_edges_x.unit
unit_y = bin_edges_y.unit
if unit_x != unit_y:
ss_error = "Units of X ({0}) and Y ({1}) bins do not match!".format(
unit_x, unit_y)
ss_error += " Is this expected?"
raise ValueError(ss_error)
# Create a new WCS object. The number of axes must be set from the start
wcs = WCS(naxis=2)
# Set up DET coordinates in degrees
nbins_x = len(bin_edges_x) - 1
nbins_y = len(bin_edges_y) - 1
range_x = Angle([bin_edges_x[0], bin_edges_x[-1]])
range_y = Angle([bin_edges_y[0], bin_edges_y[-1]])
delta_x = (range_x[1] - range_x[0]) / nbins_x
delta_y = (range_y[1] - range_y[0]) / nbins_y
wcs.wcs.ctype = [name_x, name_y]
wcs.wcs.cunit = [unit_x, unit_y]
wcs.wcs.cdelt = [delta_x.to(unit_x).value, delta_y.to(unit_y).value]
# ref as lower left corner (start of (X, Y) bin coordinates)
# coordinate start at pix = 0.5
wcs.wcs.crpix = [0.5, 0.5]
wcs.wcs.crval = [(bin_edges_x[0] + (wcs.wcs.crpix[0] - 0.5) * delta_x).to(unit_x).value,
(bin_edges_y[0] + (wcs.wcs.crpix[1] - 0.5) * delta_y).to(unit_y).value]
return wcs
|
[
"numpy.abs",
"astropy.wcs.WCS",
"numpy.arange",
"astropy.coordinates.Angle"
] |
[((2472, 2494), 'astropy.coordinates.Angle', 'Angle', (['delta_x', 'unit_x'], {}), '(delta_x, unit_x)\n', (2477, 2494), False, 'from astropy.coordinates import Angle\n'), ((2509, 2531), 'astropy.coordinates.Angle', 'Angle', (['delta_y', 'unit_y'], {}), '(delta_y, unit_y)\n', (2514, 2531), False, 'from astropy.coordinates import Angle\n'), ((2859, 2882), 'astropy.coordinates.Angle', 'Angle', (['refval_x', 'unit_x'], {}), '(refval_x, unit_x)\n', (2864, 2882), False, 'from astropy.coordinates import Angle\n'), ((2898, 2921), 'astropy.coordinates.Angle', 'Angle', (['refval_y', 'unit_y'], {}), '(refval_y, unit_y)\n', (2903, 2921), False, 'from astropy.coordinates import Angle\n'), ((4560, 4572), 'astropy.wcs.WCS', 'WCS', ([], {'naxis': '(2)'}), '(naxis=2)\n', (4563, 4572), False, 'from astropy.wcs import WCS\n'), ((4698, 4738), 'astropy.coordinates.Angle', 'Angle', (['[bin_edges_x[0], bin_edges_x[-1]]'], {}), '([bin_edges_x[0], bin_edges_x[-1]])\n', (4703, 4738), False, 'from astropy.coordinates import Angle\n'), ((4753, 4793), 'astropy.coordinates.Angle', 'Angle', (['[bin_edges_y[0], bin_edges_y[-1]]'], {}), '([bin_edges_y[0], bin_edges_y[-1]])\n', (4758, 4793), False, 'from astropy.coordinates import Angle\n'), ((2550, 2572), 'numpy.arange', 'np.arange', (['(nbins_x + 1)'], {}), '(nbins_x + 1)\n', (2559, 2572), True, 'import numpy as np\n'), ((2601, 2623), 'numpy.arange', 'np.arange', (['(nbins_y + 1)'], {}), '(nbins_y + 1)\n', (2610, 2623), True, 'import numpy as np\n'), ((3150, 3182), 'numpy.abs', 'np.abs', (['(bin_edges_x[i] / delta_x)'], {}), '(bin_edges_x[i] / delta_x)\n', (3156, 3182), True, 'import numpy as np\n'), ((3222, 3240), 'astropy.coordinates.Angle', 'Angle', (['(0.0)', 'unit_x'], {}), '(0.0, unit_x)\n', (3227, 3240), False, 'from astropy.coordinates import Angle\n'), ((3293, 3325), 'numpy.abs', 'np.abs', (['(bin_edges_y[i] / delta_y)'], {}), '(bin_edges_y[i] / delta_y)\n', (3299, 3325), True, 'import numpy as np\n'), ((3365, 3383), 'astropy.coordinates.Angle', 'Angle', (['(0.0)', 'unit_y'], {}), '(0.0, unit_y)\n', (3370, 3383), False, 'from astropy.coordinates import Angle\n')]
|
import cv2
import numpy as np
import os
under_layer_path = '/home/ubuntu/share/cam_lidar/Tu_indoor/red2'
upper_layer_path = "/home/ubuntu/share/cam_lidar/Tu_indoor/aisle02_dir"
target_files = os.listdir(upper_layer_path)
target_imgs = [f for f in target_files if os.path.isfile(os.path.join(upper_layer_path, f))]
try:
target_imgs.remove(".DS_Store")
except ValueError:
pass
lower = np.array([0, 0, 128])
upper = np.array([0, 0, 128])
target_colors = np.array([
[0, 0, 0],
[192, 0, 0],
[128, 64, 128],
[0, 0, 128],
[0, 64, 64],
[128, 128, 192],
[128, 0, 64],
[128, 128, 128],
])
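# For each image: keep only the (0, 0, 128) regions of the under-layer image, then overwrite them
# with whatever regions of the upper-layer image match one of the target colors above.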
for img_name in target_imgs:
base_img = cv2.imread(os.path.join(under_layer_path, img_name), cv2.IMREAD_COLOR)
result_img = np.zeros(base_img.shape, dtype=base_img.dtype)
img_mask = cv2.inRange(base_img, lower, upper)
img_mask_color = cv2.bitwise_and(base_img, base_img, mask=img_mask)
result_img = cv2.add(result_img, img_mask_color)
cv2.imwrite("result.png", result_img)
target_img = cv2.imread(os.path.join(upper_layer_path, img_name), cv2.IMREAD_COLOR)
for color in target_colors:
img_mask = cv2.inRange(target_img, color, color)
img_mask_inv = cv2.bitwise_not(img_mask)
img_mask_color = cv2.bitwise_and(target_img, target_img, mask=img_mask)
result_img = cv2.bitwise_and(result_img, result_img, mask=img_mask_inv)
result_img = cv2.add(result_img, img_mask_color)
print(os.path.join(upper_layer_path, img_name[:-3]) + "png")
cv2.imwrite(os.path.join(upper_layer_path, img_name[:-3] + "png"), result_img)
|
[
"cv2.imwrite",
"os.listdir",
"cv2.inRange",
"cv2.bitwise_and",
"os.path.join",
"numpy.array",
"numpy.zeros",
"cv2.bitwise_not",
"cv2.add"
] |
[((193, 221), 'os.listdir', 'os.listdir', (['upper_layer_path'], {}), '(upper_layer_path)\n', (203, 221), False, 'import os\n'), ((393, 414), 'numpy.array', 'np.array', (['[0, 0, 128]'], {}), '([0, 0, 128])\n', (401, 414), True, 'import numpy as np\n'), ((423, 444), 'numpy.array', 'np.array', (['[0, 0, 128]'], {}), '([0, 0, 128])\n', (431, 444), True, 'import numpy as np\n'), ((461, 589), 'numpy.array', 'np.array', (['[[0, 0, 0], [192, 0, 0], [128, 64, 128], [0, 0, 128], [0, 64, 64], [128, \n 128, 192], [128, 0, 64], [128, 128, 128]]'], {}), '([[0, 0, 0], [192, 0, 0], [128, 64, 128], [0, 0, 128], [0, 64, 64],\n [128, 128, 192], [128, 0, 64], [128, 128, 128]])\n', (469, 589), True, 'import numpy as np\n'), ((938, 984), 'numpy.zeros', 'np.zeros', (['base_img.shape'], {'dtype': 'base_img.dtype'}), '(base_img.shape, dtype=base_img.dtype)\n', (946, 984), True, 'import numpy as np\n'), ((1001, 1036), 'cv2.inRange', 'cv2.inRange', (['base_img', 'lower', 'upper'], {}), '(base_img, lower, upper)\n', (1012, 1036), False, 'import cv2\n'), ((1058, 1108), 'cv2.bitwise_and', 'cv2.bitwise_and', (['base_img', 'base_img'], {'mask': 'img_mask'}), '(base_img, base_img, mask=img_mask)\n', (1073, 1108), False, 'import cv2\n'), ((1126, 1161), 'cv2.add', 'cv2.add', (['result_img', 'img_mask_color'], {}), '(result_img, img_mask_color)\n', (1133, 1161), False, 'import cv2\n'), ((1166, 1203), 'cv2.imwrite', 'cv2.imwrite', (['"""result.png"""', 'result_img'], {}), "('result.png', result_img)\n", (1177, 1203), False, 'import cv2\n'), ((861, 901), 'os.path.join', 'os.path.join', (['under_layer_path', 'img_name'], {}), '(under_layer_path, img_name)\n', (873, 901), False, 'import os\n'), ((1233, 1273), 'os.path.join', 'os.path.join', (['upper_layer_path', 'img_name'], {}), '(upper_layer_path, img_name)\n', (1245, 1273), False, 'import os\n'), ((1344, 1381), 'cv2.inRange', 'cv2.inRange', (['target_img', 'color', 'color'], {}), '(target_img, color, color)\n', (1355, 1381), False, 'import cv2\n'), ((1405, 1430), 'cv2.bitwise_not', 'cv2.bitwise_not', (['img_mask'], {}), '(img_mask)\n', (1420, 1430), False, 'import cv2\n'), ((1456, 1510), 'cv2.bitwise_and', 'cv2.bitwise_and', (['target_img', 'target_img'], {'mask': 'img_mask'}), '(target_img, target_img, mask=img_mask)\n', (1471, 1510), False, 'import cv2\n'), ((1532, 1590), 'cv2.bitwise_and', 'cv2.bitwise_and', (['result_img', 'result_img'], {'mask': 'img_mask_inv'}), '(result_img, result_img, mask=img_mask_inv)\n', (1547, 1590), False, 'import cv2\n'), ((1612, 1647), 'cv2.add', 'cv2.add', (['result_img', 'img_mask_color'], {}), '(result_img, img_mask_color)\n', (1619, 1647), False, 'import cv2\n'), ((1731, 1784), 'os.path.join', 'os.path.join', (['upper_layer_path', "(img_name[:-3] + 'png')"], {}), "(upper_layer_path, img_name[:-3] + 'png')\n", (1743, 1784), False, 'import os\n'), ((279, 312), 'os.path.join', 'os.path.join', (['upper_layer_path', 'f'], {}), '(upper_layer_path, f)\n', (291, 312), False, 'import os\n'), ((1659, 1704), 'os.path.join', 'os.path.join', (['upper_layer_path', 'img_name[:-3]'], {}), '(upper_layer_path, img_name[:-3])\n', (1671, 1704), False, 'import os\n')]
|
## -*- coding: utf-8 -*-
"""
Created on Tue Sep 26 13:38:17 2017
@author: Administrator
"""
import dlib
import cv2
import numpy as np
from sklearn.externals import joblib
import os
import pathAttributes
#ap = argparse.ArgumentParser()
#ap.add_argument("-p", "--shape-predictor", metavar="D:\\用户目录\\下载\\shape_predictor_68_face_landmarks.dat\\shape_predictor_68_face_landmarks.dat", required=True,
# help="path to facial landmark predictor")
#ap.add_argument("-r", "--picamera", type=int, default=-1,
#help="whether or not the Raspberry Pi camera should be used")
#args = vars(ap.parse_args())
def faceRecognition():
f = open(pathAttributes.dictionary, 'r')
result = {}
for line in f.readlines():
line = line.strip()
print(line)
if not len(line):
continue
result[line.split(':')[0]] = line.split(':')[1]
f.close()
#face_detection_model = "C:\\Users\\Administrator\\shape_predictor_68_face_landmarks.dat"
#print(result)
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(pathAttributes.face_detection_model)
face_encoder = dlib.face_recognition_model_v1(pathAttributes.face_recognition_model)
print("[INFO] camera sensor warming up...")
#vs = VideoStream().start()
    video_capture = cv2.VideoCapture(0)  # open the camera via OpenCV
#time.sleep(2.0)
"""
chris_image = cv2.imread('E:\\49.png')
#chris_image_gray = cv2.cvtColor(chris_image, cv2.COLOR_GRAY2RGB)
chris = detector(chris_image, 1)
chris_shape = predictor(chris_image, chris[0])
chris_face_encoding = face_encoder.compute_face_descriptor(chris_image, chris_shape, 1)
print("Chris:"+str(chris_face_encoding))
julie_image = cv2.imread('E:\\1.png')
#julie_image_gray = cv2.cvtColor(julie_image, cv2.COLOR_GRAY2RGB)
julie = detector(julie_image, 1)
julie_shape = predictor(julie_image, julie[0])
julie_face_encoding = face_encoder.compute_face_descriptor(julie_image, julie_shape, 1)
print("JULIE:"+str(julie_face_encoding))
"""
face_locations = []
face_encodings = []
face_names = []
raw_list = []
while True:
raw_list = []
face_names = []
# grab the frame from the threaded video stream, resize it to
# have a maximum width of 400 pixels, and convert it to
# grayscale
#frame = vs.read()
#frame = imutils.resize(frame, width=400)
ret, frame = video_capture.read()
#dim = (int(frame.shape[1] * 0.25), int(frame.shape[0] * 0.25))
dim = (int(frame.shape[1] * 0.2), int(frame.shape[0] * 0.2))
small_frame = cv2.resize(frame, dim)
gray_one_channel = cv2.cvtColor(small_frame, cv2.COLOR_BGR2GRAY)
#face_locations = face_recognition.face_locations(small_frame)
gray = cv2.cvtColor(gray_one_channel, cv2.COLOR_GRAY2RGB)
# detect faces in the grayscale frame
rects = detector(gray, 1)
#print("rects:"+str(rects))
for rect in rects:
#print("rect:"+str(rect))
css = [rect.top(), rect.right(), rect.bottom(), rect.left()]
location = max(css[0], 0), min(css[1], gray.shape[1]), min(css[2], gray.shape[0]), max(css[3], 0)
face_location = dlib.rectangle(location[3], location[0], location[1], location[2])
face_locations.append(face_location)
raw_list.append(css)
shape = predictor(gray, face_location)
face_encoding = face_encoder.compute_face_descriptor(gray, shape, 1)
#print("random:"+str(face_encoding))
"""
match_chris = []
match_julie = []
chris_norm = 0
julie_norm = 0
if len([chris_face_encoding]) == 0:
match_chris = list(0<=0.6)
else:
chris_norm = np.linalg.norm(np.array([chris_face_encoding]) - np.array([face_encoding]), axis=1)
match_chris = list(chris_norm<= 0.6)
print("chris:"+str(chris_norm))
name = "Unknown"
if len([julie_face_encoding]) == 0:
match_julie = list(0<=0.6)
else:
julie_norm = np.linalg.norm(np.array([julie_face_encoding]) - np.array([face_encoding]), axis=1)
match_julie = list(julie_norm <= 0.6)
print("julie:"+str(julie_norm))
if match_chris[0]!=0 and match_julie[0]!=0:
if julie_norm>chris_norm:
name = "Chris"
else:
name = "Julie"
elif match_julie[0] == 0 and match_chris[0] !=0:
name = "Chris"
elif match_julie[0] != 0 and match_chris[0] ==0:
name = "Julie"
else:
name = "Unknown"
"""
threshold = -0.05 #-0.1 for C=0.1 4-8 6 for 0.3
proba = 0.72
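                # predictions whose best class probability falls below `proba` are relabelled "unknown" below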
clf = joblib.load(pathAttributes.SVM_model)
feeaturesArray = np.array(face_encoding)
ID = clf.predict(feeaturesArray.reshape(1,-1))[0]
name = result[str(ID)]
#scores = clf.decision_function(feeaturesArray.reshape(1,-1))
scores = clf.predict_proba(feeaturesArray.reshape(1,-1))
"""
scores_sorted = np.sort(scores)
second_biggest = scores_sorted[0][-2]
minimum = scores_sorted[0][0]
biggest_score = np.max(scores)
gap = biggest_score - minimum
gap_2 = biggest_score - second_biggest
print(gap_2)
percentage = gap_2/gap *100
print(percentage)
if percentage < 30:
name = "unknown"
""" """
biggest_score = np.max(scores)
if biggest_score < threshold:
name = "unknown"
"""
biggest_score = np.max(scores)
if biggest_score < proba:
name="unknown"
#scores = scores - np.min(scores)
#scores = scores/np.max(scores)
print(scores,name)
face_names.append(name)
#print(face_names)
for (top, right, bottom, left), name in zip(raw_list, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 5
right *= 5
bottom *= 5
left *= 5
            # Draw a box around the face
cv2.rectangle(frame, (left-10, top-10), (right+10, bottom+10), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left-10, bottom+10), (right+10, bottom+45), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left, bottom + 30), font, 1.0, (255, 255, 255), 1)
        cv2.imshow('Video', frame)  # display the camera frame
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
faceRecognition()
|
[
"cv2.rectangle",
"dlib.face_recognition_model_v1",
"dlib.rectangle",
"sklearn.externals.joblib.load",
"dlib.shape_predictor",
"cv2.imshow",
"numpy.max",
"dlib.get_frontal_face_detector",
"numpy.array",
"cv2.putText",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.resize",
"cv2.waitKey"
] |
[((1116, 1148), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (1146, 1148), False, 'import dlib\n'), ((1166, 1223), 'dlib.shape_predictor', 'dlib.shape_predictor', (['pathAttributes.face_detection_model'], {}), '(pathAttributes.face_detection_model)\n', (1186, 1223), False, 'import dlib\n'), ((1244, 1313), 'dlib.face_recognition_model_v1', 'dlib.face_recognition_model_v1', (['pathAttributes.face_recognition_model'], {}), '(pathAttributes.face_recognition_model)\n', (1274, 1313), False, 'import dlib\n'), ((1423, 1442), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1439, 1442), False, 'import cv2\n'), ((7424, 7447), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7445, 7447), False, 'import cv2\n'), ((2802, 2824), 'cv2.resize', 'cv2.resize', (['frame', 'dim'], {}), '(frame, dim)\n', (2812, 2824), False, 'import cv2\n'), ((2853, 2898), 'cv2.cvtColor', 'cv2.cvtColor', (['small_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(small_frame, cv2.COLOR_BGR2GRAY)\n', (2865, 2898), False, 'import cv2\n'), ((2987, 3037), 'cv2.cvtColor', 'cv2.cvtColor', (['gray_one_channel', 'cv2.COLOR_GRAY2RGB'], {}), '(gray_one_channel, cv2.COLOR_GRAY2RGB)\n', (2999, 3037), False, 'import cv2\n'), ((7219, 7245), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'frame'], {}), "('Video', frame)\n", (7229, 7245), False, 'import cv2\n'), ((3435, 3501), 'dlib.rectangle', 'dlib.rectangle', (['location[3]', 'location[0]', 'location[1]', 'location[2]'], {}), '(location[3], location[0], location[1], location[2])\n', (3449, 3501), False, 'import dlib\n'), ((5170, 5207), 'sklearn.externals.joblib.load', 'joblib.load', (['pathAttributes.SVM_model'], {}), '(pathAttributes.SVM_model)\n', (5181, 5207), False, 'from sklearn.externals import joblib\n'), ((5238, 5261), 'numpy.array', 'np.array', (['face_encoding'], {}), '(face_encoding)\n', (5246, 5261), True, 'import numpy as np\n'), ((6161, 6175), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (6167, 6175), True, 'import numpy as np\n'), ((6802, 6893), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left - 10, top - 10)', '(right + 10, bottom + 10)', '(0, 0, 255)', '(2)'], {}), '(frame, (left - 10, top - 10), (right + 10, bottom + 10), (0, \n 0, 255), 2)\n', (6815, 6893), False, 'import cv2\n'), ((6967, 7070), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left - 10, bottom + 10)', '(right + 10, bottom + 45)', '(0, 0, 255)', 'cv2.FILLED'], {}), '(frame, (left - 10, bottom + 10), (right + 10, bottom + 45), (\n 0, 0, 255), cv2.FILLED)\n', (6980, 7070), False, 'import cv2\n'), ((7123, 7199), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(left, bottom + 30)', 'font', '(1.0)', '(255, 255, 255)', '(1)'], {}), '(frame, name, (left, bottom + 30), font, 1.0, (255, 255, 255), 1)\n', (7134, 7199), False, 'import cv2\n'), ((7330, 7344), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7341, 7344), False, 'import cv2\n')]
|
from gamegym.game import Game, Situation
from gamegym.utils import get_rng
from gamegym.distribution import Explicit
from gamegym.value_learning.valuestore import LinearValueStore
import numpy as np
import pytest
from scipy.sparse import csr_matrix
def test_init():
LinearValueStore(shape=(3, 3))
LinearValueStore(np.zeros((4, 3)))
LinearValueStore(np.zeros((4, 3)), shape=(4, 3))
with pytest.raises(Exception):
LinearValueStore((3, 3))
with pytest.raises(Exception):
LinearValueStore(np.zeros((4, 3)), shape=(4, 4))
def test_value_update():
a = np.ones((4, ))
vs = LinearValueStore(a)
f = [0, 2, -1, 3]
assert vs.get(f) == pytest.approx(4.0)
assert vs.get(np.array(f)) == pytest.approx(4.0)
#assert vs.get(csr_matrix(f)) == pytest.approx(4.0)
vs.update(f, -0.5)
assert vs.values == pytest.approx([1, 0, 1.5, -0.5])
assert vs.get(f) == pytest.approx(-3.0)
def test_norm():
vs = LinearValueStore(shape=(2, 3), fix_mean=1.0)
|
[
"pytest.approx",
"numpy.ones",
"gamegym.value_learning.valuestore.LinearValueStore",
"numpy.array",
"numpy.zeros",
"pytest.raises"
] |
[((272, 302), 'gamegym.value_learning.valuestore.LinearValueStore', 'LinearValueStore', ([], {'shape': '(3, 3)'}), '(shape=(3, 3))\n', (288, 302), False, 'from gamegym.value_learning.valuestore import LinearValueStore\n'), ((590, 603), 'numpy.ones', 'np.ones', (['(4,)'], {}), '((4,))\n', (597, 603), True, 'import numpy as np\n'), ((614, 633), 'gamegym.value_learning.valuestore.LinearValueStore', 'LinearValueStore', (['a'], {}), '(a)\n', (630, 633), False, 'from gamegym.value_learning.valuestore import LinearValueStore\n'), ((960, 1004), 'gamegym.value_learning.valuestore.LinearValueStore', 'LinearValueStore', ([], {'shape': '(2, 3)', 'fix_mean': '(1.0)'}), '(shape=(2, 3), fix_mean=1.0)\n', (976, 1004), False, 'from gamegym.value_learning.valuestore import LinearValueStore\n'), ((324, 340), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (332, 340), True, 'import numpy as np\n'), ((363, 379), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (371, 379), True, 'import numpy as np\n'), ((404, 428), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (417, 428), False, 'import pytest\n'), ((438, 462), 'gamegym.value_learning.valuestore.LinearValueStore', 'LinearValueStore', (['(3, 3)'], {}), '((3, 3))\n', (454, 462), False, 'from gamegym.value_learning.valuestore import LinearValueStore\n'), ((472, 496), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (485, 496), False, 'import pytest\n'), ((680, 698), 'pytest.approx', 'pytest.approx', (['(4.0)'], {}), '(4.0)\n', (693, 698), False, 'import pytest\n'), ((733, 751), 'pytest.approx', 'pytest.approx', (['(4.0)'], {}), '(4.0)\n', (746, 751), False, 'import pytest\n'), ((855, 887), 'pytest.approx', 'pytest.approx', (['[1, 0, 1.5, -0.5]'], {}), '([1, 0, 1.5, -0.5])\n', (868, 887), False, 'import pytest\n'), ((912, 931), 'pytest.approx', 'pytest.approx', (['(-3.0)'], {}), '(-3.0)\n', (925, 931), False, 'import pytest\n'), ((523, 539), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (531, 539), True, 'import numpy as np\n'), ((717, 728), 'numpy.array', 'np.array', (['f'], {}), '(f)\n', (725, 728), True, 'import numpy as np\n')]
|
import math
from itertools import product
from typing import Tuple, List, Optional, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
class EMA:
"""
Class that keeps track of exponential moving average of model parameters of a particular model.
Also see https://github.com/chrischute/squad/blob/master/util.py#L174-L220.
"""
def __init__(self, model: torch.nn.Module, decay: float):
"""
Initialization method for the EMA class.
Parameters
----------
model: torch.nn.Module
Torch model for which the EMA instance is used to track the exponential moving average of parameter values
decay: float
Decay rate used for exponential moving average of parameters calculation:
            ema_t = (1 - decay) * p_t + decay * ema_(t-1)
"""
self.decay = decay
self.shadow = {}
self.original = {}
# Register model parameters
for name, param in model.named_parameters():
if param.requires_grad:
self.shadow[name] = param.clone().detach()
def __call__(self, model):
"""
Implements call method of EMA class
Parameters
----------
model: torch.nn.Module
Current model based on which the EMA parameters are updated
"""
with torch.no_grad():
for name, param in model.named_parameters():
if param.requires_grad:
assert name in self.shadow
new_average = (1.0 - self.decay) * param + self.decay * self.shadow[
name
]
self.shadow[name] = new_average
def assign(self, model: torch.nn.Module):
"""
This method assigns the parameter EMAs saved in self.shadow to the given model. The current parameter values
of the model are saved to self.original. These original parameters can be restored using self.resume.
Parameters
----------
model: torch.nn.Module
Model to which the current parameter EMAs are assigned.
"""
for name, param in model.named_parameters():
if param.requires_grad:
self.original[name] = param.clone()
param.data.copy_(self.shadow[name].data)
def resume(self, model: torch.nn.Module):
"""
This method restores the parameters saved in self.original to the given model. It is usually called after
the `assign` method.
Parameters
----------
model: torch.nn.Module
Torch model to which the original parameters are restored
"""
for name, param in model.named_parameters():
if param.requires_grad:
param.data.copy_(self.original[name].data)
class ModelWrapper:
"""
ModelWrapper which can be used to extract outputs of intermediate layer of a network.
"""
def __init__(self, task_model: nn.Module, to_extract: Tuple):
"""
Initializes a model wrapper for the specified task model and layer names to extract.
Parameters
----------
task_model: torch.nn.Module
Torch model to which the original parameters are restored
to_extract: Tuple
Tuple that holds names of layers for which intermediate results should be extracted and returned,
e.g. to_extract=(`avgpool`, `fc`) to extract intermediate results after the avgpool layer and last fully
connected layer in a ResNet for example.
"""
self.task_model = task_model
self.to_extract = to_extract
def __call__(self, x: torch.Tensor):
"""
The __call__ method iterates through all modules of the provided `task_model` separately. It extracts and
returns the intermediate results at layers specified by to_extract
Parameters
----------
x: torch.Tensor
Batch of samples, e.g. images, which are passed through the network and for which specified intermediate
results are extracted
Returns
----------
results: Optional[torch.Tensor, List[torch.Tensor]]
Results of forward pass of input batch through the given task model. If len(to_extract) is 1, only the
single result tensor is returned. Otherwise, a list of tensors is returned, which holds the intermediate
results of specified layers in the order of occurrence in the network.
"""
results = []
for name, child in self.task_model.named_children():
x = child(x)
if name == "avgpool":
x = torch.flatten(x, 1)
if name in self.to_extract:
results.append(x)
return results[-1] if len(results) == 1 else results
def train(self):
self.task_model.train()
def eval(self):
self.task_model.eval()
def cuda(self):
self.task_model.cuda()
def to(self, device: Union[str, torch.device]):
self.task_model.to(device)
def get_embedding_dim(self):
last_layer = list(self.task_model.modules())[-1]
return last_layer.in_features
def model_init(m: torch.nn.Module):
"""
Method that initializes torch modules depending on their type:
- Convolutional Layers: Xavier Uniform Initialization
- BatchNorm Layers: Standard initialization
    - Fully connected / linear layers: Xavier Normal Initialization
Parameters
----------
m: torch.nn.Module
Torch module which to be initialized. The specific initialization used depends on the type of module.
"""
classname = m.__class__.__name__
if classname.find("Conv") != -1:
init.xavier_uniform_(m.weight, gain=math.sqrt(2))
if m.bias is not None:
init.constant_(m.bias, 0)
elif classname.find("BatchNorm") != -1:
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif classname.find("Linear") != -1:
init.xavier_normal_(m.weight, gain=math.sqrt(2))
if m.bias is not None:
init.constant_(m.bias, 0)
def wd_check(wd_tuple: Tuple, name: str):
"""
Method that checks if parameter name matches the key words in wd_tuple. This check is used to filter certain
types of parameters independent of the layer, which it belongs to, e.g. `conv1.weight`.
Parameters
----------
wd_tuple: Tuple
Tuple which contains the phrases which are checked for, e.g. (`conv`, `weight`) or (`fc`, `weight`)
name: str
Name of parameter as saved in state dict, e.g. `conv1.weight`
Returns
----------
wd_check: bool
Returns a bool indicating whether all strings in wd_tuple are contained in name.
"""
return all([x in name for x in wd_tuple])
def apply_wd(model: torch.nn.Module, wd: float, param_names: List = ["conv", "fc"], types: List = ["weight"]):
"""
Method that manually applies weight decay to model parameters that match the specified parameter names and types.
Parameters
----------
model: torch.nn.Module
Model to which weight decay is applied
wd: float
Float specifying weight decay. Parameters are updated to: param = (1-wd) * param
param_names: List (default: ["conv", "fc"])
Parameter names (or substring of names) for which the weight decay is applied.
types: List (default: ["weight"])
Parameter types for which weight decay is applied.
"""
with torch.no_grad():
for name, param in model.state_dict().items():
if any(
[wd_check(wd_tuple, name) for wd_tuple in product(param_names, types)]
):
param.mul_(1 - wd)
def set_bn_running_updates(model, enable: bool, bn_momentum: float = 0.001):
"""
Method that enables or disables updates of the running batch norm vars by setting the momentum parameter to 0
"""
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.momentum = bn_momentum if enable else 0.0
def linear_rampup(current: int, rampup_length: int):
if rampup_length == 0:
return 1.0
else:
current = np.clip(current / rampup_length, 0.0, 1.0)
return float(current)
def set_grads(model: torch.nn.Module, trainable_layers: List[str]):
"""
Method that enables or disables gradients of model parameters according to specified layers.
Parameters
----------
model: torch.nn.Module
Torch model for which parameter gradients should be set
trainable_layers: List
List of strings, i.e. layer / parameter names, for which training is enabled. For model parameters, which do not
match any pattern specified in trainable_layers, training is disable by setting requires_grad to False.
"""
def is_trainable(x, trainable_layers):
return any([(layer in x) or ('fc' in x) for layer in trainable_layers])
for p in model.parameters():
p.requires_grad = False
trainable_parameters = [n for n, p in model.named_parameters() if is_trainable(n, trainable_layers)]
for n, p in model.named_parameters():
if n in trainable_parameters:
p.requires_grad = True
|
[
"numpy.clip",
"torch.nn.init.constant_",
"itertools.product",
"math.sqrt",
"torch.no_grad",
"torch.flatten"
] |
[((7623, 7638), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7636, 7638), False, 'import torch\n'), ((8322, 8364), 'numpy.clip', 'np.clip', (['(current / rampup_length)', '(0.0)', '(1.0)'], {}), '(current / rampup_length, 0.0, 1.0)\n', (8329, 8364), True, 'import numpy as np\n'), ((1390, 1405), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1403, 1405), False, 'import torch\n'), ((5931, 5956), 'torch.nn.init.constant_', 'init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (5945, 5956), True, 'import torch.nn.init as init\n'), ((6009, 6036), 'torch.nn.init.constant_', 'init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (6023, 6036), True, 'import torch.nn.init as init\n'), ((6045, 6070), 'torch.nn.init.constant_', 'init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (6059, 6070), True, 'import torch.nn.init as init\n'), ((4755, 4774), 'torch.flatten', 'torch.flatten', (['x', '(1)'], {}), '(x, 1)\n', (4768, 4774), False, 'import torch\n'), ((5874, 5886), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (5883, 5886), False, 'import math\n'), ((6212, 6237), 'torch.nn.init.constant_', 'init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (6226, 6237), True, 'import torch.nn.init as init\n'), ((6155, 6167), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (6164, 6167), False, 'import math\n'), ((7777, 7804), 'itertools.product', 'product', (['param_names', 'types'], {}), '(param_names, types)\n', (7784, 7804), False, 'from itertools import product\n')]
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import csv
import seaborn as sns
import itertools
import pandas as pd
import scipy
from scipy.signal import savgol_filter
from scipy.signal import find_peaks_cwt
from scipy.signal import boxcar
sns.set(font_scale=1.2)
sns.set_style("white")
colors = ["#95a5a6", "amber"]
sns.set_palette(sns.color_palette())
hr_24 = np.loadtxt("MDA DTX_1 4_24hr.txt", skiprows=1)
ctl = np.loadtxt("MDA DTX_1 Ctl.txt", skiprows=1)
hr_4 = np.loadtxt("MDA DTX_1 4hr.txt", skiprows=1)
# hr_2 = np.loadtxt("MDA-DTX-#2hr.txt", skiprows=1)
hr_8 = np.loadtxt("MDA DTX 8hr.txt", skiprows=1)
dmso = np.loadtxt("MDA DTX DMSO.txt", skiprows=1)
def filterDat(data):
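    # smooth with a num-point boxcar (moving average), then rescale onto a 0-100 "% of max" axis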
num = 9
ones = boxcar(num)/num
result = np.abs(np.convolve(data, ones, mode='same'))
return np.interp(result, (result.min(), result.max()), (0, 100))
def shift(data):
"""
firstIndex = 200
index = np.argmax(data)
if index < firstIndex:
data = np.insert(data, 0, np.zeros(
firstIndex-index))[:-(firstIndex-index)]
elif index > firstIndex:
data = data[index-firstIndex:]
data = np.insert(data, len(data)-1, np.zeros(index-firstIndex))
"""
# Stretch
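    # stretch the trace horizontally so that the highest peak below 95% of max lands at index 400,
    # presumably to line the curves up before overlaying them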
secondIndex = 400
indexes = find_peaks_cwt(data, np.arange(1, 100))
# find max of indexes
peaks = data[indexes]
secondMax = 0
lastPeak = 0
for x in range(len(peaks)):
if peaks[x] < 95.0:
if peaks[x] > lastPeak:
lastPeak = peaks[x]
secondMax = x
secondMaxIndex = indexes[secondMax]
difference = secondIndex-secondMaxIndex
ratio = secondIndex/(secondIndex-difference)
old_x = np.linspace(0, int(len(data))-1, int(len(data)))
new_x = np.linspace(0, int(len(data))-1, int(len(data)*ratio))
new_data = np.interp(new_x, old_x, data)
return new_data, np.linspace(0, int(len(new_x))-1, int(len(new_x)))
fig, axes = plt.subplots(figsize=(8, 6))
filterData = filterDat(ctl[:, 2])
y, x = shift(filterData)
axes.plot(x, y, label="Control", color='black')
axes.fill_between(x, y, alpha=0.3)
"""filterData = filterDat(hr_4[:, 2])
y, x = shift(filterData)
axes.plot(x, y, label="4 hour")
axes.fill_between(x, y, alpha=0.3)
filterData = filterDat(hr_8[:, 2])
y, x = shift(filterData)
axes.plot(x, y, label="8 hour")
axes.fill_between(x, y, alpha=0.3)
"""
filterData = filterDat(hr_24[:, 2])
y, x = shift(filterData)
axes.plot(x, y, label="24 hour", color='maroon')
axes.fill_between(x, y, alpha=0.3)
axes.legend()
axes.set_ylabel('% of Max')
axes.set_xlabel('Fluorescence')
axes.set_xlim((0, 800))
plt.show()
|
[
"seaborn.set",
"numpy.convolve",
"seaborn.color_palette",
"seaborn.set_style",
"scipy.signal.boxcar",
"numpy.interp",
"numpy.loadtxt",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((264, 287), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.2)'}), '(font_scale=1.2)\n', (271, 287), True, 'import seaborn as sns\n'), ((288, 310), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (301, 310), True, 'import seaborn as sns\n'), ((387, 433), 'numpy.loadtxt', 'np.loadtxt', (['"""MDA DTX_1 4_24hr.txt"""'], {'skiprows': '(1)'}), "('MDA DTX_1 4_24hr.txt', skiprows=1)\n", (397, 433), True, 'import numpy as np\n'), ((440, 483), 'numpy.loadtxt', 'np.loadtxt', (['"""MDA DTX_1 Ctl.txt"""'], {'skiprows': '(1)'}), "('MDA DTX_1 Ctl.txt', skiprows=1)\n", (450, 483), True, 'import numpy as np\n'), ((491, 534), 'numpy.loadtxt', 'np.loadtxt', (['"""MDA DTX_1 4hr.txt"""'], {'skiprows': '(1)'}), "('MDA DTX_1 4hr.txt', skiprows=1)\n", (501, 534), True, 'import numpy as np\n'), ((594, 635), 'numpy.loadtxt', 'np.loadtxt', (['"""MDA DTX 8hr.txt"""'], {'skiprows': '(1)'}), "('MDA DTX 8hr.txt', skiprows=1)\n", (604, 635), True, 'import numpy as np\n'), ((643, 685), 'numpy.loadtxt', 'np.loadtxt', (['"""MDA DTX DMSO.txt"""'], {'skiprows': '(1)'}), "('MDA DTX DMSO.txt', skiprows=1)\n", (653, 685), True, 'import numpy as np\n'), ((1958, 1986), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (1970, 1986), True, 'import matplotlib.pyplot as plt\n'), ((2636, 2646), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2644, 2646), True, 'import matplotlib.pyplot as plt\n'), ((357, 376), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (374, 376), True, 'import seaborn as sns\n'), ((1841, 1870), 'numpy.interp', 'np.interp', (['new_x', 'old_x', 'data'], {}), '(new_x, old_x, data)\n', (1850, 1870), True, 'import numpy as np\n'), ((732, 743), 'scipy.signal.boxcar', 'boxcar', (['num'], {}), '(num)\n', (738, 743), False, 'from scipy.signal import boxcar\n'), ((768, 804), 'numpy.convolve', 'np.convolve', (['data', 'ones'], {'mode': '"""same"""'}), "(data, ones, mode='same')\n", (779, 804), True, 'import numpy as np\n'), ((1294, 1311), 'numpy.arange', 'np.arange', (['(1)', '(100)'], {}), '(1, 100)\n', (1303, 1311), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import matplotlib.colors as colorplt
import matplotlib.pyplot as plt
import numpy as np
from sktime.distances._distance import distance_alignment_path, pairwise_distance
gray_cmap = colorplt.LinearSegmentedColormap.from_list("", ["#c9cacb", "white"])
def _path_mask(cost_matrix, path, ax, theme=gray_cmap):
plot_matrix = np.zeros_like(cost_matrix)
max_size = max(cost_matrix.shape)
for i in range(max_size):
for j in range(max_size):
if (i, j) in path:
plot_matrix[i, j] = 1.0
elif cost_matrix[i, j] == np.inf:
plot_matrix[i, j] = 0.0
else:
plot_matrix[i, j] = 0.25
for i in range(max_size):
for j in range(max_size):
c = cost_matrix[j, i]
            ax.text(i, j, str(round(c, 2)), va="center", ha="center", size=10)
ax.matshow(plot_matrix, cmap=theme)
def _pairwise_path(x, y, metric):
pw_matrix = pairwise_distance(x, y, metric=metric)
path = []
for i in range(pw_matrix.shape[0]):
for j in range(pw_matrix.shape[1]):
if i == j:
path.append((i, j))
return path, pw_matrix.trace(), pw_matrix
def _plot_path(
x: np.ndarray,
y: np.ndarray,
metric: str,
dist_kwargs: dict = None,
title: str = "",
plot_over_pw: bool = False,
):
if dist_kwargs is None:
dist_kwargs = {}
try:
path, dist, cost_matrix = distance_alignment_path(
x, y, metric=metric, return_cost_matrix=True, **dist_kwargs
)
if metric == "lcss":
_path = []
for tup in path:
_path.append(tuple(x + 1 for x in tup))
path = _path
if plot_over_pw is True:
if metric == "lcss":
pw = pairwise_distance(x, y, metric="euclidean")
cost_matrix = np.zeros_like(cost_matrix)
cost_matrix[1:, 1:] = pw
else:
pw = pairwise_distance(x, y, metric="squared")
cost_matrix = pw
except NotImplementedError:
path, dist, cost_matrix = _pairwise_path(x, y, metric)
plt.figure(1, figsize=(8, 8))
x_size = x.shape[0]
# definitions for the axes
left, bottom = 0.01, 0.1
w_ts = h_ts = 0.2
left_h = left + w_ts + 0.02
width = height = 0.65
bottom_h = bottom + height + 0.02
rect_s_y = [left, bottom, w_ts, height]
rect_gram = [left_h, bottom, width, height]
rect_s_x = [left_h, bottom_h, width, h_ts]
ax_gram = plt.axes(rect_gram)
ax_s_x = plt.axes(rect_s_x)
ax_s_y = plt.axes(rect_s_y)
_path_mask(cost_matrix, path, ax_gram)
ax_gram.axis("off")
ax_gram.autoscale(False)
# ax_gram.plot([j for (i, j) in path], [i for (i, j) in path], "w-",
# linewidth=3.)
ax_s_x.plot(np.arange(x_size), y, "b-", linewidth=3.0, color="#818587")
ax_s_x.axis("off")
ax_s_x.set_xlim((0, x_size - 1))
ax_s_y.plot(-x, np.arange(x_size), "b-", linewidth=3.0, color="#818587")
ax_s_y.axis("off")
ax_s_y.set_ylim((0, x_size - 1))
ax_s_x.set_title(title, size=10)
return plt
def _plot_alignment(x, y, metric, dist_kwargs: dict = None, title: str = ""):
if dist_kwargs is None:
dist_kwargs = {}
try:
path, dist, cost_matrix = distance_alignment_path(
x, y, metric=metric, return_cost_matrix=True, **dist_kwargs
)
except NotImplementedError:
path, dist, cost_matrix = _pairwise_path(x, y, metric)
plt.figure(1, figsize=(8, 8))
plt.plot(x, "b-", color="black")
plt.plot(y, "g-", color="black")
for positions in path:
try:
plt.plot(
[positions[0], positions[1]],
[x[positions[0]], y[positions[1]]],
"--",
color="#818587",
)
        except Exception:
continue
plt.legend()
plt.title(title)
plt.tight_layout()
return plt
if __name__ == "__main__":
x = np.array(
[
-0.7553383207,
0.4460987596,
1.197682907,
0.1714334808,
0.5639929213,
0.6891222874,
1.793828873,
0.06570866314,
0.2877381702,
1.633620422,
]
)
y = np.array(
[
0.01765193577,
1.536784164,
-0.1413292622,
-0.7609346135,
-0.1767363331,
-2.192007072,
-0.1933165696,
-0.4648166839,
-0.9444888843,
-0.239523623,
]
)
import os
def _save_plt(plt):
plt[0].savefig(f"{metric_path}/{plt[1]}")
plt[0].cla()
plt[0].clf()
if not os.path.exists("./plots"):
os.makedirs("./plots")
metrics = [
"euclidean",
"erp",
"edr",
"lcss",
"squared",
"dtw",
"ddtw",
"wdtw",
"wddtw",
"msm",
]
# metrics = ['lcss']
for metric in metrics:
metric_path = f"./plots/{metric}"
if not os.path.exists(metric_path):
os.makedirs(metric_path)
        _save_plt(
(
_plot_path(x, y, metric, {"epsilon": 1.0}),
f"{metric}_path_through_cost_matrix",
)
)
_save_plt(
(
_plot_path(x, y, metric, {"window": 0.2, "epsilon": 1.0}),
f"{metric}_path_through_20_cost_matrix",
)
)
if metric == "wdtw":
g_val = [0.2, 0.3]
for g in g_val:
file_save = str(g).split(".")
_save_plt(
(
_plot_path(x, y, metric, {"g": g}),
f"{metric}_path_through_g{file_save[1]}_cost_matrix",
)
)
_save_plt((_plot_alignment(x, y, metric), f"{metric}_alignment"))
_save_plt(
(_plot_alignment(x, y, metric, {"window": 0.2}), f"{metric}_alignment_20")
)
|
[
"os.path.exists",
"os.makedirs",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"sktime.distances._distance.pairwise_distance",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"numpy.zeros_like",
"matplotlib.pyplot.legend",
"sktime.distances._distance.distance_alignment_path"
] |
[((208, 276), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colorplt.LinearSegmentedColormap.from_list', (['""""""', "['#c9cacb', 'white']"], {}), "('', ['#c9cacb', 'white'])\n", (250, 276), True, 'import matplotlib.colors as colorplt\n'), ((353, 379), 'numpy.zeros_like', 'np.zeros_like', (['cost_matrix'], {}), '(cost_matrix)\n', (366, 379), True, 'import numpy as np\n'), ((1048, 1086), 'sktime.distances._distance.pairwise_distance', 'pairwise_distance', (['x', 'y'], {'metric': 'metric'}), '(x, y, metric=metric)\n', (1065, 1086), False, 'from sktime.distances._distance import distance_alignment_path, pairwise_distance\n'), ((2259, 2288), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(8, 8)'}), '(1, figsize=(8, 8))\n', (2269, 2288), True, 'import matplotlib.pyplot as plt\n'), ((2647, 2666), 'matplotlib.pyplot.axes', 'plt.axes', (['rect_gram'], {}), '(rect_gram)\n', (2655, 2666), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2698), 'matplotlib.pyplot.axes', 'plt.axes', (['rect_s_x'], {}), '(rect_s_x)\n', (2688, 2698), True, 'import matplotlib.pyplot as plt\n'), ((2712, 2730), 'matplotlib.pyplot.axes', 'plt.axes', (['rect_s_y'], {}), '(rect_s_y)\n', (2720, 2730), True, 'import matplotlib.pyplot as plt\n'), ((3646, 3675), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(8, 8)'}), '(1, figsize=(8, 8))\n', (3656, 3675), True, 'import matplotlib.pyplot as plt\n'), ((3681, 3713), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '"""b-"""'], {'color': '"""black"""'}), "(x, 'b-', color='black')\n", (3689, 3713), True, 'import matplotlib.pyplot as plt\n'), ((3718, 3750), 'matplotlib.pyplot.plot', 'plt.plot', (['y', '"""g-"""'], {'color': '"""black"""'}), "(y, 'g-', color='black')\n", (3726, 3750), True, 'import matplotlib.pyplot as plt\n'), ((4022, 4034), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4032, 4034), True, 'import matplotlib.pyplot as plt\n'), ((4039, 4055), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4048, 4055), True, 'import matplotlib.pyplot as plt\n'), ((4061, 4079), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4077, 4079), True, 'import matplotlib.pyplot as plt\n'), ((4132, 4291), 'numpy.array', 'np.array', (['[-0.7553383207, 0.4460987596, 1.197682907, 0.1714334808, 0.5639929213, \n 0.6891222874, 1.793828873, 0.06570866314, 0.2877381702, 1.633620422]'], {}), '([-0.7553383207, 0.4460987596, 1.197682907, 0.1714334808, \n 0.5639929213, 0.6891222874, 1.793828873, 0.06570866314, 0.2877381702, \n 1.633620422])\n', (4140, 4291), True, 'import numpy as np\n'), ((4436, 4601), 'numpy.array', 'np.array', (['[0.01765193577, 1.536784164, -0.1413292622, -0.7609346135, -0.1767363331, -\n 2.192007072, -0.1933165696, -0.4648166839, -0.9444888843, -0.239523623]'], {}), '([0.01765193577, 1.536784164, -0.1413292622, -0.7609346135, -\n 0.1767363331, -2.192007072, -0.1933165696, -0.4648166839, -0.9444888843,\n -0.239523623])\n', (4444, 4601), True, 'import numpy as np\n'), ((1545, 1634), 'sktime.distances._distance.distance_alignment_path', 'distance_alignment_path', (['x', 'y'], {'metric': 'metric', 'return_cost_matrix': '(True)'}), '(x, y, metric=metric, return_cost_matrix=True, **\n dist_kwargs)\n', (1568, 1634), False, 'from sktime.distances._distance import distance_alignment_path, pairwise_distance\n'), ((2951, 2968), 'numpy.arange', 'np.arange', (['x_size'], {}), '(x_size)\n', (2960, 2968), True, 'import numpy as np\n'), ((3092, 3109), 'numpy.arange', 'np.arange', (['x_size'], 
{}), '(x_size)\n', (3101, 3109), True, 'import numpy as np\n'), ((3439, 3528), 'sktime.distances._distance.distance_alignment_path', 'distance_alignment_path', (['x', 'y'], {'metric': 'metric', 'return_cost_matrix': '(True)'}), '(x, y, metric=metric, return_cost_matrix=True, **\n dist_kwargs)\n', (3462, 3528), False, 'from sktime.distances._distance import distance_alignment_path, pairwise_distance\n'), ((4881, 4906), 'os.path.exists', 'os.path.exists', (['"""./plots"""'], {}), "('./plots')\n", (4895, 4906), False, 'import os\n'), ((4916, 4938), 'os.makedirs', 'os.makedirs', (['"""./plots"""'], {}), "('./plots')\n", (4927, 4938), False, 'import os\n'), ((3804, 3905), 'matplotlib.pyplot.plot', 'plt.plot', (['[positions[0], positions[1]]', '[x[positions[0]], y[positions[1]]]', '"""--"""'], {'color': '"""#818587"""'}), "([positions[0], positions[1]], [x[positions[0]], y[positions[1]]],\n '--', color='#818587')\n", (3812, 3905), True, 'import matplotlib.pyplot as plt\n'), ((5235, 5262), 'os.path.exists', 'os.path.exists', (['metric_path'], {}), '(metric_path)\n', (5249, 5262), False, 'import os\n'), ((5276, 5300), 'os.makedirs', 'os.makedirs', (['metric_path'], {}), '(metric_path)\n', (5287, 5300), False, 'import os\n'), ((1903, 1946), 'sktime.distances._distance.pairwise_distance', 'pairwise_distance', (['x', 'y'], {'metric': '"""euclidean"""'}), "(x, y, metric='euclidean')\n", (1920, 1946), False, 'from sktime.distances._distance import distance_alignment_path, pairwise_distance\n'), ((1977, 2003), 'numpy.zeros_like', 'np.zeros_like', (['cost_matrix'], {}), '(cost_matrix)\n', (1990, 2003), True, 'import numpy as np\n'), ((2084, 2125), 'sktime.distances._distance.pairwise_distance', 'pairwise_distance', (['x', 'y'], {'metric': '"""squared"""'}), "(x, y, metric='squared')\n", (2101, 2125), False, 'from sktime.distances._distance import distance_alignment_path, pairwise_distance\n')]
|
import numpy as np
import pytest
from src.models.noise_transformation import average_true_var_real, average_true_var_imag, average_true_cov, \
average_true_noise_covariance, naive_noise_covariance
test_cases_real_variance = [
(2 - 3j, 0, 0, 0),
(0, 1, 1, np.exp(-2) * (2 * np.cosh(2) - np.cosh(1))),
(2j, 1, 1, 4 * np.exp(-2) * (np.sinh(2) - np.sinh(1)) + np.exp(-2) * (2 * np.sinh(2) - np.sinh(1))),
(-2j, 1, 1, 4 * np.exp(-2) * (np.sinh(2) - np.sinh(1)) + np.exp(-2) * (2 * np.sinh(2) - np.sinh(1))),
]
test_cases_imag_variance = [
(4 - 3j, 0, 0, 0),
(0, 1, 1, np.exp(-2) * (2 * np.sinh(2) - np.sinh(1))),
(2j, 1, 1, 4 * np.exp(-2) * (np.cosh(2) - np.cosh(1)) + np.exp(-2) * (2 * np.cosh(2) - np.cosh(1))),
(-2j, 1, 1, 4 * np.exp(-2) * (np.cosh(2) - np.cosh(1)) + np.exp(-2) * (2 * np.cosh(2) - np.cosh(1))),
]
test_cases_covariance = [
(4 - 3j, 0, 0, 0),
(0, 1, 1, 0),
(2j, 1, 1, 0),
(-2j, 1, 1, 0),
(np.sqrt(2) * (1 + 1j), 1, 1, 0.5 * np.exp(-4) * (1 + 5 * (1 - np.exp(1)))),
]
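# each tuple is (measurement m, sd_magnitude, sd_phase, expected), matching the parametrize signature used below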
@pytest.mark.parametrize("m,sd_magnitude,sd_phase,expected", test_cases_real_variance)
def test_variance_of_real_noise(m, sd_magnitude, sd_phase, expected):
res = average_true_var_real(m, sd_magnitude, sd_phase)
np.testing.assert_allclose(res, expected)
@pytest.mark.parametrize("m,sd_magnitude,sd_phase,expected", test_cases_imag_variance)
def test_variance_of_imag_noise(m, sd_magnitude, sd_phase, expected):
res = average_true_var_imag(m, sd_magnitude, sd_phase)
np.testing.assert_allclose(res, expected)
@pytest.mark.parametrize("m,sd_magnitude,sd_phase,expected", test_cases_covariance)
def test_covariance_of_noise(m, sd_magnitude, sd_phase, expected):
res = average_true_cov(m, sd_magnitude, sd_phase)
np.testing.assert_allclose(res, expected, rtol=0, atol=1e-10)
def test_cartesian_noise_covariance_matrix():
sd_magnitude = 1
sd_phase = 1
measurement = np.zeros(2)
res = average_true_noise_covariance(measurement, sd_magnitude, sd_phase)
expected = np.diag(
[np.exp(-2) * (2 * np.cosh(2) - np.cosh(1))] * 2 + [np.exp(-2) * (2 * np.sinh(2) - np.sinh(1))] * 2)
np.testing.assert_allclose(res.todense(), expected)
def test_naive_covariance_matrix():
sd_magnitude = 1
sd_phase = 1
measurement = np.array([0, 1j])
expected = np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1],
    ], dtype=float)
res = naive_noise_covariance(measurement, sd_magnitude, sd_phase)
np.testing.assert_allclose(res.todense(), expected, rtol=0, atol=1e-10)
|
[
"src.models.noise_transformation.naive_noise_covariance",
"numpy.sqrt",
"src.models.noise_transformation.average_true_var_imag",
"numpy.testing.assert_allclose",
"src.models.noise_transformation.average_true_cov",
"numpy.sinh",
"numpy.exp",
"pytest.mark.parametrize",
"numpy.zeros",
"src.models.noise_transformation.average_true_var_real",
"numpy.array",
"numpy.cosh",
"src.models.noise_transformation.average_true_noise_covariance"
] |
[((1045, 1134), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""m,sd_magnitude,sd_phase,expected"""', 'test_cases_real_variance'], {}), "('m,sd_magnitude,sd_phase,expected',\n test_cases_real_variance)\n", (1068, 1134), False, 'import pytest\n'), ((1309, 1398), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""m,sd_magnitude,sd_phase,expected"""', 'test_cases_imag_variance'], {}), "('m,sd_magnitude,sd_phase,expected',\n test_cases_imag_variance)\n", (1332, 1398), False, 'import pytest\n'), ((1573, 1659), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""m,sd_magnitude,sd_phase,expected"""', 'test_cases_covariance'], {}), "('m,sd_magnitude,sd_phase,expected',\n test_cases_covariance)\n", (1596, 1659), False, 'import pytest\n'), ((1211, 1259), 'src.models.noise_transformation.average_true_var_real', 'average_true_var_real', (['m', 'sd_magnitude', 'sd_phase'], {}), '(m, sd_magnitude, sd_phase)\n', (1232, 1259), False, 'from src.models.noise_transformation import average_true_var_real, average_true_var_imag, average_true_cov, average_true_noise_covariance, naive_noise_covariance\n'), ((1264, 1305), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'expected'], {}), '(res, expected)\n', (1290, 1305), True, 'import numpy as np\n'), ((1475, 1523), 'src.models.noise_transformation.average_true_var_imag', 'average_true_var_imag', (['m', 'sd_magnitude', 'sd_phase'], {}), '(m, sd_magnitude, sd_phase)\n', (1496, 1523), False, 'from src.models.noise_transformation import average_true_var_real, average_true_var_imag, average_true_cov, average_true_noise_covariance, naive_noise_covariance\n'), ((1528, 1569), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'expected'], {}), '(res, expected)\n', (1554, 1569), True, 'import numpy as np\n'), ((1733, 1776), 'src.models.noise_transformation.average_true_cov', 'average_true_cov', (['m', 'sd_magnitude', 'sd_phase'], {}), '(m, sd_magnitude, sd_phase)\n', (1749, 1776), False, 'from src.models.noise_transformation import average_true_var_real, average_true_var_imag, average_true_cov, average_true_noise_covariance, naive_noise_covariance\n'), ((1781, 1842), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'expected'], {'rtol': '(0)', 'atol': '(1e-10)'}), '(res, expected, rtol=0, atol=1e-10)\n', (1807, 1842), True, 'import numpy as np\n'), ((1947, 1958), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1955, 1958), True, 'import numpy as np\n'), ((1969, 2035), 'src.models.noise_transformation.average_true_noise_covariance', 'average_true_noise_covariance', (['measurement', 'sd_magnitude', 'sd_phase'], {}), '(measurement, sd_magnitude, sd_phase)\n', (1998, 2035), False, 'from src.models.noise_transformation import average_true_var_real, average_true_var_imag, average_true_cov, average_true_noise_covariance, naive_noise_covariance\n'), ((2319, 2338), 'numpy.array', 'np.array', (['[0, 1.0j]'], {}), '([0, 1.0j])\n', (2327, 2338), True, 'import numpy as np\n'), ((2352, 2439), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]]'], {'dtype': 'np.float'}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]], dtype=np\n .float)\n', (2360, 2439), True, 'import numpy as np\n'), ((2484, 2543), 'src.models.noise_transformation.naive_noise_covariance', 'naive_noise_covariance', (['measurement', 'sd_magnitude', 'sd_phase'], {}), '(measurement, sd_magnitude, sd_phase)\n', (2506, 2543), False, 'from src.models.noise_transformation import 
average_true_var_real, average_true_var_imag, average_true_cov, average_true_noise_covariance, naive_noise_covariance\n'), ((269, 279), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (275, 279), True, 'import numpy as np\n'), ((594, 604), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (600, 604), True, 'import numpy as np\n'), ((964, 974), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (971, 974), True, 'import numpy as np\n'), ((300, 310), 'numpy.cosh', 'np.cosh', (['(1)'], {}), '(1)\n', (307, 310), True, 'import numpy as np\n'), ((374, 384), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (380, 384), True, 'import numpy as np\n'), ((480, 490), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (486, 490), True, 'import numpy as np\n'), ((625, 635), 'numpy.sinh', 'np.sinh', (['(1)'], {}), '(1)\n', (632, 635), True, 'import numpy as np\n'), ((699, 709), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (705, 709), True, 'import numpy as np\n'), ((805, 815), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (811, 815), True, 'import numpy as np\n'), ((999, 1009), 'numpy.exp', 'np.exp', (['(-4)'], {}), '(-4)\n', (1005, 1009), True, 'import numpy as np\n'), ((287, 297), 'numpy.cosh', 'np.cosh', (['(2)'], {}), '(2)\n', (294, 297), True, 'import numpy as np\n'), ((333, 343), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (339, 343), True, 'import numpy as np\n'), ((347, 357), 'numpy.sinh', 'np.sinh', (['(2)'], {}), '(2)\n', (354, 357), True, 'import numpy as np\n'), ((360, 370), 'numpy.sinh', 'np.sinh', (['(1)'], {}), '(1)\n', (367, 370), True, 'import numpy as np\n'), ((405, 415), 'numpy.sinh', 'np.sinh', (['(1)'], {}), '(1)\n', (412, 415), True, 'import numpy as np\n'), ((439, 449), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (445, 449), True, 'import numpy as np\n'), ((453, 463), 'numpy.sinh', 'np.sinh', (['(2)'], {}), '(2)\n', (460, 463), True, 'import numpy as np\n'), ((466, 476), 'numpy.sinh', 'np.sinh', (['(1)'], {}), '(1)\n', (473, 476), True, 'import numpy as np\n'), ((511, 521), 'numpy.sinh', 'np.sinh', (['(1)'], {}), '(1)\n', (518, 521), True, 'import numpy as np\n'), ((612, 622), 'numpy.sinh', 'np.sinh', (['(2)'], {}), '(2)\n', (619, 622), True, 'import numpy as np\n'), ((658, 668), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (664, 668), True, 'import numpy as np\n'), ((672, 682), 'numpy.cosh', 'np.cosh', (['(2)'], {}), '(2)\n', (679, 682), True, 'import numpy as np\n'), ((685, 695), 'numpy.cosh', 'np.cosh', (['(1)'], {}), '(1)\n', (692, 695), True, 'import numpy as np\n'), ((730, 740), 'numpy.cosh', 'np.cosh', (['(1)'], {}), '(1)\n', (737, 740), True, 'import numpy as np\n'), ((764, 774), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (770, 774), True, 'import numpy as np\n'), ((778, 788), 'numpy.cosh', 'np.cosh', (['(2)'], {}), '(2)\n', (785, 788), True, 'import numpy as np\n'), ((791, 801), 'numpy.cosh', 'np.cosh', (['(1)'], {}), '(1)\n', (798, 801), True, 'import numpy as np\n'), ((836, 846), 'numpy.cosh', 'np.cosh', (['(1)'], {}), '(1)\n', (843, 846), True, 'import numpy as np\n'), ((392, 402), 'numpy.sinh', 'np.sinh', (['(2)'], {}), '(2)\n', (399, 402), True, 'import numpy as np\n'), ((498, 508), 'numpy.sinh', 'np.sinh', (['(2)'], {}), '(2)\n', (505, 508), True, 'import numpy as np\n'), ((717, 727), 'numpy.cosh', 'np.cosh', (['(2)'], {}), '(2)\n', (724, 727), True, 'import numpy as np\n'), ((823, 833), 'numpy.cosh', 'np.cosh', (['(2)'], {}), '(2)\n', (830, 833), True, 'import numpy as np\n'), ((1026, 1035), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (1032, 
1035), True, 'import numpy as np\n'), ((2069, 2079), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (2075, 2079), True, 'import numpy as np\n'), ((2120, 2130), 'numpy.exp', 'np.exp', (['(-2)'], {}), '(-2)\n', (2126, 2130), True, 'import numpy as np\n'), ((2100, 2110), 'numpy.cosh', 'np.cosh', (['(1)'], {}), '(1)\n', (2107, 2110), True, 'import numpy as np\n'), ((2151, 2161), 'numpy.sinh', 'np.sinh', (['(1)'], {}), '(1)\n', (2158, 2161), True, 'import numpy as np\n'), ((2087, 2097), 'numpy.cosh', 'np.cosh', (['(2)'], {}), '(2)\n', (2094, 2097), True, 'import numpy as np\n'), ((2138, 2148), 'numpy.sinh', 'np.sinh', (['(2)'], {}), '(2)\n', (2145, 2148), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import os
import sys
# ensure `tests` directory path is on top of Python's module search
filedir = os.path.dirname(__file__)
sys.path.insert(0, filedir)
while filedir in sys.path[1:]:
sys.path.pop(sys.path.index(filedir)) # avoid duplication
import pytest
import numpy as np
from copy import deepcopy
from backend import K, AE_CONFIGS, BASEDIR, tempdir, notify, make_autoencoder
from backend import _init_session, _do_test_load, _get_test_names
from deeptrain.util.preprocessors import Preprocessor
from deeptrain.metrics import _standardize, _weighted_loss
#### CONFIGURE TESTING #######################################################
batch_size = 128
width, height = 28, 28
channels = 1
datadir = os.path.join(BASEDIR, 'tests', 'data', 'image')
tests_done = {}
CONFIGS = deepcopy(AE_CONFIGS)
CONFIGS['model']['batch_shape'] = (batch_size, width, height, channels)
CONFIGS['datagen']['batch_size'] = batch_size
CONFIGS['val_datagen']['batch_size'] = batch_size
def init_session(C, weights_path=None, loadpath=None, model=None):
return _init_session(C, weights_path=weights_path, loadpath=loadpath,
model=model, model_fn=make_autoencoder)
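# numpy implementation of the custom mean |error|**L metric (L = 1.5); `mLe` below is the Keras-backend twin used as the model loss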
def mean_L_error(y_true, y_pred, sample_weight=1):
L = 1.5 # configurable
y_true, y_pred, sample_weight = _standardize(y_true, y_pred,
sample_weight)
return _weighted_loss(np.mean(np.abs(y_true - y_pred) ** L, axis=-1),
sample_weight)
def mLe(y_true, y_pred):
L = 1.5 # configurable
return K.mean(K.pow(K.abs(y_true - y_pred), L), axis=-1)
def numpy_loader(self, set_num):
# allow_pickle is irrelevant here, just for demo
return np.load(self._path(set_num), allow_pickle=True)
class RandCropPreprocessor(Preprocessor):
"""2D random crop. MNIST is 28x28, we try 25x25 crops,
e.g. batch[2:27, 3:28]."""
def __init__(self, size, crop_batch=True, crop_labels=False,
crop_same=False):
# length -> (length, length)
# (width, height) -> (width, height)
assert isinstance(size, (tuple, int))
self.size = size if isinstance(size, tuple) else (size, size)
self.crop_batch = crop_batch
self.crop_labels = crop_labels
self.crop_same = crop_same
def process(self, batch, labels):
if self.crop_batch:
(x_start, x_end), (y_start, y_end) = self._make_crop_mask(batch)
batch = batch[:, x_start:x_end, y_start:y_end]
if self.crop_labels:
if not self.crop_same or not self.crop_batch:
(x_start, x_end), (y_start, y_end
) = self._make_crop_mask(labels)
labels = labels[:, x_start:x_end, y_start:y_end]
return batch, labels
def _make_crop_mask(self, data):
_, w, h, *_ = data.shape # (samples, width, height, channels)
x_offset = np.random.randint(0, w - self.size[0])
y_offset = np.random.randint(0, h - self.size[1])
x_start, x_end = x_offset, x_offset + self.size[0]
y_start, y_end = y_offset, y_offset + self.size[1]
return (x_start, x_end), (y_start, y_end)
##############################################################################
@notify(tests_done)
def test_main():
C = deepcopy(AE_CONFIGS)
C['model' ].update({'loss': mLe,
'batch_shape': (128, 24, 24, 1)})
C['datagen' ].update({'data_loader': numpy_loader,
'preprocessor': RandCropPreprocessor(size=24)})
C['val_datagen'].update({'data_loader': numpy_loader,
'preprocessor': RandCropPreprocessor(size=24)})
C['traingen']['custom_metrics'] = {'mLe': mean_L_error}
with tempdir(C['traingen']['logs_dir']), \
tempdir(C['traingen']['best_models_dir']):
tg = init_session(C)
tg.train()
_do_test_load(tg, C, init_session)
##############################################################################
tests_done.update({name: None for name in _get_test_names(__name__)})
if __name__ == '__main__':
pytest.main([__file__, "-s"])
|
[
"numpy.abs",
"sys.path.insert",
"backend.notify",
"backend.tempdir",
"os.path.join",
"backend._do_test_load",
"pytest.main",
"os.path.dirname",
"numpy.random.randint",
"sys.path.index",
"backend._get_test_names",
"copy.deepcopy",
"backend.K.abs",
"deeptrain.metrics._standardize",
"backend._init_session"
] |
[((123, 148), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (138, 148), False, 'import os\n'), ((149, 176), 'sys.path.insert', 'sys.path.insert', (['(0)', 'filedir'], {}), '(0, filedir)\n', (164, 176), False, 'import sys\n'), ((734, 781), 'os.path.join', 'os.path.join', (['BASEDIR', '"""tests"""', '"""data"""', '"""image"""'], {}), "(BASEDIR, 'tests', 'data', 'image')\n", (746, 781), False, 'import os\n'), ((809, 829), 'copy.deepcopy', 'deepcopy', (['AE_CONFIGS'], {}), '(AE_CONFIGS)\n', (817, 829), False, 'from copy import deepcopy\n'), ((3320, 3338), 'backend.notify', 'notify', (['tests_done'], {}), '(tests_done)\n', (3326, 3338), False, 'from backend import K, AE_CONFIGS, BASEDIR, tempdir, notify, make_autoencoder\n'), ((1078, 1184), 'backend._init_session', '_init_session', (['C'], {'weights_path': 'weights_path', 'loadpath': 'loadpath', 'model': 'model', 'model_fn': 'make_autoencoder'}), '(C, weights_path=weights_path, loadpath=loadpath, model=model,\n model_fn=make_autoencoder)\n', (1091, 1184), False, 'from backend import _init_session, _do_test_load, _get_test_names\n'), ((1323, 1366), 'deeptrain.metrics._standardize', '_standardize', (['y_true', 'y_pred', 'sample_weight'], {}), '(y_true, y_pred, sample_weight)\n', (1335, 1366), False, 'from deeptrain.metrics import _standardize, _weighted_loss\n'), ((3364, 3384), 'copy.deepcopy', 'deepcopy', (['AE_CONFIGS'], {}), '(AE_CONFIGS)\n', (3372, 3384), False, 'from copy import deepcopy\n'), ((4193, 4222), 'pytest.main', 'pytest.main', (["[__file__, '-s']"], {}), "([__file__, '-s'])\n", (4204, 4222), False, 'import pytest\n'), ((225, 248), 'sys.path.index', 'sys.path.index', (['filedir'], {}), '(filedir)\n', (239, 248), False, 'import sys\n'), ((2973, 3011), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w - self.size[0])'], {}), '(0, w - self.size[0])\n', (2990, 3011), True, 'import numpy as np\n'), ((3031, 3069), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h - self.size[1])'], {}), '(0, h - self.size[1])\n', (3048, 3069), True, 'import numpy as np\n'), ((3830, 3864), 'backend.tempdir', 'tempdir', (["C['traingen']['logs_dir']"], {}), "(C['traingen']['logs_dir'])\n", (3837, 3864), False, 'from backend import K, AE_CONFIGS, BASEDIR, tempdir, notify, make_autoencoder\n'), ((3876, 3917), 'backend.tempdir', 'tempdir', (["C['traingen']['best_models_dir']"], {}), "(C['traingen']['best_models_dir'])\n", (3883, 3917), False, 'from backend import K, AE_CONFIGS, BASEDIR, tempdir, notify, make_autoencoder\n'), ((3975, 4009), 'backend._do_test_load', '_do_test_load', (['tg', 'C', 'init_session'], {}), '(tg, C, init_session)\n', (3988, 4009), False, 'from backend import _init_session, _do_test_load, _get_test_names\n'), ((1610, 1632), 'backend.K.abs', 'K.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (1615, 1632), False, 'from backend import K, AE_CONFIGS, BASEDIR, tempdir, notify, make_autoencoder\n'), ((4133, 4158), 'backend._get_test_names', '_get_test_names', (['__name__'], {}), '(__name__)\n', (4148, 4158), False, 'from backend import _init_session, _do_test_load, _get_test_names\n'), ((1450, 1473), 'numpy.abs', 'np.abs', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (1456, 1473), True, 'import numpy as np\n')]
|
"""
Main call.
TODO:
- parallize the mda processing portion? (dask)
"""
import numpy as np
import matplotlib.pyplot as plt
import MDAnalysis as mda
from command_line import create_cmd_arguments, handle_command_line
from calc_relax import Calc_19F_Relaxation
from calc_fh_dists import Calc_FH_Dists
from plot_relax import Plot_Relaxation
# if python file is being used
if __name__ == '__main__':
# args_list to save time for now (TODO)
magnet = 14.1 # Tesla (600 MHz of 1H+)
tc = 8.2e-9 # 8.2ns for CypA, tc in sec
"""
Command line
"""
# Create command line arguments with argparse
argument_parser = create_cmd_arguments()
# Retrieve list of args
args = handle_command_line(argument_parser)
    # TODO: hack for now, later put as separate args?
# CSA tensors for 4F-Trp
if args.system == "w4f":
sgm11 = 11.2
sgm22 = -48.3
sgm33 = -112.8
elif args.system == "w5f":
sgm11 = 4.8
sgm22 = -60.5
sgm33 = -86.1
elif args.system == "w6f":
sgm11 = 12.9
sgm22 = -51.2
sgm33 = -91.6
elif args.system == "w7f":
sgm11 = 4.6
sgm22 = -48.3
sgm33 = -123.3
"""
Load trajectory or pdb data and calc all F-H distances.
# TODO: do for each frame, also test with water
"""
# TODO: for big trajectories, can't load in_memory, must stream it but this can be slow
traj = mda.Universe(args.parm, args.crd, in_memory=True, in_memory_step=args.step_size)
fh_dist_base = Calc_FH_Dists(traj, dist=3).run()
"""
For each distance value, calculate the R1 and R2 value.
"""
    # TODO: update to ndarrays, maybe make into function, separate script?
    # test speed and optimize
    # TODO: make this able to take multiple files and find stdev, maybe a separate proc function
# array of size frames x 3 columns (frame, avg R1, avg R2) # TODO: add stdev?
r1_r2 = np.zeros(shape=(len(fh_dist_base.results[:,1:]), 3))
r1_r2[:, 0] = fh_dist_base.results[:,0]
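    # fh_dist_base.results layout: column 0 is the frame index, the remaining columns are the F-H distances for that frame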
    # Here: calling each calc class separately and only summing the dd contributions; csa is not distance dependent
    # note this new implementation is a lot slower... (compared to having just one calc_relax and averaging later)
# but not sure, didn't test the difference
for num, dists in enumerate(fh_dist_base.results[:,1:]):
calc_relax = Calc_19F_Relaxation(tc, magnet, sgm11, sgm22, sgm33)
r1_csa = calc_relax.calc_csa_r1()
r2_csa = calc_relax.calc_csa_r2()
# TODO: these are relatively small lists, may not need to change to ndarray
# but if I do, then I need to cut out the NaN or zero values before the np.mean step
r1_dd = 0
r2_dd = 0
for fh_dist in dists:
if fh_dist == 0:
continue # TODO: is there a better way to do this?
# instantiate the calc_relax class and then call individual class methods
calc_relax = Calc_19F_Relaxation(tc, magnet, sgm11, sgm22, sgm33, fh_dist)
# sum each dd contribution
r1_dd += calc_relax.calc_dd_r1()
r2_dd += calc_relax.calc_dd_r2()
# fill in col 1 (R1), col 2 (R2)
r1_r2[num, 1] = r1_dd + r1_csa
r1_r2[num, 2] = r2_dd + r2_csa
        # test separate values
print(r1_dd, r1_csa)
print(r2_dd, r2_csa)
"""
Save the frame, avg and stdev R1 and R2 data as a tsv?
"""
if args.output_file is not None:
np.savetxt(args.output_file, r1_r2, delimiter="\t")
"""
Plot the R1 and R2 data.
"""
# plt.plot(fh_dist_base.results[:,0], r1_r2[:,0])
# plt.plot(fh_dist_base.results[:,0], r1_r2[:,1])
plt.plot(r1_r2[:, 0], r1_r2[:, 1])
plt.plot(r1_r2[:, 0], r1_r2[:, 2])
print(f"R1-AVG={np.mean(r1_r2[:,1])}\nR2-AVG={np.mean(r1_r2[:,2])}")
#plt.hlines(1.99, xmin=0, xmax=fh_dist_base.results[-1,0]) # R1
#plt.hlines(109.1, xmin=0, xmax=fh_dist_base.results[-1,0]) # R2
plt.show()
# plotter class
# plotter = Plot_Relaxation(r1_r2, "dist")
# plotter.plot_r2()
# plt.show()
|
[
"numpy.mean",
"calc_fh_dists.Calc_FH_Dists",
"calc_relax.Calc_19F_Relaxation",
"command_line.handle_command_line",
"matplotlib.pyplot.plot",
"command_line.create_cmd_arguments",
"numpy.savetxt",
"MDAnalysis.Universe",
"matplotlib.pyplot.show"
] |
[((681, 703), 'command_line.create_cmd_arguments', 'create_cmd_arguments', ([], {}), '()\n', (701, 703), False, 'from command_line import create_cmd_arguments, handle_command_line\n'), ((743, 779), 'command_line.handle_command_line', 'handle_command_line', (['argument_parser'], {}), '(argument_parser)\n', (762, 779), False, 'from command_line import create_cmd_arguments, handle_command_line\n'), ((1482, 1567), 'MDAnalysis.Universe', 'mda.Universe', (['args.parm', 'args.crd'], {'in_memory': '(True)', 'in_memory_step': 'args.step_size'}), '(args.parm, args.crd, in_memory=True, in_memory_step=args.step_size\n )\n', (1494, 1567), True, 'import MDAnalysis as mda\n'), ((3759, 3793), 'matplotlib.pyplot.plot', 'plt.plot', (['r1_r2[:, 0]', 'r1_r2[:, 1]'], {}), '(r1_r2[:, 0], r1_r2[:, 1])\n', (3767, 3793), True, 'import matplotlib.pyplot as plt\n'), ((3798, 3832), 'matplotlib.pyplot.plot', 'plt.plot', (['r1_r2[:, 0]', 'r1_r2[:, 2]'], {}), '(r1_r2[:, 0], r1_r2[:, 2])\n', (3806, 3832), True, 'import matplotlib.pyplot as plt\n'), ((4052, 4062), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4060, 4062), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2496), 'calc_relax.Calc_19F_Relaxation', 'Calc_19F_Relaxation', (['tc', 'magnet', 'sgm11', 'sgm22', 'sgm33'], {}), '(tc, magnet, sgm11, sgm22, sgm33)\n', (2463, 2496), False, 'from calc_relax import Calc_19F_Relaxation\n'), ((3549, 3600), 'numpy.savetxt', 'np.savetxt', (['args.output_file', 'r1_r2'], {'delimiter': '"""\t"""'}), "(args.output_file, r1_r2, delimiter='\\t')\n", (3559, 3600), True, 'import numpy as np\n'), ((1582, 1609), 'calc_fh_dists.Calc_FH_Dists', 'Calc_FH_Dists', (['traj'], {'dist': '(3)'}), '(traj, dist=3)\n', (1595, 1609), False, 'from calc_fh_dists import Calc_FH_Dists\n'), ((3035, 3096), 'calc_relax.Calc_19F_Relaxation', 'Calc_19F_Relaxation', (['tc', 'magnet', 'sgm11', 'sgm22', 'sgm33', 'fh_dist'], {}), '(tc, magnet, sgm11, sgm22, sgm33, fh_dist)\n', (3054, 3096), False, 'from calc_relax import Calc_19F_Relaxation\n'), ((3853, 3873), 'numpy.mean', 'np.mean', (['r1_r2[:, 1]'], {}), '(r1_r2[:, 1])\n', (3860, 3873), True, 'import numpy as np\n'), ((3883, 3903), 'numpy.mean', 'np.mean', (['r1_r2[:, 2]'], {}), '(r1_r2[:, 2])\n', (3890, 3903), True, 'import numpy as np\n')]
|
import numpy as np
class BoundBox:
"""
Adopted from https://github.com/thtrieu/darkflow/blob/master/darkflow/utils/box.py
"""
def __init__(self, obj_prob, probs=None, box_coord=[float() for i in range(4)]):
self.x, self.y = float(box_coord[0]), float(box_coord[1])
self.w, self.h = float(box_coord[2]), float(box_coord[3])
self.c = 0.
self.obj_prob = obj_prob
self.class_probs = None if probs is None else np.array(probs)
def get_score(self):
return max(self.class_probs)
def get_classindex(self):
return np.argmax(self.class_probs) # class_index = np.argmax(box.classes)
def get_coordinates(self):
return self.x, self.y, self.w, self.h
def overlap(x1, w1, x2, w2):
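    # length of the 1-D overlap between two segments given by centre x and width w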
l1 = x1 - w1 / 2.
l2 = x2 - w2 / 2.
left = max(l1, l2)
r1 = x1 + w1 / 2.
r2 = x2 + w2 / 2.
right = min(r1, r2)
return right - left
def box_intersection(a, b):
w = overlap(a.x, a.w, b.x, b.w)
h = overlap(a.y, a.h, b.y, b.h)
    if w < 0 or h < 0:
        return 0
area = w * h
return area
def box_union(a, b):
i = box_intersection(a, b)
u = a.w * a.h + b.w * b.h - i
return u
def box_iou(a, b):
# Box intersect over union.
return box_intersection(a, b) / box_union(a, b)
def prob_compare(box):
return box.probs[box.class_num]
def prob_compare2(boxa, boxb):
if (boxa.pi < boxb.pi):
return 1
elif (boxa.pi == boxb.pi):
return 0
else:
return -1
|
[
"numpy.array",
"numpy.argmax"
] |
[((594, 621), 'numpy.argmax', 'np.argmax', (['self.class_probs'], {}), '(self.class_probs)\n', (603, 621), True, 'import numpy as np\n'), ((469, 484), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (477, 484), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import click
import os
PRIORITY = ('Read-through', 'Protein coding',
'Pseudogene', 'TUCP', 'lncrna', 'lncRNA', 'other', 'ncRNA,other')
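# category priority used by get_type() below: the first category found in this order wins for a gene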
type_map = {
'other': 'lncRNA',
'ncRNA,other': 'lncRNA',
'lncrna': 'lncRNA',
'protein_coding': 'Protein coding',
'pseudogene': 'Pseudogene',
'read_through': 'Read-through'
}
@click.command()
@click.option(
'-m',
'--meta_table',
type=click.Path(exists=True, dir_okay=False),
help='taco compare metadata',
required=True,
)
@click.option(
'-t',
'--tucp',
type=click.Path(exists=True, dir_okay=False),
help='tucp transcripts.',
required=True,
)
@click.option(
'-o',
'--out_dir',
type=click.Path(file_okay=False),
help='gene classify/summary directory based on \
taco compare result and feelnc classify.',
required=True
)
@click.option(
'-n',
'--name',
type=click.STRING,
help='Summary table name',
default=None
)
def main(meta_table, tucp, out_dir, name):
meta_table_df = pd.read_table(meta_table, index_col=0)
tucp_df = pd.read_table(tucp, header=None, index_col=0)
tucp_series = tucp_df.index.intersection(meta_table_df.index)
# label TUCP
meta_table_df.loc[tucp_series, 'category'] = 'TUCP'
# label read_through
mask = meta_table_df.category_relative_detail == 'read_through'
meta_table_df.loc[mask, 'category'] = 'read_through'
# filter out intronic transcripts
meta_table_df = meta_table_df[meta_table_df.category_relative_detail !=
'intronic_same_strand']
# rename gene type to analysis name
    meta_table_df['category'] = meta_table_df['category'].replace(type_map)
# function to summarize transcript/gene type
def type_summary(type_df, col_name):
type_df.columns = ['category', 'novel_status']
type_summary = type_df.groupby(
['category', 'novel_status']).size()
type_summary.name = col_name
type_summary = pd.DataFrame(type_summary)
f_sum = type_summary.unstack('novel_status', fill_value=0)
f_sum.loc[:, (col_name, 'Total')] = (
f_sum.loc[:, (col_name, 'Annotated')] +
f_sum.loc[:, (col_name, 'Unannotated')])
return f_sum
# annotation status according to exonic_overlap
meta_table_df.loc[:, 'novel_status'] = np.where(
meta_table_df.category_relative == 'exonic_overlap',
'Annotated', 'Unannotated')
meta_table_df = meta_table_df.reset_index()
tr_sum = type_summary(meta_table_df.loc[:, ['category', 'novel_status']],
'Transcript')
meta_table_df.loc[:, 'new_gene_id'] = meta_table_df.novel_status + \
'.' + meta_table_df.gene_id
tr_type_df = meta_table_df.loc[:, ['transcript_id', 'new_gene_id', 'category']]
meta_table_type_df = meta_table_df.loc[:, [
'new_gene_id', 'category', 'novel_status']]
meta_table_type_df.columns = ['gene_id', 'category', 'novel_status']
gene_type_map = meta_table_type_df.groupby(
['gene_id', 'novel_status'])['category'].unique()
meta_table_df = meta_table_df.reset_index()
gene_name_df = meta_table_df.loc[:, ['new_gene_id',
'category_relative',
'ref_gene_id',
'ref_gene_name']]
gene_name_df.columns = [
'gene_id', 'category_relative', 'ref_gene_id', 'ref_gene_name']
gene_name_df = gene_name_df[gene_name_df.category_relative ==
'exonic_overlap']
gene_name_df = gene_name_df.loc[:, [
'gene_id', 'ref_gene_id', 'ref_gene_name']].drop_duplicates()
def get_type(type_list):
for each_type in PRIORITY:
if each_type in type_list:
return type_map.get(each_type, each_type)
gene_type_list = map(get_type, gene_type_map)
gene_type_df = pd.DataFrame(
gene_type_list, index=gene_type_map.index, columns=['type'])
gene_type_df = gene_type_df.reset_index().set_index('gene_id')
read_through_genes = gene_type_df[gene_type_df.type ==
"Read-through"].index
gene_name_df = gene_name_df[~gene_name_df.gene_id.isin(read_through_genes)]
gene_name_df = gene_name_df.set_index('gene_id')
read_through_sup = gene_name_df[
gene_name_df.index.value_counts() > 1].index.unique()
gene_type_df.loc[read_through_sup, 'type'] = 'Read-through'
g_sum = type_summary(gene_type_df.loc[:, ['type', 'novel_status']],
'Gene')
type_stats = pd.concat([tr_sum, g_sum], axis=1)
type_stats.index.name = 'Category'
summary_file = os.path.join(out_dir, 'assembly.number.summary.txt')
classify_file = os.path.join(out_dir, 'gene.classify.txt')
tr_classify_file = os.path.join(out_dir, 'tr.classify.txt')
name_file = os.path.join(out_dir, 'gene.name.txt')
if name is not None:
type_stats.loc[:, ('', 'Name')] = name
output_header = False
else:
output_header = True
gene_type_df = gene_type_df.drop('novel_status', axis=1)
type_stats.to_csv(summary_file, sep='\t', header=output_header)
gene_type_df.to_csv(classify_file, sep='\t')
tr_type_df.to_csv(tr_classify_file, sep='\t', index=False)
gene_name_df = gene_name_df[gene_name_df.index.value_counts() == 1]
gene_name_df.to_csv(name_file, sep='\t')
if __name__ == '__main__':
main()
|
[
"click.option",
"numpy.where",
"os.path.join",
"click.Path",
"pandas.read_table",
"pandas.DataFrame",
"click.command",
"pandas.concat"
] |
[((390, 405), 'click.command', 'click.command', ([], {}), '()\n', (403, 405), False, 'import click\n'), ((897, 989), 'click.option', 'click.option', (['"""-n"""', '"""--name"""'], {'type': 'click.STRING', 'help': '"""Summary table name"""', 'default': 'None'}), "('-n', '--name', type=click.STRING, help='Summary table name',\n default=None)\n", (909, 989), False, 'import click\n'), ((1071, 1109), 'pandas.read_table', 'pd.read_table', (['meta_table'], {'index_col': '(0)'}), '(meta_table, index_col=0)\n', (1084, 1109), True, 'import pandas as pd\n'), ((1124, 1169), 'pandas.read_table', 'pd.read_table', (['tucp'], {'header': 'None', 'index_col': '(0)'}), '(tucp, header=None, index_col=0)\n', (1137, 1169), True, 'import pandas as pd\n'), ((2397, 2490), 'numpy.where', 'np.where', (["(meta_table_df.category_relative == 'exonic_overlap')", '"""Annotated"""', '"""Unannotated"""'], {}), "(meta_table_df.category_relative == 'exonic_overlap', 'Annotated',\n 'Unannotated')\n", (2405, 2490), True, 'import numpy as np\n'), ((3983, 4056), 'pandas.DataFrame', 'pd.DataFrame', (['gene_type_list'], {'index': 'gene_type_map.index', 'columns': "['type']"}), "(gene_type_list, index=gene_type_map.index, columns=['type'])\n", (3995, 4056), True, 'import pandas as pd\n'), ((4670, 4704), 'pandas.concat', 'pd.concat', (['[tr_sum, g_sum]'], {'axis': '(1)'}), '([tr_sum, g_sum], axis=1)\n', (4679, 4704), True, 'import pandas as pd\n'), ((4763, 4815), 'os.path.join', 'os.path.join', (['out_dir', '"""assembly.number.summary.txt"""'], {}), "(out_dir, 'assembly.number.summary.txt')\n", (4775, 4815), False, 'import os\n'), ((4836, 4878), 'os.path.join', 'os.path.join', (['out_dir', '"""gene.classify.txt"""'], {}), "(out_dir, 'gene.classify.txt')\n", (4848, 4878), False, 'import os\n'), ((4902, 4942), 'os.path.join', 'os.path.join', (['out_dir', '"""tr.classify.txt"""'], {}), "(out_dir, 'tr.classify.txt')\n", (4914, 4942), False, 'import os\n'), ((4959, 4997), 'os.path.join', 'os.path.join', (['out_dir', '"""gene.name.txt"""'], {}), "(out_dir, 'gene.name.txt')\n", (4971, 4997), False, 'import os\n'), ((2035, 2061), 'pandas.DataFrame', 'pd.DataFrame', (['type_summary'], {}), '(type_summary)\n', (2047, 2061), True, 'import pandas as pd\n'), ((460, 499), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (470, 499), False, 'import click\n'), ((604, 643), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'dir_okay': '(False)'}), '(exists=True, dir_okay=False)\n', (614, 643), False, 'import click\n'), ((747, 774), 'click.Path', 'click.Path', ([], {'file_okay': '(False)'}), '(file_okay=False)\n', (757, 774), False, 'import click\n')]
|
#!/usr/bin/env python
"""Module for global fitting titrations (pH and cl) on 2 datasets
"""
import os
import sys
import argparse
import numpy as np
from lmfit import Parameters, Minimizer, minimize, conf_interval, report_fit
import pandas as pd
import matplotlib.pyplot as plt
# from scipy import optimize
def ci_report(ci):
"""return text of a report for confidence intervals"""
maxlen = max([len(i) for i in ci])
buff = []
add = buff.append
convp = lambda x: ("%.2f" % (x[0]*100))+'%'
# I modified "%.5f"
conv = lambda x: "%.6G" % x[1]
title_shown = False
for name, row in ci.items():
if not title_shown:
add("".join([''.rjust(maxlen)] +
[i.rjust(10) for i in map(convp, row)]))
title_shown = True
add("".join([name.rjust(maxlen)] +
[i.rjust(10) for i in map(conv, row)]))
return '\n'.join(buff)
def residual(pars, x, data=None, titration_type=None):
"""residual function for lmfit
Parameters
----------
pars: lmfit Parameters()
x : list of x vectors
    data : list of y vectors
    titration_type : 'pH' or 'cl'
    Return
    ------
    a vector of the residuals (yfit - data)
or the fitted values
"""
vals = pars.valuesdict()
SA1 = vals['SA1']
SB1 = vals['SB1']
K = vals['K']
SA2 = vals['SA2']
SB2 = vals['SB2']
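    # pH titration: sigmoids in 10**(K - x) sharing the midpoint K; cl titration: saturation curves in x/K sharing K — one global K couples both datasets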
if titration_type == 'pH':
model1 = (SB1 + SA1 * 10 ** (K - x[0])) / (1 + 10 ** (K - x[0]))
model2 = (SB2 + SA2 * 10 ** (K - x[1])) / (1 + 10 ** (K - x[1]))
elif titration_type == 'cl':
model1 = (SA1 + SB1 * x[0] / K) / (1 + x[0] / K)
model2 = (SA2 + SB2 * x[1] / K) / (1 + x[1] / K)
else:
print('Error: residual call must indicate a titration type')
sys.exit()
if data is None:
return np.r_[model1, model2]
return np.r_[model1 - data[0], model2 - data[1]]
def main():
description = "Fit a pH or Cl titration file: x y1 y2"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('file',
help='the file <x y1 y2> without heads')
parser.add_argument('out_folder',
help='The folder to output the .txt and .png files')
parser.add_argument('-t', '--titration-of', dest='titration_type',
action="store", default="pH", choices=["pH", "cl"],
help='Type of titration, pH or cl')
parser.add_argument('-v', '--verbose', action='store_true',
help='Printout runtime information.increase verbosity')
parser.add_argument('--boot', dest='nboot', type=int,
                        help='bootstrapping using <n> iterations')
args = parser.parse_args()
ttype = args.titration_type
#df = pd.read_csv(args.file, sep=' ', names=['x', 'y1', 'y2'])
df = pd.read_csv(args.file)
if not os.path.isdir(args.out_folder):
os.makedirs(args.out_folder)
fit_params = Parameters()
fit_params.add('SA1', value=df.y1[df.x == min(df.x)].values[0], min=0)
fit_params.add('SB1', value=df.y1[df.x == max(df.x)].values[0], min=0)
fit_params.add('SA2', value=df.y2[df.x == min(df.x)].values[0], min=0)
fit_params.add('SB2', value=df.y2[df.x == max(df.x)].values[0], min=0)
if args.titration_type == "pH":
fit_params.add('K', value=7, min=4, max=10)
elif args.titration_type == "cl":
fit_params.add('K', value=20, min=0, max=1000)
mini = Minimizer(residual, fit_params, fcn_args=([df.x, df.x],),
fcn_kws={'data': [df.y1, df.y2], 'titration_type': ttype})
res = mini.minimize()
report_fit(fit_params)
ci = conf_interval(mini, res, sigmas=[.674, .95])
print(ci_report(ci))
# plotting
xfit = np.linspace(df.x.min(), df.x.max(), 100)
yfit = residual(fit_params, [xfit, xfit], titration_type=ttype) # kws={}
yfit = yfit.reshape(2, len(yfit) // 2)
plt.plot(df.x, df.y1, 'o', df.x, df.y2, 's', xfit, yfit[0], '-',
xfit, yfit[1], '-')
plt.grid(True)
f_out = os.path.join(args.out_folder, os.path.split(args.file)[1])
plt.savefig(f_out + ".png")
if args.nboot:
bootstrap(df, args.nboot, fit_params, f_out, ttype)
def bootstrap(df, nboot, fit_params, f_out, ttype):
"""Perform bootstrap to estimate parameters variance
Parameters
----------
df : DataFrame
nboot : int
fit_params: lmfit.fit_params
    f_out : string
    ttype : string
Output
------
print results
plot
"""
import seaborn as sns
n_points = len(df)
kds = []
sa1 = []
sb1 = []
sa2 = []
sb2 = []
for i in range(nboot):
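        # resample rows with replacement, independently for the y1 and y2 curves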
        boot_idxs = np.random.randint(0, n_points, n_points)  # high is exclusive; include the last row index
df2 = df.loc[boot_idxs]
df2.reset_index(drop=True, inplace=True)
        boot_idxs = np.random.randint(0, n_points, n_points)  # high is exclusive; include the last row index
df3 = df.loc[boot_idxs]
df3.reset_index(drop=True, inplace=True)
try:
res = minimize(residual, fit_params, args=([df2.x, df3.x],),
kws={'data': [df2.y1, df3.y2], 'titration_type': ttype})
kds.append(res.params['K'].value)
sa1.append(res.params['SA1'].value)
sb1.append(res.params['SB1'].value)
sa2.append(res.params['SA2'].value)
sb2.append(res.params['SB2'].value)
        except Exception:
print(df2)
print(df3)
dff = pd.DataFrame({'K': kds, 'SA1': sa1, 'SB1': sb1, 'SA2': sa2,
'SB2': sb2})
print("bootstrap: ",
round(dff.K.quantile(.025), 3),
round(dff.K.quantile(.163), 3),
round(dff.K.median(), 3),
round(dff.K.quantile(.837), 3),
round(dff.K.quantile(.975), 3))
sns.set_style('darkgrid')
g = sns.PairGrid(dff)
# g.map_diag(sns.kdeplot, lw=3)
g.map_diag(plt.hist, alpha=0.4)
g.map_upper(plt.scatter, s=9, alpha=0.6)
g.map_lower(sns.kdeplot, cmap="Blues_d")
plt.savefig(f_out + "-bs" + ".png")
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"lmfit.Minimizer",
"argparse.ArgumentParser",
"pandas.read_csv",
"lmfit.conf_interval",
"os.makedirs",
"matplotlib.pyplot.plot",
"os.path.split",
"seaborn.set_style",
"numpy.random.randint",
"os.path.isdir",
"lmfit.report_fit",
"sys.exit",
"pandas.DataFrame",
"seaborn.PairGrid",
"lmfit.Parameters",
"lmfit.minimize"
] |
[((1986, 2034), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (2009, 2034), False, 'import argparse\n'), ((2861, 2883), 'pandas.read_csv', 'pd.read_csv', (['args.file'], {}), '(args.file)\n', (2872, 2883), True, 'import pandas as pd\n'), ((2981, 2993), 'lmfit.Parameters', 'Parameters', ([], {}), '()\n', (2991, 2993), False, 'from lmfit import Parameters, Minimizer, minimize, conf_interval, report_fit\n'), ((3486, 3606), 'lmfit.Minimizer', 'Minimizer', (['residual', 'fit_params'], {'fcn_args': '([df.x, df.x],)', 'fcn_kws': "{'data': [df.y1, df.y2], 'titration_type': ttype}"}), "(residual, fit_params, fcn_args=([df.x, df.x],), fcn_kws={'data':\n [df.y1, df.y2], 'titration_type': ttype})\n", (3495, 3606), False, 'from lmfit import Parameters, Minimizer, minimize, conf_interval, report_fit\n'), ((3652, 3674), 'lmfit.report_fit', 'report_fit', (['fit_params'], {}), '(fit_params)\n', (3662, 3674), False, 'from lmfit import Parameters, Minimizer, minimize, conf_interval, report_fit\n'), ((3684, 3730), 'lmfit.conf_interval', 'conf_interval', (['mini', 'res'], {'sigmas': '[0.674, 0.95]'}), '(mini, res, sigmas=[0.674, 0.95])\n', (3697, 3730), False, 'from lmfit import Parameters, Minimizer, minimize, conf_interval, report_fit\n'), ((3947, 4036), 'matplotlib.pyplot.plot', 'plt.plot', (['df.x', 'df.y1', '"""o"""', 'df.x', 'df.y2', '"""s"""', 'xfit', 'yfit[0]', '"""-"""', 'xfit', 'yfit[1]', '"""-"""'], {}), "(df.x, df.y1, 'o', df.x, df.y2, 's', xfit, yfit[0], '-', xfit, yfit\n [1], '-')\n", (3955, 4036), True, 'import matplotlib.pyplot as plt\n'), ((4049, 4063), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4057, 4063), True, 'import matplotlib.pyplot as plt\n'), ((4139, 4166), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(f_out + '.png')"], {}), "(f_out + '.png')\n", (4150, 4166), True, 'import matplotlib.pyplot as plt\n'), ((5436, 5508), 'pandas.DataFrame', 'pd.DataFrame', (["{'K': kds, 'SA1': sa1, 'SB1': sb1, 'SA2': sa2, 'SB2': sb2}"], {}), "({'K': kds, 'SA1': sa1, 'SB1': sb1, 'SA2': sa2, 'SB2': sb2})\n", (5448, 5508), True, 'import pandas as pd\n'), ((5766, 5791), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (5779, 5791), True, 'import seaborn as sns\n'), ((5800, 5817), 'seaborn.PairGrid', 'sns.PairGrid', (['dff'], {}), '(dff)\n', (5812, 5817), True, 'import seaborn as sns\n'), ((5984, 6019), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(f_out + '-bs' + '.png')"], {}), "(f_out + '-bs' + '.png')\n", (5995, 6019), True, 'import matplotlib.pyplot as plt\n'), ((2895, 2925), 'os.path.isdir', 'os.path.isdir', (['args.out_folder'], {}), '(args.out_folder)\n', (2908, 2925), False, 'import os\n'), ((2935, 2963), 'os.makedirs', 'os.makedirs', (['args.out_folder'], {}), '(args.out_folder)\n', (2946, 2963), False, 'import os\n'), ((4693, 4737), 'numpy.random.randint', 'np.random.randint', (['(0)', '(n_points - 1)', 'n_points'], {}), '(0, n_points - 1, n_points)\n', (4710, 4737), True, 'import numpy as np\n'), ((4837, 4881), 'numpy.random.randint', 'np.random.randint', (['(0)', '(n_points - 1)', 'n_points'], {}), '(0, n_points - 1, n_points)\n', (4854, 4881), True, 'import numpy as np\n'), ((1778, 1788), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1786, 1788), False, 'import sys\n'), ((4106, 4130), 'os.path.split', 'os.path.split', (['args.file'], {}), '(args.file)\n', (4119, 4130), False, 'import os\n'), ((4992, 5107), 'lmfit.minimize', 'minimize', (['residual', 
'fit_params'], {'args': '([df2.x, df3.x],)', 'kws': "{'data': [df2.y1, df3.y2], 'titration_type': ttype}"}), "(residual, fit_params, args=([df2.x, df3.x],), kws={'data': [df2.y1,\n df3.y2], 'titration_type': ttype})\n", (5000, 5107), False, 'from lmfit import Parameters, Minimizer, minimize, conf_interval, report_fit\n')]
|
#!/usr/bin/env python
from __future__ import division
import numpy as np
from lfd.environment.simulation import DynamicSimulationRobotWorld
from lfd.environment.simulation_object import XmlSimulationObject, BoxSimulationObject
from lfd.environment import environment
from lfd.environment import sim_util
from lfd.demonstration.demonstration import Demonstration
from lfd.registration.registration import TpsRpmRegistrationFactory
from lfd.registration.plotting_openrave import registration_plot_cb
from lfd.transfer.transfer import FingerTrajectoryTransferer
from lfd.transfer.registration_transfer import TwoStepRegistrationAndTrajectoryTransferer
from move_rope import create_augmented_traj, create_rope
def create_rope_demo(env, rope_poss):
rope_sim_obj = create_rope(rope_poss)
env.sim.add_objects([rope_sim_obj])
env.sim.settle()
scene_state = env.observe_scene()
env.sim.remove_objects([rope_sim_obj])
pick_pos = rope_poss[0] + .1 * (rope_poss[1] - rope_poss[0])
drop_pos = rope_poss[3] + .1 * (rope_poss[2] - rope_poss[3]) + np.r_[0, .2, 0]
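    # pick a point 10% along the first rope segment; drop 10% along the last segment, offset 0.2 in +y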
pick_R = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])
drop_R = np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])
move_height = .2
aug_traj = create_augmented_traj(env.sim.robot, pick_pos, drop_pos, pick_R, drop_R, move_height)
demo = Demonstration("rope_demo", scene_state, aug_traj)
return demo
def main():
# define simulation objects
table_height = 0.77
sim_objs = []
sim_objs.append(XmlSimulationObject("robots/pr2-beta-static.zae", dynamic=False))
sim_objs.append(BoxSimulationObject("table", [1, 0, table_height-.1], [.85, .85, .1], dynamic=False))
# initialize simulation world and environment
sim = DynamicSimulationRobotWorld()
sim.add_objects(sim_objs)
sim.create_viewer()
sim.robot.SetDOFValues([0.25], [sim.robot.GetJoint('torso_lift_joint').GetJointIndex()])
sim.robot.SetDOFValues([1.25], [sim.robot.GetJoint('head_tilt_joint').GetJointIndex()]) # move head down so it can see the rope
sim_util.reset_arms_to_side(sim)
env = environment.LfdEnvironment(sim, sim, downsample_size=0.025)
demo_rope_poss = np.array([[.2, -.2, table_height+0.006],
[.8, -.2, table_height+0.006],
[.8, .2, table_height+0.006],
[.2, .2, table_height+0.006]])
demo = create_rope_demo(env, demo_rope_poss)
test_rope_poss = np.array([[.2, -.2, table_height+0.006],
[.5, -.4, table_height+0.006],
[.8, .0, table_height+0.006],
[.8, .2, table_height+0.006],
[.6, .0, table_height+0.006],
[.4, .2, table_height+0.006],
[.2, .2, table_height+0.006]])
test_rope_sim_obj = create_rope(test_rope_poss)
sim.add_objects([test_rope_sim_obj])
sim.settle()
test_scene_state = env.observe_scene()
reg_factory = TpsRpmRegistrationFactory()
traj_transferer = FingerTrajectoryTransferer(sim)
plot_cb = lambda i, i_em, x_nd, y_md, xtarg_nd, wt_n, f, corr_nm, rad: registration_plot_cb(sim, x_nd, y_md, f)
reg_and_traj_transferer = TwoStepRegistrationAndTrajectoryTransferer(reg_factory, traj_transferer)
test_aug_traj = reg_and_traj_transferer.transfer(demo, test_scene_state, callback=plot_cb, plotting=True)
env.execute_augmented_trajectory(test_aug_traj)
if __name__ == '__main__':
main()
|
[
"move_rope.create_rope",
"lfd.environment.environment.LfdEnvironment",
"move_rope.create_augmented_traj",
"lfd.transfer.registration_transfer.TwoStepRegistrationAndTrajectoryTransferer",
"lfd.environment.sim_util.reset_arms_to_side",
"lfd.registration.registration.TpsRpmRegistrationFactory",
"lfd.environment.simulation_object.XmlSimulationObject",
"numpy.array",
"lfd.demonstration.demonstration.Demonstration",
"lfd.environment.simulation.DynamicSimulationRobotWorld",
"lfd.environment.simulation_object.BoxSimulationObject",
"lfd.registration.plotting_openrave.registration_plot_cb",
"lfd.transfer.transfer.FingerTrajectoryTransferer"
] |
[((767, 789), 'move_rope.create_rope', 'create_rope', (['rope_poss'], {}), '(rope_poss)\n', (778, 789), False, 'from move_rope import create_augmented_traj, create_rope\n'), ((1098, 1142), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 1, 0], [-1, 0, 0]]'], {}), '([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])\n', (1106, 1142), True, 'import numpy as np\n'), ((1156, 1201), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 0, -1], [-1, 0, 0]]'], {}), '([[0, 1, 0], [0, 0, -1], [-1, 0, 0]])\n', (1164, 1201), True, 'import numpy as np\n'), ((1238, 1327), 'move_rope.create_augmented_traj', 'create_augmented_traj', (['env.sim.robot', 'pick_pos', 'drop_pos', 'pick_R', 'drop_R', 'move_height'], {}), '(env.sim.robot, pick_pos, drop_pos, pick_R, drop_R,\n move_height)\n', (1259, 1327), False, 'from move_rope import create_augmented_traj, create_rope\n'), ((1340, 1389), 'lfd.demonstration.demonstration.Demonstration', 'Demonstration', (['"""rope_demo"""', 'scene_state', 'aug_traj'], {}), "('rope_demo', scene_state, aug_traj)\n", (1353, 1389), False, 'from lfd.demonstration.demonstration import Demonstration\n'), ((1750, 1779), 'lfd.environment.simulation.DynamicSimulationRobotWorld', 'DynamicSimulationRobotWorld', ([], {}), '()\n', (1777, 1779), False, 'from lfd.environment.simulation import DynamicSimulationRobotWorld\n'), ((2068, 2100), 'lfd.environment.sim_util.reset_arms_to_side', 'sim_util.reset_arms_to_side', (['sim'], {}), '(sim)\n', (2095, 2100), False, 'from lfd.environment import sim_util\n'), ((2116, 2175), 'lfd.environment.environment.LfdEnvironment', 'environment.LfdEnvironment', (['sim', 'sim'], {'downsample_size': '(0.025)'}), '(sim, sim, downsample_size=0.025)\n', (2142, 2175), False, 'from lfd.environment import environment\n'), ((2202, 2360), 'numpy.array', 'np.array', (['[[0.2, -0.2, table_height + 0.006], [0.8, -0.2, table_height + 0.006], [0.8,\n 0.2, table_height + 0.006], [0.2, 0.2, table_height + 0.006]]'], {}), '([[0.2, -0.2, table_height + 0.006], [0.8, -0.2, table_height + \n 0.006], [0.8, 0.2, table_height + 0.006], [0.2, 0.2, table_height + 0.006]]\n )\n', (2210, 2360), True, 'import numpy as np\n'), ((2508, 2773), 'numpy.array', 'np.array', (['[[0.2, -0.2, table_height + 0.006], [0.5, -0.4, table_height + 0.006], [0.8,\n 0.0, table_height + 0.006], [0.8, 0.2, table_height + 0.006], [0.6, 0.0,\n table_height + 0.006], [0.4, 0.2, table_height + 0.006], [0.2, 0.2, \n table_height + 0.006]]'], {}), '([[0.2, -0.2, table_height + 0.006], [0.5, -0.4, table_height + \n 0.006], [0.8, 0.0, table_height + 0.006], [0.8, 0.2, table_height + \n 0.006], [0.6, 0.0, table_height + 0.006], [0.4, 0.2, table_height + \n 0.006], [0.2, 0.2, table_height + 0.006]])\n', (2516, 2773), True, 'import numpy as np\n'), ((2952, 2979), 'move_rope.create_rope', 'create_rope', (['test_rope_poss'], {}), '(test_rope_poss)\n', (2963, 2979), False, 'from move_rope import create_augmented_traj, create_rope\n'), ((3104, 3131), 'lfd.registration.registration.TpsRpmRegistrationFactory', 'TpsRpmRegistrationFactory', ([], {}), '()\n', (3129, 3131), False, 'from lfd.registration.registration import TpsRpmRegistrationFactory\n'), ((3154, 3185), 'lfd.transfer.transfer.FingerTrajectoryTransferer', 'FingerTrajectoryTransferer', (['sim'], {}), '(sim)\n', (3180, 3185), False, 'from lfd.transfer.transfer import FingerTrajectoryTransferer\n'), ((3337, 3409), 'lfd.transfer.registration_transfer.TwoStepRegistrationAndTrajectoryTransferer', 'TwoStepRegistrationAndTrajectoryTransferer', (['reg_factory', 'traj_transferer'], {}), 
'(reg_factory, traj_transferer)\n', (3379, 3409), False, 'from lfd.transfer.registration_transfer import TwoStepRegistrationAndTrajectoryTransferer\n'), ((1513, 1577), 'lfd.environment.simulation_object.XmlSimulationObject', 'XmlSimulationObject', (['"""robots/pr2-beta-static.zae"""'], {'dynamic': '(False)'}), "('robots/pr2-beta-static.zae', dynamic=False)\n", (1532, 1577), False, 'from lfd.environment.simulation_object import XmlSimulationObject, BoxSimulationObject\n'), ((1599, 1693), 'lfd.environment.simulation_object.BoxSimulationObject', 'BoxSimulationObject', (['"""table"""', '[1, 0, table_height - 0.1]', '[0.85, 0.85, 0.1]'], {'dynamic': '(False)'}), "('table', [1, 0, table_height - 0.1], [0.85, 0.85, 0.1],\n dynamic=False)\n", (1618, 1693), False, 'from lfd.environment.simulation_object import XmlSimulationObject, BoxSimulationObject\n'), ((3266, 3306), 'lfd.registration.plotting_openrave.registration_plot_cb', 'registration_plot_cb', (['sim', 'x_nd', 'y_md', 'f'], {}), '(sim, x_nd, y_md, f)\n', (3286, 3306), False, 'from lfd.registration.plotting_openrave import registration_plot_cb\n')]
|
import csv
import os.path
import random
import numpy as np
import scipy.io
import torch
import torchvision
from torch.utils.data import Dataset
# from .util import *
from data.util import default_loader, read_img, augment, get_image_paths
class PIPALFolder(Dataset):
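    # Loads (distorted, reference) image pairs from the PIPAL IQA training set; labels come from Train_Label and are divided by 1000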
def __init__(self, root=None, index=None, transform=None, opt=None):
if index is None:
index = list(range(0, 200))
if opt is not None:
self.opt = opt
root = opt['datasets']['pipal']
patch_num = opt['patch_num']
else:
patch_num = 32
refpath = os.path.join(root, 'Train_Ref')
refname = self.getFileName(refpath, '.bmp')
dispath = os.path.join(root, 'Train_Dis')
txtpath = os.path.join(root, 'Train_Label')
sample = []
for i, item in enumerate(index):
ref = refname[item]
# print(ref, end=' ')
txtname = ref.split('.')[0] + '.txt'
fh = open(os.path.join(txtpath, txtname), 'r')
for line in fh:
line = line.split('\n')
words = line[0].split(',')
for aug in range(patch_num):
sample.append((
(os.path.join(dispath, words[0]), os.path.join(refpath, ref)),
np.array(words[1]).astype(np.float32) / 1000.0
))
# print('')
self.samples = sorted(sample)
self.transform = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
self.patch_size = opt['patch_size']
# self.loader = default_loader
def __getitem__(self, index):
path, target = self.samples[index]
'''img_dis = self.loader(path[0])
img_ref = self.loader(path[1])'''
img_dis = read_img(env=None, path=path[0])
img_ref = read_img(env=None, path=path[1])
'''if self.transform is not None:
img_dis = self.transform(img_dis)
img_ref = self.transform(img_ref)'''
if self.patch_size < 288:
H, W, _ = img_ref.shape
crop_size = self.patch_size
rnd_h = random.randint(0, max(0, (H - crop_size)))
rnd_w = random.randint(0, max(0, (W - crop_size)))
img_dis = img_dis[rnd_h:rnd_h + crop_size, rnd_w:rnd_w + crop_size, :]
img_ref = img_ref[rnd_h:rnd_h + crop_size, rnd_w:rnd_w + crop_size, :]
# augmentation - flip, rotate
img_dis, img_ref = augment([img_dis, img_ref], self.opt['use_flip'], rot=False)
if img_ref.shape[2] == 3:
img_ref = img_ref[:, :, [2, 1, 0]]
img_dis = img_dis[:, :, [2, 1, 0]]
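        # Convert the HWC numpy images to CHW float tensors before normalization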
img_ref = torch.from_numpy(np.ascontiguousarray(np.transpose(img_ref, (2, 0, 1)))).float()
img_dis = torch.from_numpy(np.ascontiguousarray(np.transpose(img_dis, (2, 0, 1)))).float()
img_dis = self.transform(img_dis)
img_ref = self.transform(img_ref)
return {'Dis': img_dis, 'Ref': img_ref, 'Label': target}
def __len__(self):
length = len(self.samples)
return length
@staticmethod
def getFileName(path, suffix):
filename = []
f_list = os.listdir(path)
# print f_list
for i in f_list:
if os.path.splitext(i)[1] == suffix:
filename.append(i)
filename.sort()
return filename
# TODO
class IQATestDataset(Dataset):
def __init__(self, opt):
super(IQATestDataset, self).__init__()
self.opt = opt
self.paths_Dis = None
        self.paths_Ref = None
        # 'root' and 'index' were never defined in this TODO class; assume the same
        # defaults as PIPALFolder above so the loader is at least runnable
        root = opt['datasets']['pipal']
        index = list(range(0, 200))
        refpath = os.path.join(root, 'Train_Ref')
refname = self.getFileName(refpath, '.bmp')
dispath = os.path.join(root, 'Train_Dis')
txtpath = os.path.join(root, 'Train_Label')
sample = []
for i, item in enumerate(index):
ref = refname[item]
# print(ref, end=' ')
txtname = ref.split('.')[0] + '.txt'
fh = open(os.path.join(txtpath, txtname), 'r')
for line in fh:
line = line.split('\n')
words = line[0].split(',')
sample.append((
(os.path.join(dispath, words[0]), os.path.join(refpath, ref)),
np.array(words[1]).astype(np.float32)
))
# print('')
self.samples = sample
self.transform = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def __getitem__(self, index):
path, target = self.samples[index]
img_dis = read_img(env=None, path=path[0])
img_ref = read_img(env=None, path=path[1])
'''H, W, _ = img_ref.shape
crop_size = 224
rnd_h = random.randint(0, max(0, (H - crop_size) // 2))
rnd_w = random.randint(0, max(0, (W - crop_size) // 2))
img_dis = img_dis[rnd_h:rnd_h + crop_size, rnd_w:rnd_w + crop_size, :]
img_ref = img_ref[rnd_h:rnd_h + crop_size, rnd_w:rnd_w + crop_size, :]
# augmentation - flip, rotate
img_dis, img_ref = augment([img_dis, img_ref], self.opt['use_flip'], rot=False)'''
if img_ref.shape[2] == 3:
img_ref = img_ref[:, :, [2, 1, 0]]
img_dis = img_dis[:, :, [2, 1, 0]]
img_ref = torch.from_numpy(np.ascontiguousarray(np.transpose(img_ref, (2, 0, 1)))).float()
img_dis = torch.from_numpy(np.ascontiguousarray(np.transpose(img_dis, (2, 0, 1)))).float()
img_dis = self.transform(img_dis)
img_ref = self.transform(img_ref)
return {'Dis': img_dis, 'Ref': img_ref, 'Label': target, 'Dis_path': path[0]}
def __len__(self):
return len(self.samples)
@staticmethod
def getFileName(path, suffix):
filename = []
f_list = os.listdir(path)
# print f_list
for i in f_list:
if os.path.splitext(i)[1] == suffix:
filename.append(i)
filename.sort()
return filename
|
[
"data.util.augment",
"numpy.array",
"torchvision.transforms.Normalize",
"data.util.read_img",
"numpy.transpose"
] |
[((1538, 1630), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, \n 0.224, 0.225])\n', (1570, 1630), False, 'import torchvision\n'), ((1956, 1988), 'data.util.read_img', 'read_img', ([], {'env': 'None', 'path': 'path[0]'}), '(env=None, path=path[0])\n', (1964, 1988), False, 'from data.util import default_loader, read_img, augment, get_image_paths\n'), ((2008, 2040), 'data.util.read_img', 'read_img', ([], {'env': 'None', 'path': 'path[1]'}), '(env=None, path=path[1])\n', (2016, 2040), False, 'from data.util import default_loader, read_img, augment, get_image_paths\n'), ((2663, 2723), 'data.util.augment', 'augment', (['[img_dis, img_ref]', "self.opt['use_flip']"], {'rot': '(False)'}), "([img_dis, img_ref], self.opt['use_flip'], rot=False)\n", (2670, 2723), False, 'from data.util import default_loader, read_img, augment, get_image_paths\n'), ((4654, 4746), 'torchvision.transforms.Normalize', 'torchvision.transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, \n 0.224, 0.225])\n', (4686, 4746), False, 'import torchvision\n'), ((4903, 4935), 'data.util.read_img', 'read_img', ([], {'env': 'None', 'path': 'path[0]'}), '(env=None, path=path[0])\n', (4911, 4935), False, 'from data.util import default_loader, read_img, augment, get_image_paths\n'), ((4955, 4987), 'data.util.read_img', 'read_img', ([], {'env': 'None', 'path': 'path[1]'}), '(env=None, path=path[1])\n', (4963, 4987), False, 'from data.util import default_loader, read_img, augment, get_image_paths\n'), ((2916, 2948), 'numpy.transpose', 'np.transpose', (['img_ref', '(2, 0, 1)'], {}), '(img_ref, (2, 0, 1))\n', (2928, 2948), True, 'import numpy as np\n'), ((3016, 3048), 'numpy.transpose', 'np.transpose', (['img_dis', '(2, 0, 1)'], {}), '(img_dis, (2, 0, 1))\n', (3028, 3048), True, 'import numpy as np\n'), ((5666, 5698), 'numpy.transpose', 'np.transpose', (['img_ref', '(2, 0, 1)'], {}), '(img_ref, (2, 0, 1))\n', (5678, 5698), True, 'import numpy as np\n'), ((5766, 5798), 'numpy.transpose', 'np.transpose', (['img_dis', '(2, 0, 1)'], {}), '(img_dis, (2, 0, 1))\n', (5778, 5798), True, 'import numpy as np\n'), ((4516, 4534), 'numpy.array', 'np.array', (['words[1]'], {}), '(words[1])\n', (4524, 4534), True, 'import numpy as np\n'), ((1379, 1397), 'numpy.array', 'np.array', (['words[1]'], {}), '(words[1])\n', (1387, 1397), True, 'import numpy as np\n')]
|
import pytest
import numpy as np
import pandas as pd
from SPARTACUS10 import spatial_silhouette as spasi
import sklearn.metrics as metrics
import os
def find_path(name, path = None):
if path is None:
path = os.getcwd()
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
def test_silhouette():
"""
    Does the silhouette_coefficient() function produce the same results as the
    silhouette_score() function from sklearn.metrics when using the Euclidean metric?
"""
# Test on matrixA
X = np.genfromtxt(find_path("matrixA.csv"), delimiter=",", skip_header=1, usecols = range(1,21))
V = X.shape[1]
for i in range(3, 11):
labels = np.random.randint(1, i+1, V)
sil_score1 = spasi.silhouette_coefficient(X, labels, metric = "euclidean", iter_max = 10)
sil_score2 = metrics.silhouette_score(X.T, labels, metric = "euclidean")
assert np.round(sil_score1,10) == np.round(sil_score2, 10), "Silhouette function (Euclidean) produces different results than that implemented in scikit-learn"
# Test on random data comparison with existing function
V = 100
X = np.random.normal(size = (10, V))
for i in range(3, 11):
labels = np.random.randint(1, i+1, V)
sil_score1 = spasi.silhouette_coefficient(X, labels, metric = "euclidean", iter_max = 10)
sil_score2 = metrics.silhouette_score(X.T, labels, metric = "euclidean")
assert np.round(sil_score1,10) == np.round(sil_score2, 10), "Silhouette function (Euclidean) produces different results than that implemented in scikit-learn"
# Test on random data
random_data = np.genfromtxt(find_path("random_data.csv"), delimiter=",")
random_labels = np.genfromtxt(find_path("random_labels.csv"), delimiter=",")
silhouette_score_Eucl = spasi.silhouette_coefficient(random_data, random_labels, metric = "euclidean")
assert np.isclose(silhouette_score_Eucl, -0.018137954346288798), "Error in Euclidean silhouette_coefficient function"
silhouette_score_corr = spasi.silhouette_coefficient(random_data, random_labels, metric = "correlation")
assert np.isclose(silhouette_score_corr, -0.01710701512585803), "Error in correlation silhouette_coefficient function"
def test_ensemble_silhouette():
X = np.array([[1,1,2,2,3,3,4,4],
[1,1,2,2,3,3,4,4],
[1,1,2,2,3,3,4,4],
[1,1,2,2,5,5,6,6],
[1,1,1,2,3,3,3,4],
[1,1,1,2,3,3,3,4]])
labels = [1,1,2,2,3,3,4,4]
assert spasi.silhouette_coefficient(X[0:4,], labels, metric = "jaccard", iter_max = 4) == 1, "Ensemble silhouette produces wrong results"
sil_score1 = spasi.silhouette_coefficient(X, labels, metric = "jaccard", iter_max = 4)
assert np.round(sil_score1, 8) == 0.79166667, "Ensemble silhouette produces wrong results"
X1 = np.array([[1,1,2,2], [1,2,2,2], [1,1,1,2]])
labels1 = [1,1,2,2]
sil_score2 = spasi.silhouette_coefficient(X1, labels1, metric = "jaccard", iter_max = 4)
assert np.round(sil_score2, 8) == 0.46666667, "Ensemble silhouette produces wrong results"
def test_simplified_silhouette():
# Test on random data
random_data = np.genfromtxt(find_path("random_data.csv"), delimiter=",")
random_labels = np.genfromtxt(find_path("random_labels.csv"), delimiter=",")
simp_silhouette_score_Eucl = spasi.simplified_silhouette_coefficient(random_data, random_labels, metric = "euclidean")
assert np.isclose(simp_silhouette_score_Eucl, 0.01761300723620632), "Error in Euclidean simplified_silhouette_coefficient function"
simp_silhouette_score_corr = spasi.simplified_silhouette_coefficient(random_data, random_labels, metric = "correlation")
assert np.isclose(simp_silhouette_score_corr, 0.07464102055366918), "Error in correlation simplified_silhouette_coefficient function"
def test_spatial_silhouette():
# Test on random data
random_data = np.genfromtxt(find_path("random_data_spatial.csv"), delimiter=",")
matXYZ = np.argwhere(np.zeros((8, 3, 2)) == 0)
labels = np.repeat(np.array([1,2,3,4]), 2*3*2)
list_neighbors = spasi.get_list_neighbors(matXYZ)
spatial_silhouette_score_Eucl = spasi.silhouette_coefficient_spatial(random_data, labels, list_neighbors, metric = "euclidean")
assert np.isclose(spatial_silhouette_score_Eucl, -0.0019062813008068388), "Error in Euclidean silhouette_coefficient_spatial function"
spatial_silhouette_score_corr = spasi.silhouette_coefficient_spatial(random_data, labels, list_neighbors, metric = "correlation")
assert np.isclose(spatial_silhouette_score_corr, -0.0013034499248535598), "Error in correlation silhouette_coefficient_spatial function"
def test_spatial_simplified_silhouette():
# Test on random data
random_data = np.genfromtxt(find_path("random_data_spatial.csv"), delimiter=",")
matXYZ = np.argwhere(np.zeros((8, 3, 2)) == 0)
labels = np.repeat(np.array([1,2,3,4]), 2*3*2)
list_neighbors = spasi.get_list_neighbors(matXYZ)
spatial_simp_silhouette_score_Eucl = spasi.simplified_silhouette_coefficient_spatial(random_data, labels, list_neighbors, metric = "euclidean")
assert np.isclose(spatial_simp_silhouette_score_Eucl, 0.06783823739924444), "Error in Euclidean simplified_silhouette_coefficient_spatial function"
spatial_simp_silhouette_score_corr = spasi.simplified_silhouette_coefficient_spatial(random_data, labels, list_neighbors, metric = "correlation")
assert np.isclose(spatial_simp_silhouette_score_corr, 0.22422765231602626), "Error in correlation simplified_silhouette_coefficient_spatial function"
def test_list_neighbors():
list_neighbors_true = pd.read_csv(find_path("list_neighbors.csv"))
list_neighbors_true.columns = pd.RangeIndex(start=0, stop=5, step=1)
matXYZ = np.argwhere(np.zeros((4, 3, 2)) == 0)
list_neighbors = spasi.get_list_neighbors(matXYZ)
list_neighbors = pd.DataFrame(list_neighbors)
list_neighbors.columns = pd.RangeIndex(start=0, stop=5, step=1)
assert pd.DataFrame.equals(list_neighbors_true, list_neighbors), "list_neighbors does not work"
# pd.testing.assert_frame_equal(list_neighbors_true, list_neighbors, check_dtype = False, check_column_type = False)
# def test_main():
# assert main([]) == 0
|
[
"numpy.random.normal",
"numpy.isclose",
"SPARTACUS10.spatial_silhouette.simplified_silhouette_coefficient_spatial",
"numpy.round",
"os.path.join",
"SPARTACUS10.spatial_silhouette.silhouette_coefficient_spatial",
"os.getcwd",
"numpy.array",
"numpy.random.randint",
"SPARTACUS10.spatial_silhouette.get_list_neighbors",
"numpy.zeros",
"SPARTACUS10.spatial_silhouette.silhouette_coefficient",
"pandas.RangeIndex",
"pandas.DataFrame",
"pandas.DataFrame.equals",
"SPARTACUS10.spatial_silhouette.simplified_silhouette_coefficient",
"os.walk",
"sklearn.metrics.silhouette_score"
] |
[((268, 281), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (275, 281), False, 'import os\n'), ((1207, 1237), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(10, V)'}), '(size=(10, V))\n', (1223, 1237), True, 'import numpy as np\n'), ((1882, 1958), 'SPARTACUS10.spatial_silhouette.silhouette_coefficient', 'spasi.silhouette_coefficient', (['random_data', 'random_labels'], {'metric': '"""euclidean"""'}), "(random_data, random_labels, metric='euclidean')\n", (1910, 1958), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((1972, 2028), 'numpy.isclose', 'np.isclose', (['silhouette_score_Eucl', '(-0.018137954346288798)'], {}), '(silhouette_score_Eucl, -0.018137954346288798)\n', (1982, 2028), True, 'import numpy as np\n'), ((2111, 2189), 'SPARTACUS10.spatial_silhouette.silhouette_coefficient', 'spasi.silhouette_coefficient', (['random_data', 'random_labels'], {'metric': '"""correlation"""'}), "(random_data, random_labels, metric='correlation')\n", (2139, 2189), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((2203, 2258), 'numpy.isclose', 'np.isclose', (['silhouette_score_corr', '(-0.01710701512585803)'], {}), '(silhouette_score_corr, -0.01710701512585803)\n', (2213, 2258), True, 'import numpy as np\n'), ((2360, 2535), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3, 3, 4, 4], [1, 1, 2, 2, 3, 3, 4, 4], [1, 1, 2, 2, 3, 3, 4, \n 4], [1, 1, 2, 2, 5, 5, 6, 6], [1, 1, 1, 2, 3, 3, 3, 4], [1, 1, 1, 2, 3,\n 3, 3, 4]]'], {}), '([[1, 1, 2, 2, 3, 3, 4, 4], [1, 1, 2, 2, 3, 3, 4, 4], [1, 1, 2, 2, \n 3, 3, 4, 4], [1, 1, 2, 2, 5, 5, 6, 6], [1, 1, 1, 2, 3, 3, 3, 4], [1, 1,\n 1, 2, 3, 3, 3, 4]])\n', (2368, 2535), True, 'import numpy as np\n'), ((2788, 2857), 'SPARTACUS10.spatial_silhouette.silhouette_coefficient', 'spasi.silhouette_coefficient', (['X', 'labels'], {'metric': '"""jaccard"""', 'iter_max': '(4)'}), "(X, labels, metric='jaccard', iter_max=4)\n", (2816, 2857), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((2966, 3018), 'numpy.array', 'np.array', (['[[1, 1, 2, 2], [1, 2, 2, 2], [1, 1, 1, 2]]'], {}), '([[1, 1, 2, 2], [1, 2, 2, 2], [1, 1, 1, 2]])\n', (2974, 3018), True, 'import numpy as np\n'), ((3051, 3122), 'SPARTACUS10.spatial_silhouette.silhouette_coefficient', 'spasi.silhouette_coefficient', (['X1', 'labels1'], {'metric': '"""jaccard"""', 'iter_max': '(4)'}), "(X1, labels1, metric='jaccard', iter_max=4)\n", (3079, 3122), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((3482, 3574), 'SPARTACUS10.spatial_silhouette.simplified_silhouette_coefficient', 'spasi.simplified_silhouette_coefficient', (['random_data', 'random_labels'], {'metric': '"""euclidean"""'}), "(random_data, random_labels, metric=\n 'euclidean')\n", (3521, 3574), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((3583, 3642), 'numpy.isclose', 'np.isclose', (['simp_silhouette_score_Eucl', '(0.01761300723620632)'], {}), '(simp_silhouette_score_Eucl, 0.01761300723620632)\n', (3593, 3642), True, 'import numpy as np\n'), ((3741, 3835), 'SPARTACUS10.spatial_silhouette.simplified_silhouette_coefficient', 'spasi.simplified_silhouette_coefficient', (['random_data', 'random_labels'], {'metric': '"""correlation"""'}), "(random_data, random_labels, metric=\n 'correlation')\n", (3780, 3835), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((3844, 3903), 'numpy.isclose', 'np.isclose', (['simp_silhouette_score_corr', '(0.07464102055366918)'], {}), '(simp_silhouette_score_corr, 0.07464102055366918)\n', (3854, 3903), True, 'import numpy as 
np\n'), ((4237, 4269), 'SPARTACUS10.spatial_silhouette.get_list_neighbors', 'spasi.get_list_neighbors', (['matXYZ'], {}), '(matXYZ)\n', (4261, 4269), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((4307, 4404), 'SPARTACUS10.spatial_silhouette.silhouette_coefficient_spatial', 'spasi.silhouette_coefficient_spatial', (['random_data', 'labels', 'list_neighbors'], {'metric': '"""euclidean"""'}), "(random_data, labels, list_neighbors,\n metric='euclidean')\n", (4343, 4404), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((4417, 4482), 'numpy.isclose', 'np.isclose', (['spatial_silhouette_score_Eucl', '(-0.0019062813008068388)'], {}), '(spatial_silhouette_score_Eucl, -0.0019062813008068388)\n', (4427, 4482), True, 'import numpy as np\n'), ((4581, 4680), 'SPARTACUS10.spatial_silhouette.silhouette_coefficient_spatial', 'spasi.silhouette_coefficient_spatial', (['random_data', 'labels', 'list_neighbors'], {'metric': '"""correlation"""'}), "(random_data, labels, list_neighbors,\n metric='correlation')\n", (4617, 4680), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((4693, 4758), 'numpy.isclose', 'np.isclose', (['spatial_silhouette_score_corr', '(-0.0013034499248535598)'], {}), '(spatial_silhouette_score_corr, -0.0013034499248535598)\n', (4703, 4758), True, 'import numpy as np\n'), ((5100, 5132), 'SPARTACUS10.spatial_silhouette.get_list_neighbors', 'spasi.get_list_neighbors', (['matXYZ'], {}), '(matXYZ)\n', (5124, 5132), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((5175, 5283), 'SPARTACUS10.spatial_silhouette.simplified_silhouette_coefficient_spatial', 'spasi.simplified_silhouette_coefficient_spatial', (['random_data', 'labels', 'list_neighbors'], {'metric': '"""euclidean"""'}), "(random_data, labels,\n list_neighbors, metric='euclidean')\n", (5222, 5283), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((5296, 5363), 'numpy.isclose', 'np.isclose', (['spatial_simp_silhouette_score_Eucl', '(0.06783823739924444)'], {}), '(spatial_simp_silhouette_score_Eucl, 0.06783823739924444)\n', (5306, 5363), True, 'import numpy as np\n'), ((5478, 5588), 'SPARTACUS10.spatial_silhouette.simplified_silhouette_coefficient_spatial', 'spasi.simplified_silhouette_coefficient_spatial', (['random_data', 'labels', 'list_neighbors'], {'metric': '"""correlation"""'}), "(random_data, labels,\n list_neighbors, metric='correlation')\n", (5525, 5588), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((5601, 5668), 'numpy.isclose', 'np.isclose', (['spatial_simp_silhouette_score_corr', '(0.22422765231602626)'], {}), '(spatial_simp_silhouette_score_corr, 0.22422765231602626)\n', (5611, 5668), True, 'import numpy as np\n'), ((5881, 5919), 'pandas.RangeIndex', 'pd.RangeIndex', ([], {'start': '(0)', 'stop': '(5)', 'step': '(1)'}), '(start=0, stop=5, step=1)\n', (5894, 5919), True, 'import pandas as pd\n'), ((5992, 6024), 'SPARTACUS10.spatial_silhouette.get_list_neighbors', 'spasi.get_list_neighbors', (['matXYZ'], {}), '(matXYZ)\n', (6016, 6024), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((6046, 6074), 'pandas.DataFrame', 'pd.DataFrame', (['list_neighbors'], {}), '(list_neighbors)\n', (6058, 6074), True, 'import pandas as pd\n'), ((6104, 6142), 'pandas.RangeIndex', 'pd.RangeIndex', ([], {'start': '(0)', 'stop': '(5)', 'step': '(1)'}), '(start=0, stop=5, step=1)\n', (6117, 6142), True, 'import pandas as pd\n'), ((6154, 6210), 'pandas.DataFrame.equals', 'pd.DataFrame.equals', (['list_neighbors_true', 
'list_neighbors'], {}), '(list_neighbors_true, list_neighbors)\n', (6173, 6210), True, 'import pandas as pd\n'), ((227, 238), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (236, 238), False, 'import os\n'), ((741, 771), 'numpy.random.randint', 'np.random.randint', (['(1)', '(i + 1)', 'V'], {}), '(1, i + 1, V)\n', (758, 771), True, 'import numpy as np\n'), ((791, 863), 'SPARTACUS10.spatial_silhouette.silhouette_coefficient', 'spasi.silhouette_coefficient', (['X', 'labels'], {'metric': '"""euclidean"""', 'iter_max': '(10)'}), "(X, labels, metric='euclidean', iter_max=10)\n", (819, 863), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((893, 950), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['X.T', 'labels'], {'metric': '"""euclidean"""'}), "(X.T, labels, metric='euclidean')\n", (917, 950), True, 'import sklearn.metrics as metrics\n'), ((1284, 1314), 'numpy.random.randint', 'np.random.randint', (['(1)', '(i + 1)', 'V'], {}), '(1, i + 1, V)\n', (1301, 1314), True, 'import numpy as np\n'), ((1334, 1406), 'SPARTACUS10.spatial_silhouette.silhouette_coefficient', 'spasi.silhouette_coefficient', (['X', 'labels'], {'metric': '"""euclidean"""', 'iter_max': '(10)'}), "(X, labels, metric='euclidean', iter_max=10)\n", (1362, 1406), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((1436, 1493), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['X.T', 'labels'], {'metric': '"""euclidean"""'}), "(X.T, labels, metric='euclidean')\n", (1460, 1493), True, 'import sklearn.metrics as metrics\n'), ((2640, 2715), 'SPARTACUS10.spatial_silhouette.silhouette_coefficient', 'spasi.silhouette_coefficient', (['X[0:4,]', 'labels'], {'metric': '"""jaccard"""', 'iter_max': '(4)'}), "(X[0:4,], labels, metric='jaccard', iter_max=4)\n", (2668, 2715), True, 'from SPARTACUS10 import spatial_silhouette as spasi\n'), ((2873, 2896), 'numpy.round', 'np.round', (['sil_score1', '(8)'], {}), '(sil_score1, 8)\n', (2881, 2896), True, 'import numpy as np\n'), ((3139, 3162), 'numpy.round', 'np.round', (['sil_score2', '(8)'], {}), '(sil_score2, 8)\n', (3147, 3162), True, 'import numpy as np\n'), ((4188, 4210), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (4196, 4210), True, 'import numpy as np\n'), ((5051, 5073), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (5059, 5073), True, 'import numpy as np\n'), ((328, 352), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (340, 352), False, 'import os\n'), ((970, 994), 'numpy.round', 'np.round', (['sil_score1', '(10)'], {}), '(sil_score1, 10)\n', (978, 994), True, 'import numpy as np\n'), ((997, 1021), 'numpy.round', 'np.round', (['sil_score2', '(10)'], {}), '(sil_score2, 10)\n', (1005, 1021), True, 'import numpy as np\n'), ((1513, 1537), 'numpy.round', 'np.round', (['sil_score1', '(10)'], {}), '(sil_score1, 10)\n', (1521, 1537), True, 'import numpy as np\n'), ((1540, 1564), 'numpy.round', 'np.round', (['sil_score2', '(10)'], {}), '(sil_score2, 10)\n', (1548, 1564), True, 'import numpy as np\n'), ((4139, 4158), 'numpy.zeros', 'np.zeros', (['(8, 3, 2)'], {}), '((8, 3, 2))\n', (4147, 4158), True, 'import numpy as np\n'), ((5002, 5021), 'numpy.zeros', 'np.zeros', (['(8, 3, 2)'], {}), '((8, 3, 2))\n', (5010, 5021), True, 'import numpy as np\n'), ((5945, 5964), 'numpy.zeros', 'np.zeros', (['(4, 3, 2)'], {}), '((4, 3, 2))\n', (5953, 5964), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import pickle as pk
import random
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import Imputer
def read_csv(csv_path):
df = pd.read_csv(csv_path)
return df
def encode_label(Y):
le = preprocessing.LabelEncoder()
cls = le.fit(Y)
cls = le.transform(Y)
return cls
def split_test(num_data, percent):
select_id = random.sample(range(num_data), int(num_data*percent))
return select_id
def save_pk(data, pk_path):
with open(pk_path, 'wb') as f:
pk.dump(data, f)
def read_pk(pk_path):
with open(pk_path, 'rb') as f:
data = pk.load(f)
return data
def random_split_test_save(num_data, pk_path, ratio=0.1):
selected_id = split_test(num_data, ratio)
save_pk(selected_id, pk_path)
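# Convert a dotted IPv4 address (given as a list of octet strings) into a single numeric feature value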
def list_to_float(data):
power = 0
val = 0
data = data[::-1]
for d in data:
val += int(d)*(10**power)
power += len(d)
return val
def X_preprocessing(X, scenario):
# print ('X.shape = {}'.format(X.shape))
r = X.shape[0]
c = X.shape[1]
# convert ip to float
for i in range(r):
for j in [0, 2]:
if scenario == 'A':
X[i, j] = list_to_float(X[i, j].split('.'))
elif scenario == 'B':
pass
    # note: 'X == np.nan' is always False, so it never found missing values; use pd.isnull instead
    nan_idx = np.where(pd.isnull(X))[0]
    print ('nan_idx = {}'.format(nan_idx))
inf_idx = np.where(X == 'Infinity')[0]
print ('inf_idx = {}'.format(inf_idx))
print('finite_idx = {}'.format(np.isfinite(X.all())))
X[nan_idx] = 0
X[inf_idx] = 0
return X
if __name__ == '__main__':
csv_path = '../../TorCSV/CSV/Scenario-A/merged_5s.csv'
df = read_csv(csv_path)
print ('read CSV !!!')
df_mat = df.as_matrix()
# get input X and label Y #
X = df_mat[:, :-1]
Y = df_mat[:, -1]
    X = X_preprocessing(X, 'A')  # csv_path points at Scenario-A, so pass scenario 'A'
# read the list idx to test #
pk_path = 'selected_id.pkl'
test_idx = read_pk(pk_path)
# print (test_idx)
# encode label #
le = preprocessing.LabelEncoder()
cls = le.fit(Y)
Y = le.transform(Y)
X_test = X[test_idx, :]
Y_test = Y[test_idx]
X_train = np.delete(X, test_idx, axis=0)
Y_train = np.delete(Y, test_idx, axis=0)
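    # Fit a random forest on the remaining rows and evaluate on the held-out test indices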
clf = RandomForestClassifier(max_depth=2, random_state=0)
clf.fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
print ('accuracy = {}'.format(accuracy_score(Y_test, Y_pred)))
filename = 'randomForest.sav'
pk.dump(clf, open(filename, 'wb'))
|
[
"sklearn.preprocessing.LabelEncoder",
"pickle.dump",
"pandas.read_csv",
"numpy.where",
"numpy.delete",
"pickle.load",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.accuracy_score"
] |
[((281, 302), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (292, 302), True, 'import pandas as pd\n'), ((349, 377), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (375, 377), False, 'from sklearn import preprocessing\n'), ((2115, 2143), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (2141, 2143), False, 'from sklearn import preprocessing\n'), ((2256, 2286), 'numpy.delete', 'np.delete', (['X', 'test_idx'], {'axis': '(0)'}), '(X, test_idx, axis=0)\n', (2265, 2286), True, 'import numpy as np\n'), ((2301, 2331), 'numpy.delete', 'np.delete', (['Y', 'test_idx'], {'axis': '(0)'}), '(Y, test_idx, axis=0)\n', (2310, 2331), True, 'import numpy as np\n'), ((2343, 2394), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': '(2)', 'random_state': '(0)'}), '(max_depth=2, random_state=0)\n', (2365, 2394), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((641, 657), 'pickle.dump', 'pk.dump', (['data', 'f'], {}), '(data, f)\n', (648, 657), True, 'import pickle as pk\n'), ((732, 742), 'pickle.load', 'pk.load', (['f'], {}), '(f)\n', (739, 742), True, 'import pickle as pk\n'), ((1423, 1444), 'numpy.where', 'np.where', (['(X == np.nan)'], {}), '(X == np.nan)\n', (1431, 1444), True, 'import numpy as np\n'), ((1505, 1530), 'numpy.where', 'np.where', (["(X == 'Infinity')"], {}), "(X == 'Infinity')\n", (1513, 1530), True, 'import numpy as np\n'), ((2493, 2523), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test', 'Y_pred'], {}), '(Y_test, Y_pred)\n', (2507, 2523), False, 'from sklearn.metrics import accuracy_score\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 21:44:55 2017
@author: Mike
"""
import numpy as np
import cv2
import glob
import pickle
import matplotlib.pyplot as plt
from matplotlib.pyplot import *
import os
from scipy import stats
from moviepy.editor import VideoFileClip
from IPython.display import HTML
from camera_calibration import calibrate_camera, distortion_correct
from sobel_library import abs_sobel_image, sobel_mag_thresh, sobel_dir_thresh
from collections import deque
run_camera_cal = 1
#HLS Color space threshold filter
def color_binary(img, colorspace, color_thresh):
if colorspace == 'HLS':
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
H = hls[:,:,0]
L = hls[:,:,1]
S = hls[:,:,2]
binary_output = np.zeros_like(S)
binary_output[((S > color_thresh [0]) & (S < color_thresh [1]))] = 1
return binary_output
#combine the thresholds for the color map and the gradient threshold
# send in an image with binary color scheme and binary gradient scheme
def bin_color_gradient(binary_gradient , binary_color):
binary_output = np.zeros_like(binary_gradient)
binary_output[((binary_gradient == 1) | (binary_color == 1))] = 1
# polys = np.array([[(350,720),(580,500),(800,500),(1000,720)]], dtype = np.int32)
polys = np.array([[(350,720),(580,500),(800,500),(900,720)]], dtype = np.int32)
cv2.fillPoly(binary_output, polys, 0, lineType=8, shift=0)
return binary_output
#Function to warp images to birds eye view
def warp(img,source_points, destination_points):
img_shape = (img.shape[1], img.shape[0])
src = np.float32(source_points)
dst = np.float32(destination_points)
M = cv2.getPerspectiveTransform(src,dst)
Minv = cv2.getPerspectiveTransform(dst,src)
warped = cv2.warpPerspective(img,M,img_shape, flags = cv2.INTER_LINEAR)
return warped, M, Minv
global left_fit_deque
global right_fit_deque
deque_size = 3
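# Rolling buffers of the most recent left/right polynomial fits, used to smooth the lane estimate across video frames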
left_fit_deque = []
left_fit_deque = deque(maxlen = deque_size)
right_fit_deque = []
right_fit_deque = deque(maxlen = deque_size)
class Lane():
def __init__(self):
self.llm = []
self.rlm = []
mylane = Lane()
coeffs = []
C0_L = np.zeros(deque_size)
C1_L = np.zeros(deque_size)
C2_L = np.zeros(deque_size)
C0_R = np.zeros(deque_size)
C1_R = np.zeros(deque_size)
C2_R = np.zeros(deque_size)
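# Sliding-window lane search on the warped binary image, followed by deque-smoothed second-order polynomial fits and lane overlay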
def polyfit(warped_image, orig_img, Minv):
#def polyfit(warped_image):
# print('Initiating line overlay onto binary warped image')
# Assuming you have created a warped binary image called "binary_warped"
# Take a histogram of the bottom half of the image
histogram = np.sum(warped_image[warped_image.shape[0]//2:,:], axis=0)
#histogram = np.sum(binary_warped[binary_warped.shape[0]/2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((warped_image, warped_image, warped_image))*255
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]/2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = np.int(warped_image.shape[0]/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = warped_image.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = warped_image.shape[0] - (window+1)*window_height
win_y_high = warped_image.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),
(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),
(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
#Store the left poly coefficient in a deque for later use
left_fit_deque.append(left_fit)
    # Take the deque of polynomial data and extract the three coefficients, average them for stability
for idx, coeffs in enumerate(left_fit_deque):
C0_L[idx] = coeffs[0]
C1_L[idx] = coeffs[1]
C2_L[idx] = coeffs[2]
average_C0_L = np.mean(C0_L)
average_C1_L = np.mean(C1_L)
average_C2_L = np.mean(C2_L)
left_fit[0] = average_C0_L
left_fit[1] = average_C1_L
left_fit[2] = average_C2_L
right_fit = np.polyfit(righty, rightx, 2)
#Store the left poly coefficient in a deque for later use
right_fit_deque.append(right_fit)
    # Take the deque of polynomial data and extract the three coefficients, average them for stability
for idx, coeffs in enumerate(right_fit_deque):
C0_R[idx] = coeffs[0]
C1_R[idx] = coeffs[1]
C2_R[idx] = coeffs[2]
average_C0_R = np.mean(C0_R)
average_C1_R = np.mean(C1_R)
average_C2_R = np.mean(C2_R)
right_fit[0] = average_C0_R
right_fit[1] = average_C1_R
right_fit[2] = average_C2_R
# Generate x and y values for plotting
ploty = np.linspace(0, warped_image.shape[0]-1, warped_image.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
# left_fitx = left_fit_deque[0]*ploty**2 + left_fit_deque[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# plt.figure(figsize = (20,10))
# plt.imshow(out_img)
# plt.plot(left_fitx, ploty, color='blue')
# plt.plot(right_fitx, ploty, color='red')
# plt.xlim(0, 1280)
# plt.ylim(720, 0)
# plt.show()
# Create an image to draw the lines on
warp_zero = np.zeros_like(warped_image).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# =============================================================================
# In this section we calculate the radius of curvature for the warped lines
# =============================================================================
# Define y-value where we want radius of curvature
# I'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
# print(left_curverad, right_curverad)
# Example values: 1926.74 1908.48
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
# Now our radius of curvature is in meters
# print(left_curverad, 'm', right_curverad, 'm')
# Example values: 632.1 m 626.2 m
# =============================================================================
# Calculate the position from center for the vehicle relative to the left lane
# =============================================================================
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (orig_img.shape[1], orig_img.shape[0]))
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(newwarp,'Recording: project_video',(10,50), font, 1,(255,0,0),3,cv2.LINE_AA)
cv2.putText(newwarp,'Road Radius of curvature: {} km'.format(left_curverad/1000),(10,100), font, 1,(255,0,0),3,cv2.LINE_AA)
# =============================================================================
# Add the Section for fitting the radius of curvature to the image
# =============================================================================
vehicle_center = newwarp.shape[1]/2 #assuming that the video feed is from veh center
y_pixels = np.arange(newwarp.shape[0]-10, newwarp.shape[0]+1)
# y_pixels = 719
lx_loc = left_fit_cr[0]*y_pixels**2+left_fit_cr[1]*y_pixels+left_fit_cr[2]
rx_loc = right_fit_cr[0]*y_pixels**2+right_fit_cr[1]*y_pixels+right_fit_cr[2]
lane_center_pixel = (right_fitx[0] + left_fitx[0])/2
vehicle_offset = (vehicle_center - lane_center_pixel)*xm_per_pix
# pct_difference = vehicle_offset/
if vehicle_offset > 0:
cv2.putText(newwarp,'Ego Vehicle is {} meters right of lane center'.format(vehicle_offset),(10,150), font, 1,(255,0,0),3,cv2.LINE_AA)
if vehicle_offset < 0:
cv2.putText(newwarp,'Ego Vehicle is {} meters left of lane center'.format(vehicle_offset),(10,150), font, 1,(255,0,0),3,cv2.LINE_AA)
if vehicle_offset == 0:
cv2.putText(newwarp,'Ego Vehicle is directly on center!! Great job!',(10,150), font, 1,(255,0,0),3,cv2.LINE_AA)
# =============================================================================
# This plots the lane line data for debugging vehicle center
# =============================================================================
# plt.plot(lx_loc,y_pixels,'x')
# plt.title('Left Lane Line Pixel Locations')
# plt.show()
#
# plt.plot(rx_loc,y_pixels,'x')
# plt.title('Right Lane Line Pixel Locations')
# plt.show()
#
# plt.plot(left_fitx,'x')
# plt.plot(right_fitx,'o')
    # plt.title('Left Lane and Right Lane overlay, horizontal dir is "y" in image space')
# plt.show()
#
# plt.figure(figsize = (15,15))
# plt.imshow(newwarp)
# plt.show()
#
# Combine the result with the original image
#img = cv2.imread(img)
img = cv2.cvtColor(orig_img,cv2.COLOR_BGR2RGB)
# result = cv2.addWeighted(orig_img, 1, newwarp, 0.3, 0)
result = cv2.addWeighted(img, 1, newwarp, 0.3, 0)
    # This is the final overlaid image with the text on it
# plt.figure(figsize = (10,10))
# plt.title('final result')
# plt.imshow(result)
# plt.show()
return result, left_fitx, right_fitx, ploty
if run_camera_cal == 1:
#--------------------- CAll functions and initiate camera cal and distortion corrrect-----------------------
#This section calls the camera calibration function
# Call the function to parse through the calibration image array and return
#the base object point, corners and a grascale image for reference size
#***** TURN THIS ON LATER!!!!!! when you want to calibrate the camera
# Make a list of calibration images
image_dir = "C:\\Users\\mrpal\\Documents\\Projects\\CarND-Advanced-Lane-Lines\\camera_cal\\"
images = os.listdir('camera_cal')
corners, imgpoints, objpoints, gray = calibrate_camera(image_dir, images)
##Generate the distortion coefficients and camera matrix, trans vector and rot vector
print('Generating distortion coefficients and camera matrix parameters')
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,gray.shape[::-1], None, None)
#Undistort the images in the test_images folder
image_dir = "C:\\Users\\mrpal\\Documents\\Projects\\CarND-Advanced-Lane-Lines\\test_images\\"
images = os.listdir('test_images')
print('Selected image directory is: {} '.format(image_dir))
print('The images in the directory are: {}' .format(images))
distortion_corrected = distortion_correct(image_dir, images, mtx, dist)
cv2.destroyAllWindows()
#--------------------- CAll functions to initiate a pipeline for image processing----------------------
image_dir = "C:\\Users\\mrpal\\Documents\\Projects\\CarND-Advanced-Lane-Lines\\test_images\\"
images = os.listdir('test_images')
print('Selected image directory is: {} '.format(image_dir))
print('The images in the directory are: {} \n' .format(images))
#print('The images in the directory are: {} \n' .format(images_new))
sobel_kernel = 9
#mag_thresh = [30,255]
#keep it
grad_threshold = [50,150]
sobel_mag = [0,255]
#distortion correct
if len(glob.glob('./test_images/*Distortion*.jpg')) == 0:
print('there are no distortion corrected images in the directory, let us create them')
distortion_corrected = distortion_correct(image_dir, images, mtx, dist)
images = glob.glob('./test_images/*Distortion*.jpg')
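# Per-frame pipeline: gradient/color thresholding, perspective warp, polynomial lane fit, and overlay onto the original frame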
def process_image(images):
# for idx, fname in enumerate(images):
img = cv2.cvtColor(images, cv2.COLOR_BGR2RGB)
# img = cv2.cvtColor(images, cv2.COLOR_RGB2BGR)
# orig_image = img
# img = cv2.imread(fname)
# plt.figure(figsize = (20,10))
# plt.imshow(img)
# plt.show()
#pull in the absolute binary gradient data in X and Y
gradx_binary = abs_sobel_image(img,'x',grad_threshold , sobel_kernel)
# plt.figure(figsize = (20,10))
# plt.title('Binary Gradient Thresholding in X direction')
# plt.imshow(gradx_binary, cmap='gray')
# plt.show()
grady_binary = abs_sobel_image(img,'y',grad_threshold , sobel_kernel)
# plt.figure(figsize = (20,10))
# plt.title('Binary Gradient Thresholding in Y direction')
# plt.imshow(grady_binary, cmap='gray')
# plt.show()
#Calculate the Sobel direction gradient binary threshold
dir_binary = sobel_dir_thresh(img, sobel_kernel=15, thresh=(0.6, np.pi/2))
# print(dir_binary.dtype)
# plt.figure(figsize = (20,10))
# plt.title('Binary Sobel (Absolute) Gradient Thresholding')
# plt.imshow(dir_binary, cmap = 'gray')
# mag_binary = sobel_mag_thresh(img, sobel_kernel, mag_thresh= (50, 150))
mag_binary = sobel_mag_thresh(img, sobel_kernel, mag_thresh= (80, 150))
# plt.figure(figsize = (20,10))
# plt.title('Binary Gradient Magnitude Thresholding')
# plt.imshow(mag_binary, cmap='gray')
# mag_binary
#Combine the gradient thresholds into a coherent image, there still may be gaps where color thresholding comes in
combined_binary = np.zeros_like(dir_binary)
# combined_binary[(gradx_binary == 1) | ((mag_binary == 1) | (dir_binary == 1))] = 1
combined_binary[(gradx_binary == 1) | ((mag_binary == 1) & (dir_binary == 1))] = 1
#combined_binary[((gradx_binary == 1) & (grady_binary == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
# plt.figure(figsize = (20,10))
# plt.title('Combined Binary Gradient Thresholding (X,Mag,Dir)')
# plt.imshow(combined_binary, cmap = 'gray')
# plt.show()
binary_color = color_binary(img, 'HLS', color_thresh = [80,255])
# binary_color = color_binary(img, 'HLS', color_thresh = [80,180])
# plt.figure(figsize = (20,10))
# plt.title('Binary Color Thresholding in HLS')
# plt.imshow(binary_color, cmap = 'gray')
# plt.show()
#Visualize the overall combined thresholding on the test images
color_grad_combined = bin_color_gradient(combined_binary , binary_color)
# plt.figure(figsize = (20,10))
# plt.title('Combined color and gradient mag thresholding')
# plt.imshow(color_grad_combined, cmap = 'gray')
# plt.show()
img_size = img.shape
offset = 100
src = np.float32([(200, 720), (580, 480), (720, 480), (1050, 720)])
dst = np.float32([(280, 720), (400, 190), (920, 190), (960, 720)])
destination_points = np.float32([[offset, img_size[1]-offset], [img_size[0]-offset, img_size[1]-offset],
[img_size[0]-offset, offset],
[offset, offset]])
source_points = np.float32(([450,780], [680, 1050], [680,250], [450, 500]))
binary_warped, M, Minv = warp(color_grad_combined,src, dst)
#warped_image_test = warp(img,source_points, destination_points)
# plt.figure(figsize = (20,10))
# plt.imshow(binary_warped, cmap='gray')
# plt.show()
#
#
# import numpy as np
# plt.figure(figsize = (20,10))
# histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# plt.plot(histogram)
# plt.show()
#
#Need the line data to be fed back out
out, left_fitx, right_fitx, ploty = polyfit(binary_warped,img, Minv)
# out = cv2.cvtColor(out, cv2.COLOR_BGR2RGB)
return out
#######--------------------------
##os.system("ffmpeg -i project_video.mp4 -vf fps=15/1 out_%03d.jpg")
Test_Video_dir = os.listdir("test_videos/")
video_output = 'project_video_output.mp4'
clip1 = VideoFileClip("test_videos/project_video.mp4").subclip(13,18)
#clip1 = VideoFileClip("test_videos/project_video.mp4")
clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
clip.write_videofile(video_output, audio=False)
#-------------------------------------------
|
[
"cv2.rectangle",
"numpy.polyfit",
"numpy.hstack",
"numpy.array",
"cv2.warpPerspective",
"sobel_library.abs_sobel_image",
"cv2.destroyAllWindows",
"cv2.calibrateCamera",
"camera_calibration.calibrate_camera",
"numpy.arange",
"numpy.mean",
"os.listdir",
"collections.deque",
"camera_calibration.distortion_correct",
"numpy.max",
"sobel_library.sobel_dir_thresh",
"cv2.addWeighted",
"numpy.linspace",
"numpy.vstack",
"numpy.concatenate",
"moviepy.editor.VideoFileClip",
"glob.glob",
"cv2.fillPoly",
"cv2.getPerspectiveTransform",
"numpy.argmax",
"cv2.putText",
"cv2.cvtColor",
"numpy.int_",
"numpy.int",
"numpy.dstack",
"sobel_library.sobel_mag_thresh",
"numpy.absolute",
"numpy.sum",
"numpy.zeros",
"numpy.zeros_like",
"numpy.float32"
] |
[((2065, 2089), 'collections.deque', 'deque', ([], {'maxlen': 'deque_size'}), '(maxlen=deque_size)\n', (2070, 2089), False, 'from collections import deque\n'), ((2133, 2157), 'collections.deque', 'deque', ([], {'maxlen': 'deque_size'}), '(maxlen=deque_size)\n', (2138, 2157), False, 'from collections import deque\n'), ((2315, 2335), 'numpy.zeros', 'np.zeros', (['deque_size'], {}), '(deque_size)\n', (2323, 2335), True, 'import numpy as np\n'), ((2344, 2364), 'numpy.zeros', 'np.zeros', (['deque_size'], {}), '(deque_size)\n', (2352, 2364), True, 'import numpy as np\n'), ((2373, 2393), 'numpy.zeros', 'np.zeros', (['deque_size'], {}), '(deque_size)\n', (2381, 2393), True, 'import numpy as np\n'), ((2402, 2422), 'numpy.zeros', 'np.zeros', (['deque_size'], {}), '(deque_size)\n', (2410, 2422), True, 'import numpy as np\n'), ((2431, 2451), 'numpy.zeros', 'np.zeros', (['deque_size'], {}), '(deque_size)\n', (2439, 2451), True, 'import numpy as np\n'), ((2460, 2480), 'numpy.zeros', 'np.zeros', (['deque_size'], {}), '(deque_size)\n', (2468, 2480), True, 'import numpy as np\n'), ((20018, 20044), 'os.listdir', 'os.listdir', (['"""test_videos/"""'], {}), "('test_videos/')\n", (20028, 20044), False, 'import os\n'), ((804, 820), 'numpy.zeros_like', 'np.zeros_like', (['S'], {}), '(S)\n', (817, 820), True, 'import numpy as np\n'), ((1153, 1183), 'numpy.zeros_like', 'np.zeros_like', (['binary_gradient'], {}), '(binary_gradient)\n', (1166, 1183), True, 'import numpy as np\n'), ((1355, 1431), 'numpy.array', 'np.array', (['[[(350, 720), (580, 500), (800, 500), (900, 720)]]'], {'dtype': 'np.int32'}), '([[(350, 720), (580, 500), (800, 500), (900, 720)]], dtype=np.int32)\n', (1363, 1431), True, 'import numpy as np\n'), ((1434, 1492), 'cv2.fillPoly', 'cv2.fillPoly', (['binary_output', 'polys', '(0)'], {'lineType': '(8)', 'shift': '(0)'}), '(binary_output, polys, 0, lineType=8, shift=0)\n', (1446, 1492), False, 'import cv2\n'), ((1691, 1716), 'numpy.float32', 'np.float32', (['source_points'], {}), '(source_points)\n', (1701, 1716), True, 'import numpy as np\n'), ((1728, 1758), 'numpy.float32', 'np.float32', (['destination_points'], {}), '(destination_points)\n', (1738, 1758), True, 'import numpy as np\n'), ((1768, 1805), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (1795, 1805), False, 'import cv2\n'), ((1817, 1854), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['dst', 'src'], {}), '(dst, src)\n', (1844, 1854), False, 'import cv2\n'), ((1868, 1930), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', 'img_shape'], {'flags': 'cv2.INTER_LINEAR'}), '(img, M, img_shape, flags=cv2.INTER_LINEAR)\n', (1887, 1930), False, 'import cv2\n'), ((2773, 2833), 'numpy.sum', 'np.sum', (['warped_image[warped_image.shape[0] // 2:, :]'], {'axis': '(0)'}), '(warped_image[warped_image.shape[0] // 2:, :], axis=0)\n', (2779, 2833), True, 'import numpy as np\n'), ((3200, 3230), 'numpy.int', 'np.int', (['(histogram.shape[0] / 2)'], {}), '(histogram.shape[0] / 2)\n', (3206, 3230), True, 'import numpy as np\n'), ((3247, 3278), 'numpy.argmax', 'np.argmax', (['histogram[:midpoint]'], {}), '(histogram[:midpoint])\n', (3256, 3278), True, 'import numpy as np\n'), ((3459, 3499), 'numpy.int', 'np.int', (['(warped_image.shape[0] / nwindows)'], {}), '(warped_image.shape[0] / nwindows)\n', (3465, 3499), True, 'import numpy as np\n'), ((3625, 3645), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (3633, 3645), True, 'import numpy as np\n'), ((3662, 3682), 
'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (3670, 3682), True, 'import numpy as np\n'), ((5759, 5789), 'numpy.concatenate', 'np.concatenate', (['left_lane_inds'], {}), '(left_lane_inds)\n', (5773, 5789), True, 'import numpy as np\n'), ((5813, 5844), 'numpy.concatenate', 'np.concatenate', (['right_lane_inds'], {}), '(right_lane_inds)\n', (5827, 5844), True, 'import numpy as np\n'), ((6119, 6146), 'numpy.polyfit', 'np.polyfit', (['lefty', 'leftx', '(2)'], {}), '(lefty, leftx, 2)\n', (6129, 6146), True, 'import numpy as np\n'), ((6560, 6573), 'numpy.mean', 'np.mean', (['C0_L'], {}), '(C0_L)\n', (6567, 6573), True, 'import numpy as np\n'), ((6594, 6607), 'numpy.mean', 'np.mean', (['C1_L'], {}), '(C1_L)\n', (6601, 6607), True, 'import numpy as np\n'), ((6628, 6641), 'numpy.mean', 'np.mean', (['C2_L'], {}), '(C2_L)\n', (6635, 6641), True, 'import numpy as np\n'), ((6775, 6804), 'numpy.polyfit', 'np.polyfit', (['righty', 'rightx', '(2)'], {}), '(righty, rightx, 2)\n', (6785, 6804), True, 'import numpy as np\n'), ((7225, 7238), 'numpy.mean', 'np.mean', (['C0_R'], {}), '(C0_R)\n', (7232, 7238), True, 'import numpy as np\n'), ((7259, 7272), 'numpy.mean', 'np.mean', (['C1_R'], {}), '(C1_R)\n', (7266, 7272), True, 'import numpy as np\n'), ((7293, 7306), 'numpy.mean', 'np.mean', (['C2_R'], {}), '(C2_R)\n', (7300, 7306), True, 'import numpy as np\n'), ((7475, 7539), 'numpy.linspace', 'np.linspace', (['(0)', '(warped_image.shape[0] - 1)', 'warped_image.shape[0]'], {}), '(0, warped_image.shape[0] - 1, warped_image.shape[0])\n', (7486, 7539), True, 'import numpy as np\n'), ((8305, 8349), 'numpy.dstack', 'np.dstack', (['(warp_zero, warp_zero, warp_zero)'], {}), '((warp_zero, warp_zero, warp_zero))\n', (8314, 8349), True, 'import numpy as np\n'), ((8595, 8627), 'numpy.hstack', 'np.hstack', (['(pts_left, pts_right)'], {}), '((pts_left, pts_right))\n', (8604, 8627), True, 'import numpy as np\n'), ((9157, 9170), 'numpy.max', 'np.max', (['ploty'], {}), '(ploty)\n', (9163, 9170), True, 'import numpy as np\n'), ((9727, 9784), 'numpy.polyfit', 'np.polyfit', (['(ploty * ym_per_pix)', '(left_fitx * xm_per_pix)', '(2)'], {}), '(ploty * ym_per_pix, left_fitx * xm_per_pix, 2)\n', (9737, 9784), True, 'import numpy as np\n'), ((9801, 9859), 'numpy.polyfit', 'np.polyfit', (['(ploty * ym_per_pix)', '(right_fitx * xm_per_pix)', '(2)'], {}), '(ploty * ym_per_pix, right_fitx * xm_per_pix, 2)\n', (9811, 9859), True, 'import numpy as np\n'), ((10668, 10745), 'cv2.warpPerspective', 'cv2.warpPerspective', (['color_warp', 'Minv', '(orig_img.shape[1], orig_img.shape[0])'], {}), '(color_warp, Minv, (orig_img.shape[1], orig_img.shape[0]))\n', (10687, 10745), False, 'import cv2\n'), ((10795, 10895), 'cv2.putText', 'cv2.putText', (['newwarp', '"""Recording: project_video"""', '(10, 50)', 'font', '(1)', '(255, 0, 0)', '(3)', 'cv2.LINE_AA'], {}), "(newwarp, 'Recording: project_video', (10, 50), font, 1, (255, 0,\n 0), 3, cv2.LINE_AA)\n", (10806, 10895), False, 'import cv2\n'), ((11385, 11439), 'numpy.arange', 'np.arange', (['(newwarp.shape[0] - 10)', '(newwarp.shape[0] + 1)'], {}), '(newwarp.shape[0] - 10, newwarp.shape[0] + 1)\n', (11394, 11439), True, 'import numpy as np\n'), ((13097, 13138), 'cv2.cvtColor', 'cv2.cvtColor', (['orig_img', 'cv2.COLOR_BGR2RGB'], {}), '(orig_img, cv2.COLOR_BGR2RGB)\n', (13109, 13138), False, 'import cv2\n'), ((13215, 13255), 'cv2.addWeighted', 'cv2.addWeighted', (['img', '(1)', 'newwarp', '(0.3)', '(0)'], {}), '(img, 1, newwarp, 0.3, 0)\n', (13230, 13255), False, 'import cv2\n'), 
((14131, 14155), 'os.listdir', 'os.listdir', (['"""camera_cal"""'], {}), "('camera_cal')\n", (14141, 14155), False, 'import os\n'), ((14209, 14244), 'camera_calibration.calibrate_camera', 'calibrate_camera', (['image_dir', 'images'], {}), '(image_dir, images)\n', (14225, 14244), False, 'from camera_calibration import calibrate_camera, distortion_correct\n'), ((14456, 14527), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'gray.shape[::-1]', 'None', 'None'], {}), '(objpoints, imgpoints, gray.shape[::-1], None, None)\n', (14475, 14527), False, 'import cv2\n'), ((14705, 14730), 'os.listdir', 'os.listdir', (['"""test_images"""'], {}), "('test_images')\n", (14715, 14730), False, 'import os\n'), ((14890, 14938), 'camera_calibration.distortion_correct', 'distortion_correct', (['image_dir', 'images', 'mtx', 'dist'], {}), '(image_dir, images, mtx, dist)\n', (14908, 14938), False, 'from camera_calibration import calibrate_camera, distortion_correct\n'), ((14944, 14967), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (14965, 14967), False, 'import cv2\n'), ((15208, 15233), 'os.listdir', 'os.listdir', (['"""test_images"""'], {}), "('test_images')\n", (15218, 15233), False, 'import os\n'), ((15895, 15938), 'glob.glob', 'glob.glob', (['"""./test_images/*Distortion*.jpg"""'], {}), "('./test_images/*Distortion*.jpg')\n", (15904, 15938), False, 'import glob\n'), ((16037, 16076), 'cv2.cvtColor', 'cv2.cvtColor', (['images', 'cv2.COLOR_BGR2RGB'], {}), '(images, cv2.COLOR_BGR2RGB)\n', (16049, 16076), False, 'import cv2\n'), ((16352, 16407), 'sobel_library.abs_sobel_image', 'abs_sobel_image', (['img', '"""x"""', 'grad_threshold', 'sobel_kernel'], {}), "(img, 'x', grad_threshold, sobel_kernel)\n", (16367, 16407), False, 'from sobel_library import abs_sobel_image, sobel_mag_thresh, sobel_dir_thresh\n'), ((16589, 16644), 'sobel_library.abs_sobel_image', 'abs_sobel_image', (['img', '"""y"""', 'grad_threshold', 'sobel_kernel'], {}), "(img, 'y', grad_threshold, sobel_kernel)\n", (16604, 16644), False, 'from sobel_library import abs_sobel_image, sobel_mag_thresh, sobel_dir_thresh\n'), ((16896, 16959), 'sobel_library.sobel_dir_thresh', 'sobel_dir_thresh', (['img'], {'sobel_kernel': '(15)', 'thresh': '(0.6, np.pi / 2)'}), '(img, sobel_kernel=15, thresh=(0.6, np.pi / 2))\n', (16912, 16959), False, 'from sobel_library import abs_sobel_image, sobel_mag_thresh, sobel_dir_thresh\n'), ((17235, 17292), 'sobel_library.sobel_mag_thresh', 'sobel_mag_thresh', (['img', 'sobel_kernel'], {'mag_thresh': '(80, 150)'}), '(img, sobel_kernel, mag_thresh=(80, 150))\n', (17251, 17292), False, 'from sobel_library import abs_sobel_image, sobel_mag_thresh, sobel_dir_thresh\n'), ((17595, 17620), 'numpy.zeros_like', 'np.zeros_like', (['dir_binary'], {}), '(dir_binary)\n', (17608, 17620), True, 'import numpy as np\n'), ((18783, 18844), 'numpy.float32', 'np.float32', (['[(200, 720), (580, 480), (720, 480), (1050, 720)]'], {}), '([(200, 720), (580, 480), (720, 480), (1050, 720)])\n', (18793, 18844), True, 'import numpy as np\n'), ((18856, 18916), 'numpy.float32', 'np.float32', (['[(280, 720), (400, 190), (920, 190), (960, 720)]'], {}), '([(280, 720), (400, 190), (920, 190), (960, 720)])\n', (18866, 18916), True, 'import numpy as np\n'), ((18955, 19100), 'numpy.float32', 'np.float32', (['[[offset, img_size[1] - offset], [img_size[0] - offset, img_size[1] -\n offset], [img_size[0] - offset, offset], [offset, offset]]'], {}), '([[offset, img_size[1] - offset], [img_size[0] - offset, img_size\n [1] - 
offset], [img_size[0] - offset, offset], [offset, offset]])\n', (18965, 19100), True, 'import numpy as np\n'), ((19193, 19254), 'numpy.float32', 'np.float32', (['([450, 780], [680, 1050], [680, 250], [450, 500])'], {}), '(([450, 780], [680, 1050], [680, 250], [450, 500]))\n', (19203, 19254), True, 'import numpy as np\n'), ((668, 704), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HLS'], {}), '(img, cv2.COLOR_RGB2HLS)\n', (680, 704), False, 'import cv2\n'), ((2990, 3043), 'numpy.dstack', 'np.dstack', (['(warped_image, warped_image, warped_image)'], {}), '((warped_image, warped_image, warped_image))\n', (2999, 3043), True, 'import numpy as np\n'), ((3298, 3329), 'numpy.argmax', 'np.argmax', (['histogram[midpoint:]'], {}), '(histogram[midpoint:])\n', (3307, 3329), True, 'import numpy as np\n'), ((4619, 4719), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xleft_low, win_y_low)', '(win_xleft_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xleft_low, win_y_low), (win_xleft_high,\n win_y_high), (0, 255, 0), 2)\n', (4632, 4719), False, 'import cv2\n'), ((4729, 4831), 'cv2.rectangle', 'cv2.rectangle', (['out_img', '(win_xright_low, win_y_low)', '(win_xright_high, win_y_high)', '(0, 255, 0)', '(2)'], {}), '(out_img, (win_xright_low, win_y_low), (win_xright_high,\n win_y_high), (0, 255, 0), 2)\n', (4742, 4831), False, 'import cv2\n'), ((8713, 8727), 'numpy.int_', 'np.int_', (['[pts]'], {}), '([pts])\n', (8720, 8727), True, 'import numpy as np\n'), ((9247, 9275), 'numpy.absolute', 'np.absolute', (['(2 * left_fit[0])'], {}), '(2 * left_fit[0])\n', (9258, 9275), True, 'import numpy as np\n'), ((9353, 9382), 'numpy.absolute', 'np.absolute', (['(2 * right_fit[0])'], {}), '(2 * right_fit[0])\n', (9364, 9382), True, 'import numpy as np\n'), ((9993, 10024), 'numpy.absolute', 'np.absolute', (['(2 * left_fit_cr[0])'], {}), '(2 * left_fit_cr[0])\n', (10004, 10024), True, 'import numpy as np\n'), ((10119, 10151), 'numpy.absolute', 'np.absolute', (['(2 * right_fit_cr[0])'], {}), '(2 * right_fit_cr[0])\n', (10130, 10151), True, 'import numpy as np\n'), ((12177, 12300), 'cv2.putText', 'cv2.putText', (['newwarp', '"""Ego Vehicle is directly on center!! Great job!"""', '(10, 150)', 'font', '(1)', '(255, 0, 0)', '(3)', 'cv2.LINE_AA'], {}), "(newwarp, 'Ego Vehicle is directly on center!! 
Great job!', (10,\n 150), font, 1, (255, 0, 0), 3, cv2.LINE_AA)\n", (12188, 12300), False, 'import cv2\n'), ((15826, 15874), 'camera_calibration.distortion_correct', 'distortion_correct', (['image_dir', 'images', 'mtx', 'dist'], {}), '(image_dir, images, mtx, dist)\n', (15844, 15874), False, 'from camera_calibration import calibrate_camera, distortion_correct\n'), ((20097, 20143), 'moviepy.editor.VideoFileClip', 'VideoFileClip', (['"""test_videos/project_video.mp4"""'], {}), "('test_videos/project_video.mp4')\n", (20110, 20143), False, 'from moviepy.editor import VideoFileClip\n'), ((8242, 8269), 'numpy.zeros_like', 'np.zeros_like', (['warped_image'], {}), '(warped_image)\n', (8255, 8269), True, 'import numpy as np\n'), ((15647, 15690), 'glob.glob', 'glob.glob', (['"""./test_images/*Distortion*.jpg"""'], {}), "('./test_images/*Distortion*.jpg')\n", (15656, 15690), False, 'import glob\n'), ((5531, 5564), 'numpy.mean', 'np.mean', (['nonzerox[good_left_inds]'], {}), '(nonzerox[good_left_inds])\n', (5538, 5564), True, 'import numpy as np\n'), ((5654, 5688), 'numpy.mean', 'np.mean', (['nonzerox[good_right_inds]'], {}), '(nonzerox[good_right_inds])\n', (5661, 5688), True, 'import numpy as np\n'), ((8466, 8495), 'numpy.vstack', 'np.vstack', (['[left_fitx, ploty]'], {}), '([left_fitx, ploty])\n', (8475, 8495), True, 'import numpy as np\n'), ((8549, 8579), 'numpy.vstack', 'np.vstack', (['[right_fitx, ploty]'], {}), '([right_fitx, ploty])\n', (8558, 8579), True, 'import numpy as np\n')]
|
from IMLearn.learners import UnivariateGaussian, MultivariateGaussian
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
pio.templates.default = "simple_white"
SAMPLES_NUM = 1000
LEFT_CIRCLE = '('
RIGHT_CIRCLE = ')'
COMMA = ', '
GRAPH_SIZE = 500
HEATMAP_SIZE = 700
def test_univariate_gaussian():
# Question 1 - Draw samples and print fitted model
uni = UnivariateGaussian()
mu, sigma = 10, 1
s = np.random.normal(mu, sigma, SAMPLES_NUM)
res = uni.fit(s)
print(LEFT_CIRCLE + str(res.mu_) + COMMA + str(res.var_) + RIGHT_CIRCLE)
# Question 2 - Empirically showing sample mean is consistent
ms = np.linspace(10, 1000, 100).astype(int)
diff = []
for m in ms:
diff.append(abs(uni.fit(s[0:m]).mu_ - mu))
go.Figure([go.Scatter(x=ms, y=diff, mode='markers+lines')],
layout=go.Layout(title=r"$\text{ Distance between estimated "
r"and true value of the expectation as a function of samples number}$",
xaxis_title="$m\\text{ - number of samples}$",
                                       yaxis_title=r"$distance$",
height=GRAPH_SIZE)).show()
# Question 3 - Plotting Empirical PDF of fitted model
pdf_values = uni.pdf(s)
go.Figure([go.Scatter(x=s, y=pdf_values, mode='markers')],
layout=go.Layout(title=r"$\text{ Sampled values distribution}$",
xaxis_title="$m\\text{ - sampled values}$",
                                     yaxis_title=r"$pdf - values$",
height=GRAPH_SIZE)).show()
# As I expected, the samples' distribution is gaussian around the expectation (10)
def test_multivariate_gaussian():
# Question 4 - Draw samples and print fitted model
multi_uni = MultivariateGaussian()
mu = np.array([0, 0, 4, 0])
sigma = np.asarray([[1, 0.2, 0, 0.5],
[0.2, 2, 0, 0],
[0, 0, 1, 0],
[0.5, 0, 0, 1]])
s = np.random.multivariate_normal(mu, sigma, SAMPLES_NUM)
res = multi_uni.fit(s)
print(str(res.mu_) + '\n' + str(res.cov_))
# Question 5 - Likelihood evaluation
ms = np.linspace(-10, 10, 200)
logs = np.zeros((200, 200))
i = 0
j = 0
for f1 in ms:
for f3 in ms:
logs[i][j] = (MultivariateGaussian.log_likelihood(np.transpose([f1, 0, f3, 0]), sigma, s))
j += 1
j = 0
i += 1
go.Figure([go.Heatmap(x=ms, y=ms, z=np.asarray(logs), colorbar=dict(title="Log Likelihood"))],
layout=go.Layout(title=
r"$\text{ Log Likelihood as function of "
r"different expectancies}$",
width=HEATMAP_SIZE, height=HEATMAP_SIZE,
xaxis_title="$f3$", yaxis_title="$f1$")).show()
# Question 6 - Maximum likelihood
index = np.argmax(logs)
row = int(index / 200)
col = int(index % 200)
print("Maximum value is achieved for the pair: f1 = " + str(round(ms[row], 3)) + " f3 = " + str(round(ms[col], 3)))
if __name__ == '__main__':
np.random.seed(0)
test_univariate_gaussian()
test_multivariate_gaussian()
|
[
"numpy.random.normal",
"numpy.transpose",
"plotly.graph_objects.Layout",
"numpy.random.multivariate_normal",
"numpy.asarray",
"numpy.argmax",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"plotly.graph_objects.Scatter",
"numpy.random.seed",
"IMLearn.learners.UnivariateGaussian",
"IMLearn.learners.MultivariateGaussian"
] |
[((392, 412), 'IMLearn.learners.UnivariateGaussian', 'UnivariateGaussian', ([], {}), '()\n', (410, 412), False, 'from IMLearn.learners import UnivariateGaussian, MultivariateGaussian\n'), ((443, 483), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma', 'SAMPLES_NUM'], {}), '(mu, sigma, SAMPLES_NUM)\n', (459, 483), True, 'import numpy as np\n'), ((1848, 1870), 'IMLearn.learners.MultivariateGaussian', 'MultivariateGaussian', ([], {}), '()\n', (1868, 1870), False, 'from IMLearn.learners import UnivariateGaussian, MultivariateGaussian\n'), ((1880, 1902), 'numpy.array', 'np.array', (['[0, 0, 4, 0]'], {}), '([0, 0, 4, 0])\n', (1888, 1902), True, 'import numpy as np\n'), ((1915, 1991), 'numpy.asarray', 'np.asarray', (['[[1, 0.2, 0, 0.5], [0.2, 2, 0, 0], [0, 0, 1, 0], [0.5, 0, 0, 1]]'], {}), '([[1, 0.2, 0, 0.5], [0.2, 2, 0, 0], [0, 0, 1, 0], [0.5, 0, 0, 1]])\n', (1925, 1991), True, 'import numpy as np\n'), ((2066, 2119), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'sigma', 'SAMPLES_NUM'], {}), '(mu, sigma, SAMPLES_NUM)\n', (2095, 2119), True, 'import numpy as np\n'), ((2246, 2271), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(200)'], {}), '(-10, 10, 200)\n', (2257, 2271), True, 'import numpy as np\n'), ((2283, 2303), 'numpy.zeros', 'np.zeros', (['(200, 200)'], {}), '((200, 200))\n', (2291, 2303), True, 'import numpy as np\n'), ((3176, 3191), 'numpy.argmax', 'np.argmax', (['logs'], {}), '(logs)\n', (3185, 3191), True, 'import numpy as np\n'), ((3399, 3416), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3413, 3416), True, 'import numpy as np\n'), ((658, 684), 'numpy.linspace', 'np.linspace', (['(10)', '(1000)', '(100)'], {}), '(10, 1000, 100)\n', (669, 684), True, 'import numpy as np\n'), ((2426, 2454), 'numpy.transpose', 'np.transpose', (['[f1, 0, f3, 0]'], {}), '([f1, 0, f3, 0])\n', (2438, 2454), True, 'import numpy as np\n'), ((796, 842), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'ms', 'y': 'diff', 'mode': '"""markers+lines"""'}), "(x=ms, y=diff, mode='markers+lines')\n", (806, 842), True, 'import plotly.graph_objects as go\n'), ((866, 1096), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': '"""$\\\\text{ Distance between estimated and true value of the expectation as a function of samples number}$"""', 'xaxis_title': '"""$m\\\\text{ - number of samples}$"""', 'yaxis_title': '"""r$distance$"""', 'height': 'GRAPH_SIZE'}), "(title=\n '$\\\\text{ Distance between estimated and true value of the expectation as a function of samples number}$'\n , xaxis_title='$m\\\\text{ - number of samples}$', yaxis_title=\n 'r$distance$', height=GRAPH_SIZE)\n", (875, 1096), True, 'import plotly.graph_objects as go\n'), ((1328, 1373), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 's', 'y': 'pdf_values', 'mode': '"""markers"""'}), "(x=s, y=pdf_values, mode='markers')\n", (1338, 1373), True, 'import plotly.graph_objects as go\n'), ((1397, 1559), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': '"""$\\\\text{ Sampled values distribution}$"""', 'xaxis_title': '"""$m\\\\text{ - sampled values}$"""', 'yaxis_title': '"""r$ pdf - values$"""', 'height': 'GRAPH_SIZE'}), "(title='$\\\\text{ Sampled values distribution}$', xaxis_title=\n '$m\\\\text{ - sampled values}$', yaxis_title='r$ pdf - values$', height=\n GRAPH_SIZE)\n", (1406, 1559), True, 'import plotly.graph_objects as go\n'), ((2687, 2863), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': '"""$\\\\text{ Log Likelihood as function of different 
expectancies}$"""', 'width': 'HEATMAP_SIZE', 'height': 'HEATMAP_SIZE', 'xaxis_title': '"""$f3$"""', 'yaxis_title': '"""$f1$"""'}), "(title=\n '$\\\\text{ Log Likelihood as function of different expectancies}$',\n width=HEATMAP_SIZE, height=HEATMAP_SIZE, xaxis_title='$f3$',\n yaxis_title='$f1$')\n", (2696, 2863), True, 'import plotly.graph_objects as go\n'), ((2556, 2572), 'numpy.asarray', 'np.asarray', (['logs'], {}), '(logs)\n', (2566, 2572), True, 'import numpy as np\n')]
|
import csv
import random
from functools import partial
from typing import Callable, Optional
from pdb import set_trace as st
import os
import random
import pandas as pd
from typing import Any, Callable, Dict, Iterable, List, Tuple, Union
import numpy as np
import tensorflow as tf
from foolbox.attacks import (
FGSM,
Attack,
DeepFoolAttack,
IterativeGradientSignAttack,
SaliencyMapAttack,
)
# from foolbox.criteria import TargetClass
# from foolbox.models import TensorFlowModel
from tensorflow.python.training import saver
from tensorflow.python.training.session_manager import SessionManager
import tensorflow as tf
import numpy as np
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
from model.config import LENET
from model import LeNet
import nninst_mode as mode
from dataset import mnist
from dataset.config import MNIST_TRAIN, MNIST_PATH
from dataset.mnist_transforms import *
from trace.lenet_mnist_class_trace_v2 import (
data_config,
)
from trace.common import (
class_trace,
)
from tf_utils import new_session_config
from nninst_statistics import calc_trace_side_overlap
from nninst_trace import TraceKey
from nninst_utils.numpy import arg_approx
from nninst_utils.ray import ray_init
from nninst_utils.fs import ensure_dir, IOAction, CsvIOAction, abspath
from .common import get_overlay_summary, clean_overlap_ratio, \
translation_overlap_ratio, attack_overlap_ratio, \
lenet_mnist_example
from .cw_attack import cw_generate_adversarial_example
from .eval_mnist import foolbox_generate_adversarial_example
from .cw_attacks import CarliniL2
from nninst_graph import AttrMap, Graph, GraphAttrKey
from nninst_utils.ray import ray_iter
from tf_graph import (
MaskWeightWithTraceHook,
model_fn_with_fetch_hook,
)
from trace.common import (
get_predicted_value,
get_rank,
predict,
reconstruct_class_trace_from_tf,
reconstruct_trace_from_tf,
reconstruct_trace_from_tf_brute_force,
)
from .analyse_class_trace import reconstruct_edge
# Model config
model_label = "augmentation"
model_dir = f"result/lenet/model_{model_label}"
# Trace config
trace_dir = f"{model_dir}/traces"
trace_name = "noop"
# Result dir
result_name = "test"
key = TraceKey.POINT
# Result dir
key_name = key.split('.')[1]
# reduce_mode includes output, channel, none
reduce_mode = "none"
result_dir = f"{model_dir}/conv_point_NOT/{reduce_mode}_{trace_name}_attack_overlap"
# result_dir = f"result/lenet/test"
images_per_class = 100
attack_name = "FGSM"
attacks = {
"FGSM": [FGSM],
"BIM": [IterativeGradientSignAttack],
"JSMA": [SaliencyMapAttack],
"DeepFool": [DeepFoolAttack],
# "DeepFool_full": [DeepFoolAttack, dict(subsample=None)],
# "CWL2": [CarliniL2],
}
adversarial_label = 1
normal_label = -1
# Trace threshold (the same value is used inside attack_transform_overlap below)
threshold = 0.5
class_trace_fn = lambda class_id: lenet_mnist_class_trace(
    class_id,
    threshold,
    label=model_label,
    trace_dir=trace_dir,
)
lenet_mnist_class_trace = class_trace(
trace_name,
model_config=LENET,
data_config=data_config,
)
def reconstruct_point(
trace,
graph,
key,
node_name,
):
attrs = trace.nodes[node_name]
def to_bitmap(shape, attr):
mask = np.zeros(np.prod(shape), dtype=np.int8)
mask[TraceKey.to_array(attr)] = 1
return mask.reshape(shape)
if key in attrs:
return to_bitmap(attrs[key + "_shape"], attrs[key])
else:
for attr_name, attr in attrs.items():
if attr_name.startswith(TraceKey.POINT + ".") and attr is not None:
return to_bitmap(attrs[TraceKey.POINT_SHAPE], attr)
RuntimeError(f"Key not found")
def filter_point_by_key(
trace: AttrMap,
key: str =TraceKey.POINT,
graph = LENET.network_class.graph().load(),
):
reconstruct_point_fn = partial(
reconstruct_point,
trace,
graph,
key,
)
op_to_mask = {}
# print(trace.nodes.keys())
for node_name in sorted(trace.nodes):
# print(f"{node_name}: {trace.nodes[node_name].keys()}")
if key in trace.nodes[node_name]:
op_to_mask[node_name] = reconstruct_point_fn(node_name)
# for op in op_to_mask:
# print(f"{op}: {op_to_mask[op].shape}")
# st()
return op_to_mask
def reduce_edge_mask(edge_mask: AttrMap, reduce_mode="none"):
reduced_edge = {}
for node_name in edge_mask:
# shape of edge (Ci, Hk, Wk, Co, Ho, Wo)
edge = edge_mask[node_name]
if "conv2d" in node_name:
if reduce_mode == "channel":
edge_sum = edge_mask[node_name].sum(0)
edge_sum[edge_sum>0] = 1
elif reduce_mode == "output":
edge_sum = edge_mask[node_name].sum(-1).sum(-1)
edge_sum[edge_sum>0] = 1
else:
edge_sum = edge_mask[node_name]
else:
edge_sum = edge
reduced_edge[node_name] = edge_sum
return reduced_edge
def detect_by_reduced_edge(class_trace, trace, reduce_mode = "none"):
class_masks = filter_point_by_key(
class_trace,
key = key
)
sample_masks = filter_point_by_key(
trace,
key = key
)
class_masks = reduce_edge_mask(class_masks, reduce_mode = reduce_mode)
sample_masks = reduce_edge_mask(sample_masks, reduce_mode = reduce_mode)
is_adversarial = False
for node_name in class_masks:
if "conv2d" not in node_name or "Relu" not in node_name:
continue
class_mask = class_masks[node_name]
sample_mask = sample_masks[node_name]
class_zero = class_mask==0
sample_zero_sum = sample_mask[class_zero].sum()
if sample_zero_sum>0:
is_adversarial = True
if is_adversarial:
return adversarial_label
else:
return normal_label
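# Editor's sketch (not part of the original pipeline): a minimal, self-contained
# illustration of the rule implemented by detect_by_reduced_edge above, using
# hypothetical toy masks. A sample is flagged as adversarial as soon as its point
# mask activates a position that the class trace never uses.
def _toy_detection_example():
    class_mask = np.array([[1, 0],
                           [0, 1]])   # positions covered by the class trace
    sample_mask = np.array([[1, 1],
                            [0, 1]])  # positions covered by the sample trace
    # position (0, 1) is active in the sample but zero in the class trace,
    # so the rule above would flag the sample as adversarial
    return adversarial_label if sample_mask[class_mask == 0].sum() > 0 else normal_label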
# Compute the mean overlap ratio of attacked image
def attack_reduced_edge_detection(
attack_name: str,
attack_fn,
generate_adversarial_fn,
class_trace_fn: Callable[[int], IOAction[AttrMap]],
select_fn: Callable[[np.ndarray], np.ndarray],
overlap_fn: Callable[[AttrMap, AttrMap, str], float],
path: str,
per_channel: bool = False,
per_node: bool = False,
images_per_class: int = 1,
num_gpus: float = 0.2,
model_dir = "result/lenet/model_augmentation",
transforms = None,
transform_name = "noop",
reduce_mode = "none",
**kwargs,
):
def get_overlap_ratio() -> pd.DataFrame:
def get_row(class_id: int, image_id: int) -> Dict[str, Any]:
nonlocal model_dir
mode.check(False)
data_dir = abspath(MNIST_PATH)
model_dir = abspath(model_dir)
ckpt_dir = f"{model_dir}/ckpts"
create_model = lambda: LeNet(data_format="channels_first")
graph = LeNet.graph().load()
model_fn = partial(
model_fn_with_fetch_hook,
create_model=create_model, graph=graph
)
predicted_label = predict(
create_model=create_model,
input_fn=lambda: mnist.test(data_dir)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
model_dir=ckpt_dir,
)
if predicted_label != class_id:
return [{}] if per_node else {}
adversarial_example = lenet_mnist_example(
attack_name=attack_name,
attack_fn=attack_fn,
generate_adversarial_fn=generate_adversarial_fn,
class_id=class_id,
image_id=image_id,
# model_dir not ckpt_dir
model_dir=model_dir,
transforms = transforms,
transform_name = transform_name,
mode = "test",
).load()
if adversarial_example is None:
return [{}] if per_node else {}
adversarial_predicted_label = predict(
create_model=create_model,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
model_dir=ckpt_dir,
)
if predicted_label == adversarial_predicted_label:
return [{}] if per_node else {}
trace = reconstruct_trace_from_tf(
class_id=class_id,
model_fn=model_fn,
input_fn=lambda: mnist.test(data_dir, transforms=transforms)
.filter(
lambda image, label: tf.equal(
tf.convert_to_tensor(class_id, dtype=tf.int32), label
)
)
.skip(image_id)
.take(1)
.batch(1),
select_fn=select_fn,
model_dir=ckpt_dir,
per_channel=per_channel,
)[0]
if trace is None:
return [{}] if per_node else {}
adversarial_trace = reconstruct_trace_from_tf_brute_force(
model_fn=model_fn,
input_fn=lambda: tf.data.Dataset.from_tensors(
mnist.normalize(adversarial_example)
),
select_fn=select_fn,
model_dir=ckpt_dir,
per_channel=per_channel,
)[0]
adversarial_label = adversarial_trace.attrs[GraphAttrKey.PREDICT]
row = {
"image_id": image_id,
"class_id": class_id,
"original.prediction":
detect_by_reduced_edge(
class_trace_fn(class_id).load(),
trace,
reduce_mode,
),
"adversarial.prediction":
detect_by_reduced_edge(
class_trace_fn(adversarial_label).load(),
adversarial_trace,
reduce_mode,
),
}
return row
detections = ray_iter(
get_row,
(
(class_id, image_id)
for image_id in range(0, images_per_class)
for class_id in range(0, 10)
),
# ((-1, image_id) for image_id in range(mnist_info.test().size)),
chunksize=1,
out_of_order=True,
num_gpus=num_gpus,
)
traces = [detection for detection in detections if len(detection) != 0]
return pd.DataFrame(traces)
return CsvIOAction(path, init_fn=get_overlap_ratio)
def attack_transform_overlap(attack_name,
transform_name,
transforms,
reduce_mode = "none",
result_dir = "result/lenet/9transform_attack_overlap"):
name = attack_name+'_'+transform_name
lenet_mnist_class_trace = class_trace(
trace_name,
model_config=LENET,
data_config=data_config,
)
threshold = 0.5
# DeepFool will shutdown when num_gpu<0.2
num_gpus = 0.2
overlap_fn = calc_trace_side_overlap
per_channel = False
path = os.path.join(result_dir, f"{name}_overlap.csv")
# print(f"Computing {name}")
# lenet_overlap_ratio = attack_reduced_edge_detection_count_violation(
lenet_overlap_ratio = attack_reduced_edge_detection(
attack_name=attack_name,
attack_fn=attacks[attack_name][0],
generate_adversarial_fn=cw_generate_adversarial_example
if attack_name.startswith("CW")
else foolbox_generate_adversarial_example,
class_trace_fn=lambda class_id: lenet_mnist_class_trace(
class_id,
threshold,
label=model_label,
trace_dir = trace_dir,
),
select_fn=lambda input: arg_approx(input, threshold),
overlap_fn=overlap_fn,
path=path,
per_channel=per_channel,
preprocessing=(0.1307, 0.3081),
image_size=28,
class_num=10,
norm_fn=mnist.normalize,
data_format="channels_first",
**(attacks[attack_name][1] if len(attacks[attack_name]) == 2 else {}),
images_per_class=images_per_class,
model_dir=model_dir,
num_gpus = num_gpus,
transforms = transforms,
transform_name = transform_name,
reduce_mode = reduce_mode,
)
lenet_overlap_ratio.save()
return lenet_overlap_ratio.load()
def compute_accuracy(trace_frame):
adversarial_metric = trace_frame["adversarial.prediction"]
original_metric = trace_frame["original.prediction"]
predictions = np.concatenate([adversarial_metric, original_metric])
row_filter = np.isfinite(predictions)
labels = np.concatenate(
[
np.repeat(1, adversarial_metric.shape[0]),
np.repeat(-1, original_metric.shape[0]),
]
)
labels = labels[row_filter]
predictions = predictions[row_filter]
fpr, tpr, thresholds = metrics.roc_curve(labels, predictions)
roc_auc = metrics.auc(fpr, tpr)
return fpr, tpr, roc_auc
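# Editor's sketch (illustrative only): compute_accuracy expects a DataFrame whose
# "adversarial.prediction" / "original.prediction" columns hold the +1 / -1 labels
# produced by detect_by_reduced_edge. A tiny synthetic frame, just to show the call:
def _compute_accuracy_demo():
    frame = pd.DataFrame({
        "adversarial.prediction": [1, 1, -1, 1],
        "original.prediction": [-1, -1, 1, -1],
    })
    fpr, tpr, roc_auc = compute_accuracy(frame)
    return fpr, tpr, roc_auc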
def draw_attack_transform_roc(exp_to_roc, save_name, result_dir):
plt.title('ROC')
detection_results = {}
for exp_name, item in exp_to_roc.items():
fpr, tpr, roc_auc, color = item
print(f"{exp_name}: fpr={fpr}, tpr={tpr}")
plt.plot(fpr, tpr,color,label=f"{exp_name}_AUC={roc_auc:.2f}")
detection_results[exp_name] = [fpr, tpr]
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.ylabel('TPR')
plt.xlabel('FPR')
path = os.path.join(result_dir, f"{save_name}.png")
plt.savefig(path)
path = os.path.join(result_dir, f"{save_name}.txt")
with open(path, "w") as f:
        for name, (fpr, tpr) in detection_results.items():
            print(f"{name}: fpr={fpr}, tpr={tpr}", file=f)
def attack_exp():
exp_to_roc = {}
os.makedirs(result_dir, exist_ok=True)
for transforms, transform_name, color in [
[None, "noop", 'b'],
# [Translate(dx=-5,dy=-5), "leftup", 'g'],
# [Translate(dx=5,dy=5), "rightdown", 'c'],
# [Translate(dx=-5), "left", 'y'],
# [Translate(dy=-5), "up", 'm'],
]:
exp_name = attack_name+"_"+transform_name
print(f"Computing {exp_name}")
trace_frame = attack_transform_overlap(attack_name,
transform_name,
transforms,
reduce_mode = reduce_mode,
result_dir=result_dir)
exp_to_roc[exp_name] = compute_accuracy(trace_frame) + (color,)
draw_attack_transform_roc(exp_to_roc,
save_name=attack_name,
result_dir=result_dir)
if __name__ == "__main__":
# mode.debug()
mode.local()
# ray_init("gpu")
ray_init(
log_to_driver=False
)
tf.set_random_seed(3)
np.random.seed(3)
random.seed(3)
attack_exp()
|
[
"numpy.prod",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"model.config.LENET.network_class.graph",
"nninst_utils.numpy.arg_approx",
"sklearn.metrics.roc_curve",
"numpy.isfinite",
"nninst_utils.fs.abspath",
"nninst_utils.ray.ray_init",
"matplotlib.pyplot.switch_backend",
"tensorflow.set_random_seed",
"nninst_mode.local",
"numpy.repeat",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"nninst_mode.check",
"numpy.random.seed",
"numpy.concatenate",
"pandas.DataFrame",
"model.LeNet.graph",
"tensorflow.convert_to_tensor",
"dataset.mnist.test",
"nninst_utils.fs.CsvIOAction",
"matplotlib.pyplot.savefig",
"trace.common.class_trace",
"model.LeNet",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"dataset.mnist.normalize",
"os.makedirs",
"os.path.join",
"random.seed",
"functools.partial",
"nninst_trace.TraceKey.to_array"
] |
[((724, 749), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""Agg"""'], {}), "('Agg')\n", (742, 749), True, 'import matplotlib.pyplot as plt\n'), ((3011, 3079), 'trace.common.class_trace', 'class_trace', (['trace_name'], {'model_config': 'LENET', 'data_config': 'data_config'}), '(trace_name, model_config=LENET, data_config=data_config)\n', (3022, 3079), False, 'from trace.common import class_trace\n'), ((3961, 4006), 'functools.partial', 'partial', (['reconstruct_point', 'trace', 'graph', 'key'], {}), '(reconstruct_point, trace, graph, key)\n', (3968, 4006), False, 'from functools import partial\n'), ((11111, 11155), 'nninst_utils.fs.CsvIOAction', 'CsvIOAction', (['path'], {'init_fn': 'get_overlap_ratio'}), '(path, init_fn=get_overlap_ratio)\n', (11122, 11155), False, 'from nninst_utils.fs import ensure_dir, IOAction, CsvIOAction, abspath\n'), ((11492, 11560), 'trace.common.class_trace', 'class_trace', (['trace_name'], {'model_config': 'LENET', 'data_config': 'data_config'}), '(trace_name, model_config=LENET, data_config=data_config)\n', (11503, 11560), False, 'from trace.common import class_trace\n'), ((11853, 11900), 'os.path.join', 'os.path.join', (['result_dir', 'f"""{name}_overlap.csv"""'], {}), "(result_dir, f'{name}_overlap.csv')\n", (11865, 11900), False, 'import os\n'), ((13327, 13380), 'numpy.concatenate', 'np.concatenate', (['[adversarial_metric, original_metric]'], {}), '([adversarial_metric, original_metric])\n', (13341, 13380), True, 'import numpy as np\n'), ((13398, 13422), 'numpy.isfinite', 'np.isfinite', (['predictions'], {}), '(predictions)\n', (13409, 13422), True, 'import numpy as np\n'), ((13687, 13725), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['labels', 'predictions'], {}), '(labels, predictions)\n', (13704, 13725), True, 'import sklearn.metrics as metrics\n'), ((13740, 13761), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (13751, 13761), True, 'import sklearn.metrics as metrics\n'), ((13863, 13879), 'matplotlib.pyplot.title', 'plt.title', (['"""ROC"""'], {}), "('ROC')\n", (13872, 13879), True, 'import matplotlib.pyplot as plt\n'), ((14170, 14199), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (14180, 14199), True, 'import matplotlib.pyplot as plt\n'), ((14204, 14235), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""r--"""'], {}), "([0, 1], [0, 1], 'r--')\n", (14212, 14235), True, 'import matplotlib.pyplot as plt\n'), ((14236, 14253), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TPR"""'], {}), "('TPR')\n", (14246, 14253), True, 'import matplotlib.pyplot as plt\n'), ((14258, 14275), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""FPR"""'], {}), "('FPR')\n", (14268, 14275), True, 'import matplotlib.pyplot as plt\n'), ((14288, 14332), 'os.path.join', 'os.path.join', (['result_dir', 'f"""{save_name}.png"""'], {}), "(result_dir, f'{save_name}.png')\n", (14300, 14332), False, 'import os\n'), ((14337, 14354), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (14348, 14354), True, 'import matplotlib.pyplot as plt\n'), ((14367, 14411), 'os.path.join', 'os.path.join', (['result_dir', 'f"""{save_name}.txt"""'], {}), "(result_dir, f'{save_name}.txt')\n", (14379, 14411), False, 'import os\n'), ((14589, 14627), 'os.makedirs', 'os.makedirs', (['result_dir'], {'exist_ok': '(True)'}), '(result_dir, exist_ok=True)\n', (14600, 14627), False, 'import os\n'), ((15572, 15584), 'nninst_mode.local', 'mode.local', ([], {}), '()\n', (15582, 
15584), True, 'import nninst_mode as mode\n'), ((15612, 15641), 'nninst_utils.ray.ray_init', 'ray_init', ([], {'log_to_driver': '(False)'}), '(log_to_driver=False)\n', (15620, 15641), False, 'from nninst_utils.ray import ray_init\n'), ((15661, 15682), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(3)'], {}), '(3)\n', (15679, 15682), True, 'import tensorflow as tf\n'), ((15687, 15704), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (15701, 15704), True, 'import numpy as np\n'), ((15709, 15723), 'random.seed', 'random.seed', (['(3)'], {}), '(3)\n', (15720, 15723), False, 'import random\n'), ((11078, 11098), 'pandas.DataFrame', 'pd.DataFrame', (['traces'], {}), '(traces)\n', (11090, 11098), True, 'import pandas as pd\n'), ((14053, 14117), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr', 'color'], {'label': 'f"""{exp_name}_AUC={roc_auc:.2f}"""'}), "(fpr, tpr, color, label=f'{exp_name}_AUC={roc_auc:.2f}')\n", (14061, 14117), True, 'import matplotlib.pyplot as plt\n'), ((3357, 3371), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (3364, 3371), True, 'import numpy as np\n'), ((3401, 3424), 'nninst_trace.TraceKey.to_array', 'TraceKey.to_array', (['attr'], {}), '(attr)\n', (3418, 3424), False, 'from nninst_trace import TraceKey\n'), ((3891, 3918), 'model.config.LENET.network_class.graph', 'LENET.network_class.graph', ([], {}), '()\n', (3916, 3918), False, 'from model.config import LENET\n'), ((6819, 6836), 'nninst_mode.check', 'mode.check', (['(False)'], {}), '(False)\n', (6829, 6836), True, 'import nninst_mode as mode\n'), ((6860, 6879), 'nninst_utils.fs.abspath', 'abspath', (['MNIST_PATH'], {}), '(MNIST_PATH)\n', (6867, 6879), False, 'from nninst_utils.fs import ensure_dir, IOAction, CsvIOAction, abspath\n'), ((6904, 6922), 'nninst_utils.fs.abspath', 'abspath', (['model_dir'], {}), '(model_dir)\n', (6911, 6922), False, 'from nninst_utils.fs import ensure_dir, IOAction, CsvIOAction, abspath\n'), ((7102, 7175), 'functools.partial', 'partial', (['model_fn_with_fetch_hook'], {'create_model': 'create_model', 'graph': 'graph'}), '(model_fn_with_fetch_hook, create_model=create_model, graph=graph)\n', (7109, 7175), False, 'from functools import partial\n'), ((13474, 13515), 'numpy.repeat', 'np.repeat', (['(1)', 'adversarial_metric.shape[0]'], {}), '(1, adversarial_metric.shape[0])\n', (13483, 13515), True, 'import numpy as np\n'), ((13529, 13568), 'numpy.repeat', 'np.repeat', (['(-1)', 'original_metric.shape[0]'], {}), '(-1, original_metric.shape[0])\n', (13538, 13568), True, 'import numpy as np\n'), ((7002, 7037), 'model.LeNet', 'LeNet', ([], {'data_format': '"""channels_first"""'}), "(data_format='channels_first')\n", (7007, 7037), False, 'from model import LeNet\n'), ((12518, 12546), 'nninst_utils.numpy.arg_approx', 'arg_approx', (['input', 'threshold'], {}), '(input, threshold)\n', (12528, 12546), False, 'from nninst_utils.numpy import arg_approx\n'), ((7058, 7071), 'model.LeNet.graph', 'LeNet.graph', ([], {}), '()\n', (7069, 7071), False, 'from model import LeNet\n'), ((8539, 8575), 'dataset.mnist.normalize', 'mnist.normalize', (['adversarial_example'], {}), '(adversarial_example)\n', (8554, 8575), False, 'from dataset import mnist\n'), ((9630, 9666), 'dataset.mnist.normalize', 'mnist.normalize', (['adversarial_example'], {}), '(adversarial_example)\n', (9645, 9666), False, 'from dataset import mnist\n'), ((7338, 7358), 'dataset.mnist.test', 'mnist.test', (['data_dir'], {}), '(data_dir)\n', (7348, 7358), False, 'from dataset import mnist\n'), ((7459, 7505), 
'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['class_id'], {'dtype': 'tf.int32'}), '(class_id, dtype=tf.int32)\n', (7479, 7505), True, 'import tensorflow as tf\n'), ((8908, 8951), 'dataset.mnist.test', 'mnist.test', (['data_dir'], {'transforms': 'transforms'}), '(data_dir, transforms=transforms)\n', (8918, 8951), False, 'from dataset import mnist\n'), ((9052, 9098), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['class_id'], {'dtype': 'tf.int32'}), '(class_id, dtype=tf.int32)\n', (9072, 9098), True, 'import tensorflow as tf\n')]
|
import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self):
world = World()
# set any world properties first
world.dim_c = 2
num_agents = 2
num_adversaries = 1
num_landmarks = 5
# add agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
if i < num_adversaries:
agent.adversary = True
agent.color = np.array([0.75, 0.25, 0.25])
else:
agent.adversary = False
agent.color = np.array([0.25, 0.25, 0.75])
# add landmarks for goal posts and puck
goal_posts = [[-0.25, -1.0],
[-0.25, 1.0],
[0.25, -1.0],
[0.25, 1.0]]
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
if i > 0:
landmark.collide = True
landmark.movable = False
landmark.state.p_pos = np.array(goal_posts[i-1])
landmark.state.p_vel = np.zeros(world.dim_p)
else:
landmark.collide = True
landmark.movable = True
# add landmarks for rink boundary
#world.landmarks += self.set_boundaries(world)
# make initial conditions
self.reset_world(world)
return world
def set_boundaries(self, world):
boundary_list = []
landmark_size = 1
edge = 1 + landmark_size
num_landmarks = int(edge * 2 / landmark_size)
for x_pos in [-edge, edge]:
for i in range(num_landmarks):
l = Landmark()
l.state.p_pos = np.array([x_pos, -1 + i * landmark_size])
boundary_list.append(l)
for y_pos in [-edge, edge]:
for i in range(num_landmarks):
l = Landmark()
l.state.p_pos = np.array([-1 + i * landmark_size, y_pos])
boundary_list.append(l)
for i, l in enumerate(boundary_list):
l.name = 'boundary %d' % i
l.collide = True
l.movable = False
l.boundary = True
l.color = np.array([0.75, 0.75, 0.75])
l.size = landmark_size
l.state.p_vel = np.zeros(world.dim_p)
return boundary_list
def reset_world(self, world):
# random properties for landmarks
for i, landmark in enumerate(world.landmarks):
if i > 0:
landmark.color = np.array([0.7, 0.7, 0.7])
else:
landmark.color = np.array([0.1, 0.1, 0.1])
landmark.index = i
# set random initial states
for agent in world.agents:
agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
world.landmarks[0].state.p_pos = np.random.uniform(-1, +1, world.dim_p)
world.landmarks[0].state.p_vel = np.zeros(world.dim_p)
# return all agents of the blue team
def blue_agents(self, world):
return [agent for agent in world.agents if not agent.adversary]
# return all agents of the red team
def red_agents(self, world):
return [agent for agent in world.agents if agent.adversary]
def reward(self, agent, world):
# Agents are rewarded based on team they belong to
return self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)
def agent_reward(self, agent, world):
# reward for blue team agent
return 0.0
def adversary_reward(self, agent, world):
# reward for red team agent
return 0.0
def observation(self, agent, world):
# get positions/vel of all entities in this agent's reference frame
entity_pos = []
entity_vel = []
for entity in world.landmarks: # world.entities:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
if entity.movable:
entity_vel.append(entity.state.p_vel)
# get positions/vel of all other agents in this agent's reference frame
other_pos = []
other_vel = []
for other in world.agents:
if other is agent: continue
other_pos.append(other.state.p_pos - agent.state.p_pos)
other_vel.append(other.state.p_vel)
return np.concatenate([agent.state.p_vel] + entity_pos + entity_vel + other_pos + other_vel)
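# Editor's sketch (not part of the scenario itself): how the surrounding
# multiagent-particle-envs framework would typically exercise this scenario;
# purely illustrative.
if __name__ == "__main__":
    scenario = Scenario()
    world = scenario.make_world()
    obs = scenario.observation(world.agents[0], world)
    # 2 agents, 5 landmarks, observation vector of dimension 18
    print(len(world.agents), len(world.landmarks), obs.shape)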
|
[
"multiagent.core.Landmark",
"numpy.array",
"numpy.zeros",
"multiagent.core.World",
"numpy.concatenate",
"numpy.random.uniform",
"multiagent.core.Agent"
] |
[((188, 195), 'multiagent.core.World', 'World', ([], {}), '()\n', (193, 195), False, 'from multiagent.core import World, Agent, Landmark\n'), ((3273, 3311), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(+1)', 'world.dim_p'], {}), '(-1, +1, world.dim_p)\n', (3290, 3311), True, 'import numpy as np\n'), ((3353, 3374), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (3361, 3374), True, 'import numpy as np\n'), ((4795, 4884), 'numpy.concatenate', 'np.concatenate', (['([agent.state.p_vel] + entity_pos + entity_vel + other_pos + other_vel)'], {}), '([agent.state.p_vel] + entity_pos + entity_vel + other_pos +\n other_vel)\n', (4809, 4884), True, 'import numpy as np\n'), ((383, 390), 'multiagent.core.Agent', 'Agent', ([], {}), '()\n', (388, 390), False, 'from multiagent.core import World, Agent, Landmark\n'), ((1043, 1053), 'multiagent.core.Landmark', 'Landmark', ([], {}), '()\n', (1051, 1053), False, 'from multiagent.core import World, Agent, Landmark\n'), ((2521, 2549), 'numpy.array', 'np.array', (['[0.75, 0.75, 0.75]'], {}), '([0.75, 0.75, 0.75])\n', (2529, 2549), True, 'import numpy as np\n'), ((2613, 2634), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (2621, 2634), True, 'import numpy as np\n'), ((3089, 3127), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(+1)', 'world.dim_p'], {}), '(-1, +1, world.dim_p)\n', (3106, 3127), True, 'import numpy as np\n'), ((3160, 3181), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (3168, 3181), True, 'import numpy as np\n'), ((3210, 3231), 'numpy.zeros', 'np.zeros', (['world.dim_c'], {}), '(world.dim_c)\n', (3218, 3231), True, 'import numpy as np\n'), ((678, 706), 'numpy.array', 'np.array', (['[0.75, 0.25, 0.25]'], {}), '([0.75, 0.25, 0.25])\n', (686, 706), True, 'import numpy as np\n'), ((795, 823), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.75]'], {}), '([0.25, 0.25, 0.75])\n', (803, 823), True, 'import numpy as np\n'), ((1328, 1355), 'numpy.array', 'np.array', (['goal_posts[i - 1]'], {}), '(goal_posts[i - 1])\n', (1336, 1355), True, 'import numpy as np\n'), ((1393, 1414), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (1401, 1414), True, 'import numpy as np\n'), ((1974, 1984), 'multiagent.core.Landmark', 'Landmark', ([], {}), '()\n', (1982, 1984), False, 'from multiagent.core import World, Agent, Landmark\n'), ((2017, 2058), 'numpy.array', 'np.array', (['[x_pos, -1 + i * landmark_size]'], {}), '([x_pos, -1 + i * landmark_size])\n', (2025, 2058), True, 'import numpy as np\n'), ((2199, 2209), 'multiagent.core.Landmark', 'Landmark', ([], {}), '()\n', (2207, 2209), False, 'from multiagent.core import World, Agent, Landmark\n'), ((2242, 2283), 'numpy.array', 'np.array', (['[-1 + i * landmark_size, y_pos]'], {}), '([-1 + i * landmark_size, y_pos])\n', (2250, 2283), True, 'import numpy as np\n'), ((2852, 2877), 'numpy.array', 'np.array', (['[0.7, 0.7, 0.7]'], {}), '([0.7, 0.7, 0.7])\n', (2860, 2877), True, 'import numpy as np\n'), ((2929, 2954), 'numpy.array', 'np.array', (['[0.1, 0.1, 0.1]'], {}), '([0.1, 0.1, 0.1])\n', (2937, 2954), True, 'import numpy as np\n')]
|
# Imports
import numpy as np
# Single to double frame
# Combines images by 2, returning an array with two frames (one for each image).
#
# Input: 5 images with step 1.
# Output: 4 double-framed images.
# FrameA: 1 2 3 4
# FrameB: 2 3 4 5
#
# Input: 8 images with step 3.
# Output: 5 double-framed images.
# FrameA: 1 2 3 4 5
# FrameB: 4 5 6 7 8
#
# This function also crops the image according to the provided Region of Interest (ROI), that must be passed as:
# ROI = [X-start X-end Y-start Y-end], for example: [1 100 1 50].
#
# Output:
# Array with the following dimensions: 0 - Image; 1 - Frame; 2 - Height (Y); 3 - Width (X).
def single_to_double_frame(images, step=1, roi=None):
total_images = images.shape[0]
frameA_idx = list(range(0, total_images-step))
    frameB_idx = [idx+step for idx in frameA_idx]
images_double_framed = []
for idx in frameA_idx:
double_frame = [images[frameA_idx[idx]], images[frameB_idx[idx]]]
if roi and len(roi) == 4:
size_y, size_x = double_frame[0].shape
min_x, max_x = max(0, roi[0]-1), min(roi[1], size_x)
            min_y, max_y = max(0, roi[2]-1), min(roi[3], size_y)
double_frame[0] = np.array(double_frame[0][min_y:max_y, min_x:max_x])
double_frame[1] = np.array(double_frame[1][min_y:max_y, min_x:max_x])
images_double_framed += [double_frame]
return np.array(images_double_framed)
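# Editor's sketch: minimal usage, assuming `images` is a stack of grayscale frames
# shaped (n_images, height, width); the sizes below are illustrative only.
if __name__ == "__main__":
    images = np.random.rand(8, 50, 100)
    pairs = single_to_double_frame(images, step=3, roi=[1, 100, 1, 50])
    print(pairs.shape)  # (5, 2, 50, 100): image, frame, Y, X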
|
[
"numpy.array"
] |
[((1479, 1509), 'numpy.array', 'np.array', (['images_double_framed'], {}), '(images_double_framed)\n', (1487, 1509), True, 'import numpy as np\n'), ((1273, 1324), 'numpy.array', 'np.array', (['double_frame[0][min_y:max_y, min_x:max_x]'], {}), '(double_frame[0][min_y:max_y, min_x:max_x])\n', (1281, 1324), True, 'import numpy as np\n'), ((1355, 1406), 'numpy.array', 'np.array', (['double_frame[1][min_y:max_y, min_x:max_x]'], {}), '(double_frame[1][min_y:max_y, min_x:max_x])\n', (1363, 1406), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
"""
This script parses and cleans up a provided Flow Cytometry Standard (fcs) file
and saves it as a Comma Separated Value (csv).
"""
import os
import re
import numpy as np
import pandas as pd
import optparse
import fcsparser
# #########################################################################
def main():
# Initialize the option parser
parser = optparse.OptionParser()
#Add options.
parser.add_option('-i', '--input_file', dest='filename', help='name of single\
file to be processed.', metavar="filename")
parser.add_option('-d', '--directory', dest='inputdir', help='name of\
input directory to be processed')
parser.add_option('-p', '--pattern', dest='pattern', help='filename\
pattern to parse files.')
parser.add_option('-o', '--output', dest='out',
help='name of output directory')
parser.add_option('-c', '--channel', action='append', dest='channels',
help=' individual channels to extract. Each channel must have its\
own -c flag.')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',\
help='print progress to stdout', default=False)
parser.add_option('-f', '--force', action='store_true', dest='force',
help='force saving of files to output directory if needed.',
default=False)
# get the ops and args
ops, args = parser.parse_args()
# List files
if (ops.inputdir == None) & (ops.filename == None):
raise ValueError('no input directory/file provided! Please indicate\
the input directory that contains the fcs files')
# get all the files in the directory
files = []
if ops.inputdir != None:
usr_files = np.array(os.listdir(ops.inputdir))
# Use the pattern to identify all of the files.
files_idx = np.array([ops.pattern in f for f in usr_files])
file_names = usr_files[files_idx]
#Add the input directory ahead of each file.
for f in file_names:
files.append('%s/%s' %(ops.inputdir, f))
else:
files.append(ops.filename)
    # Test that the output directory exists and is empty.
    cont = 'y'
    if ops.out != None:
        if os.path.isdir(ops.out) == False:
            os.mkdir(ops.out)
            print("Made new output directory %s. I hope that's okay..." %ops.out)
        elif len(os.listdir(ops.out)) > 0:
            if ops.force == True:
                cont = 'y'
            else:
                cont = input('Output directory is not empty! Continue? [y/n]: ')
# loop through the files
for i,f in enumerate(files):
# consider only the fcs files
if f.endswith('.fcs'):
# read the file
meta, data = fcsparser.parse(f)
# if there are set channels, get all the channels
if ops.channels != None:
data = data.loc[:, ops.channels]
#parse the file name to change the extension
filename = re.sub('.fcs', '.csv', f)
#Determine if they should be saved to an output directory or not.
if ops.out == None:
data.to_csv(filename, index=False)
if ops.verbose == True:
print(f + ' -> ' + filename)
else:
find_split = filename.rsplit('/', 1)
if len(find_split) != 1:
filename = filename.rsplit('/', 1)[1]
# Determine how to save the file.
                if cont.lower() == 'y':
                    data.to_csv(ops.out + '/' + filename, index=False)
                    if ops.verbose == True:
                        print(f + ' -> ' + ops.out + '/' + filename)
                else:
                    raise ValueError('output directory is not empty.')
if __name__ == '__main__':
main()
print('thank you -- come again')
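# Editor's note: example invocations (the script and channel names below are
# placeholders; the flags match the optparse options defined above):
#   python fcs_to_csv.py -i sample.fcs -c FSC-A -c SSC-A
#   python fcs_to_csv.py -d ./fcs_files -p experiment_ -o ./csv_out -v -f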
|
[
"os.listdir",
"optparse.OptionParser",
"numpy.array",
"os.path.isdir",
"fcsparser.parse",
"os.mkdir",
"re.sub"
] |
[((386, 409), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (407, 409), False, 'import optparse\n'), ((1906, 1955), 'numpy.array', 'np.array', (['[(ops.pattern in f) for f in usr_files]'], {}), '([(ops.pattern in f) for f in usr_files])\n', (1914, 1955), True, 'import numpy as np\n'), ((1804, 1828), 'os.listdir', 'os.listdir', (['ops.inputdir'], {}), '(ops.inputdir)\n', (1814, 1828), False, 'import os\n'), ((2281, 2303), 'os.path.isdir', 'os.path.isdir', (['ops.out'], {}), '(ops.out)\n', (2294, 2303), False, 'import os\n'), ((2326, 2343), 'os.mkdir', 'os.mkdir', (['ops.out'], {}), '(ops.out)\n', (2334, 2343), False, 'import os\n'), ((2824, 2842), 'fcsparser.parse', 'fcsparser.parse', (['f'], {}), '(f)\n', (2839, 2842), False, 'import fcsparser\n'), ((3076, 3101), 're.sub', 're.sub', (['""".fcs"""', '""".csv"""', 'f'], {}), "('.fcs', '.csv', f)\n", (3082, 3101), False, 'import re\n'), ((2443, 2462), 'os.listdir', 'os.listdir', (['ops.out'], {}), '(ops.out)\n', (2453, 2462), False, 'import os\n'), ((3627, 3646), 'os.listdir', 'os.listdir', (['ops.out'], {}), '(ops.out)\n', (3637, 3646), False, 'import os\n')]
|
from traits.api import HasTraits, Bool, Enum, List, Str
from numpy import array, cos, sin
class ElementalRotationDefinition(HasTraits):
'''
A definition of an elemental rotation and its angle's name
'''
angle_name = Str("undefined angle")
axis = Enum('around_x', 'around_y', 'around_z')
isClockwiseCameraSystemRotation = Bool(False)
class TaitBryanAnglesDefinition(HasTraits):
'''
Tait-Bryan angle rotations are defined by three rotation angles around
the x,y & z-axis.
The resulting rotation will be different according to
1. The order in which the rotations are applied
2. The rotation direction (clockwise vs. counter-clockwise)
'''
angles_in_order_applied = List(ElementalRotationDefinition)
def angles_yaw_pitch_roll():
'''
Returns a definition of the "Yaw, Pitch, Roll" Tait-Bryan angles set widespread in aerospace applications.
'''
definition = TaitBryanAnglesDefinition()
# first roll is applied
definition.angles_in_order_applied.append(
ElementalRotationDefinition(angle_name="Roll", axis='around_x', isClockwiseCameraSystemRotation=False))
# then pitch
definition.angles_in_order_applied.append(
ElementalRotationDefinition(angle_name="Pitch", axis='around_y', isClockwiseCameraSystemRotation=False))
# then yaw
definition.angles_in_order_applied.append(
ElementalRotationDefinition(angle_name="Yaw", axis='around_z', isClockwiseCameraSystemRotation=False))
return definition
def angles_pix4d_omega_phi_kappa():
'''
Returns a definition of the "Omega, Phi, Kappa" Tait-Bryan angles set used by pix4d.
'''
definition = TaitBryanAnglesDefinition()
# first kappa is applied
definition.angles_in_order_applied.append(
ElementalRotationDefinition(angle_name="Kappa", axis='around_z', isClockwiseCameraSystemRotation=False))
# then phi
definition.angles_in_order_applied.append(
ElementalRotationDefinition(angle_name="Phi", axis='around_y', isClockwiseCameraSystemRotation=False))
# last omega
definition.angles_in_order_applied.append(
ElementalRotationDefinition(angle_name="Omega", axis='around_x', isClockwiseCameraSystemRotation=False))
return definition
def camera_to_world_rotation_around_x(cc_angle = 0):
'''
Compute a rotation matrix that is used to transform
a point in camera coordinates to a point in world coordinates.
when the camera(system) rotates counter-clockwise.
(Seeing the camera(system) as fixed, the rotation
would transform points clockwise around its x axis)
'''
return array([[1., 0., 0.],
[0., cos(cc_angle), -sin(cc_angle)],
[0., sin(cc_angle), cos(cc_angle)]])
def camera_to_world_rotation_around_y(cc_angle = 0):
'''
Compute a rotation matrix that is used to transform
a point in camera coordinates to a point in world coordinates.
when the camera(system) rotates counter-clockwise.
(Seeing the camera(system) as fixed, the rotation
    would transform points clockwise around its y axis)
'''
return array([[cos(cc_angle), 0., sin(cc_angle)],
[0., 1., 0.],
[-sin(cc_angle), 0., cos(cc_angle)]])
def camera_to_world_rotation_around_z(cc_angle = 0):
'''
Compute a rotation matrix that is used to transform
a point in camera coordinates to a point in world coordinates
when the camera(system) rotates counter-clockwise.
(Seeing the camera(system) as fixed, the rotation
    would transform points clockwise around its z axis)
'''
return array([[cos(cc_angle), -sin(cc_angle), 0.],
[sin(cc_angle), cos(cc_angle), 0.],
[0., 0., 1.]])
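# Editor's sketch: a quick sanity check of the elemental rotations above (not part
# of the original module). A 90 degree counter-clockwise camera rotation about z
# maps the camera x-axis onto the world y-axis.
def _rotation_sanity_check():
    from numpy import allclose, pi
    v = array([1., 0., 0.])
    return allclose(camera_to_world_rotation_around_z(pi / 2).dot(v), [0., 1., 0.])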
def world_angle(angle, world_axis):
'''
Correction on the angle for possibly inverted axes
due to the world system definition (w.r.t. the mayavi world system)
'''
if(world_axis in ['Down', 'West', 'South']):
angle = -angle
return angle
def elemental_rotation(angle_and_definition, worldsystem):
'''
Returns an elemental rotation matrix that is used to transform
a point in camera coordinates to a point in world coordinates
given an euler angle and its definition.
'''
angle, definition = angle_and_definition
if (definition.isClockwiseCameraSystemRotation):
angle = -angle
if definition.axis == 'around_x':
return camera_to_world_rotation_around_x(world_angle(angle, worldsystem.x_axis))
if definition.axis == 'around_y':
return camera_to_world_rotation_around_y(world_angle(angle, worldsystem.y_axis))
if definition.axis == 'around_z':
return camera_to_world_rotation_around_z(world_angle(angle, worldsystem.z_axis))
def camera_to_world_rotation_matrix(first_angle_and_definition,
second_angle_and_definition,
last_angle_and_definition,
world_system):
'''
Compute a rotation matrix that is used to transform
a point in camera coordinates to a point in world coordinates
given Tait-Bryan angles and their definition.
Note: Matrices application order is opposite to reading order
'''
return elemental_rotation(last_angle_and_definition, world_system).dot(
elemental_rotation(second_angle_and_definition, world_system)).dot(
elemental_rotation(first_angle_and_definition, world_system))
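# Editor's sketch (illustrative, not part of the original module): composing a full
# camera-to-world rotation from the yaw/pitch/roll definition above. The world
# system class is not defined in this file, so a hypothetical stand-in exposing
# x_axis / y_axis / z_axis attributes is used here.
def _example_camera_to_world_rotation():
    from collections import namedtuple
    WorldSystem = namedtuple('WorldSystem', ['x_axis', 'y_axis', 'z_axis'])
    world = WorldSystem(x_axis='East', y_axis='North', z_axis='Up')
    definition = angles_yaw_pitch_roll()
    # angles (radians) in the order the elemental rotations are applied: roll, pitch, yaw
    angles = [0.1, 0.05, 0.5]
    pairs = list(zip(angles, definition.angles_in_order_applied))
    return camera_to_world_rotation_matrix(pairs[0], pairs[1], pairs[2], world)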
|
[
"traits.api.Enum",
"traits.api.Str",
"numpy.cos",
"numpy.sin",
"traits.api.Bool",
"traits.api.List"
] |
[((234, 256), 'traits.api.Str', 'Str', (['"""undefined angle"""'], {}), "('undefined angle')\n", (237, 256), False, 'from traits.api import HasTraits, Bool, Enum, List, Str\n'), ((268, 308), 'traits.api.Enum', 'Enum', (['"""around_x"""', '"""around_y"""', '"""around_z"""'], {}), "('around_x', 'around_y', 'around_z')\n", (272, 308), False, 'from traits.api import HasTraits, Bool, Enum, List, Str\n'), ((347, 358), 'traits.api.Bool', 'Bool', (['(False)'], {}), '(False)\n', (351, 358), False, 'from traits.api import HasTraits, Bool, Enum, List, Str\n'), ((722, 755), 'traits.api.List', 'List', (['ElementalRotationDefinition'], {}), '(ElementalRotationDefinition)\n', (726, 755), False, 'from traits.api import HasTraits, Bool, Enum, List, Str\n'), ((2700, 2713), 'numpy.cos', 'cos', (['cc_angle'], {}), '(cc_angle)\n', (2703, 2713), False, 'from numpy import array, cos, sin\n'), ((2758, 2771), 'numpy.sin', 'sin', (['cc_angle'], {}), '(cc_angle)\n', (2761, 2771), False, 'from numpy import array, cos, sin\n'), ((2774, 2787), 'numpy.cos', 'cos', (['cc_angle'], {}), '(cc_angle)\n', (2777, 2787), False, 'from numpy import array, cos, sin\n'), ((3170, 3183), 'numpy.cos', 'cos', (['cc_angle'], {}), '(cc_angle)\n', (3173, 3183), False, 'from numpy import array, cos, sin\n'), ((3192, 3205), 'numpy.sin', 'sin', (['cc_angle'], {}), '(cc_angle)\n', (3195, 3205), False, 'from numpy import array, cos, sin\n'), ((3295, 3308), 'numpy.cos', 'cos', (['cc_angle'], {}), '(cc_angle)\n', (3298, 3308), False, 'from numpy import array, cos, sin\n'), ((3690, 3703), 'numpy.cos', 'cos', (['cc_angle'], {}), '(cc_angle)\n', (3693, 3703), False, 'from numpy import array, cos, sin\n'), ((3748, 3761), 'numpy.sin', 'sin', (['cc_angle'], {}), '(cc_angle)\n', (3751, 3761), False, 'from numpy import array, cos, sin\n'), ((3766, 3779), 'numpy.cos', 'cos', (['cc_angle'], {}), '(cc_angle)\n', (3769, 3779), False, 'from numpy import array, cos, sin\n'), ((2717, 2730), 'numpy.sin', 'sin', (['cc_angle'], {}), '(cc_angle)\n', (2720, 2730), False, 'from numpy import array, cos, sin\n'), ((3274, 3287), 'numpy.sin', 'sin', (['cc_angle'], {}), '(cc_angle)\n', (3277, 3287), False, 'from numpy import array, cos, sin\n'), ((3709, 3722), 'numpy.sin', 'sin', (['cc_angle'], {}), '(cc_angle)\n', (3712, 3722), False, 'from numpy import array, cos, sin\n')]
|
import torch
from torch.utils.data import Dataset, DataLoader
from torch.distributions.multivariate_normal import MultivariateNormal
import numpy as np
from tqdm import tqdm
import random
def get_rotation(theta):
rad = np.radians(theta)
c, s = np.cos(rad), np.sin(rad)
R = np.array([[c, -s],
[s, c]])
return R
class CircleDataset(Dataset):
def __init__(self, n_samples, n_centers=9, sigma=0.1, ysigma=0.01, include_zero=True,
target_label=1., seed = None, radius=1.):
super().__init__()
if seed != None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
self.include_zero = include_zero
self.nus = []
if include_zero:
self.nus.append(torch.zeros(2))
self.sigma = sigma
self.ysigma = ysigma
self.radius = radius
for i in range(n_centers-include_zero):
R = get_rotation(i*360/(n_centers-include_zero))
self.nus.append(torch.tensor([radius, 0] @ R, dtype=torch.float))
classes = torch.multinomial(torch.ones(n_centers), n_samples,
replacement=True)
data = []
target = []
for i in range(n_centers):
n_samples_class = torch.sum(classes == i)
if n_samples_class == 0:
continue
dist = MultivariateNormal(self.nus[i],
torch.eye(2)*sigma**2)
data.append(dist.sample([n_samples_class.item()]))
enc = torch.full((n_samples_class, n_centers), -target_label)
enc[:, i] = target_label
target.append(enc + ysigma * torch.randn(n_samples_class)[:, None])
self.data = torch.cat(data).float()
self.target = torch.cat(target).float()
def __getitem__(self, idx):
return self.data[idx], self.target[idx]
def __len__(self):
return self.data.shape[0]
def gaussian_sampler_2d(gaussian_center, cov_matrix):
mu_distr = MultivariateNormal(gaussian_center, cov_matrix)
return mu_distr
def gaussian_data_sampling(gaussian_center, cov_matrix, data_num, seed = None):
if seed is not None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
sampler = gaussian_sampler_2d(gaussian_center, cov_matrix)
data = sampler.sample(sample_shape=torch.Size([data_num]))
return data
def gaussian_mixture_data_sampling(centers, cov_matrix, data_num, seed = None, device = None):
if seed is not None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
index_to_choice = np.random.randint(centers.shape[0], size = data_num)
data_clusters = gaussian_data_sampling(centers[index_to_choice[0]], cov_matrix, 1)
for i in range(1, data_num):
cur_data = gaussian_data_sampling(centers[index_to_choice[i]], cov_matrix, 1)
data_clusters = torch.cat((data_clusters, cur_data), 0)
return data_clusters
def model_1d(data):
real_labels = torch.sin(12*data) + 0.66*torch.cos(25*data) + 3
return real_labels
def noise_labels_model(real_labels, sigma_noise, seed = None):
loc = 0. # mean zero
scale = 1.
normal = torch.distributions.Normal(loc, scale) # create a normal distribution object
if seed is not None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
x = normal.rsample([real_labels.shape[0]])
real_labels = real_labels + x*sigma_noise
return real_labels
def get_sample_regression(n_samples, noise = 0.1, seed = 42):
"""
Returns (x_train, y_train), (x_true, y_true)
"""
gaussian_centers = torch.Tensor([[-1.0/(2**0.5)], [1.0/(2**0.5)]])
data_num = n_samples
data_sigma_noise = noise
sigma = 0.01
init_cov_matrix = torch.eye(1)
cov_matrix_default = sigma*init_cov_matrix
data_1d = gaussian_mixture_data_sampling(gaussian_centers,
cov_matrix_default,
data_num,
seed)
real_labels = model_1d(data_1d[:, 0])
noise_labels = noise_labels_model(real_labels,
sigma_noise = data_sigma_noise,
seed = seed).reshape((real_labels.shape[0], 1))
range_for_real_labels = torch.linspace(-1, 1, steps = 1000)
real_labels_range = model_1d(range_for_real_labels)
# data, range_for_real_labels, real_labels, noise_labels,
return (data_1d[:, 0], noise_labels[:, 0]), (range_for_real_labels, real_labels_range)
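# Editor's sketch: minimal usage of get_sample_regression; the sample size and
# noise level below are illustrative only.
if __name__ == "__main__":
    (x_train, y_train), (x_true, y_true) = get_sample_regression(n_samples=200, noise=0.1, seed=42)
    print(x_train.shape, y_train.shape)  # torch.Size([200]) torch.Size([200])
    print(x_true.shape, y_true.shape)    # torch.Size([1000]) torch.Size([1000])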
|
[
"numpy.radians",
"torch.sin",
"numpy.array",
"torch.cos",
"torch.sum",
"numpy.sin",
"torch.eye",
"numpy.random.seed",
"torch.randn",
"torch.distributions.Normal",
"torch.Tensor",
"numpy.cos",
"torch.Size",
"torch.cat",
"torch.manual_seed",
"torch.full",
"random.seed",
"torch.tensor",
"numpy.random.randint",
"torch.distributions.multivariate_normal.MultivariateNormal",
"torch.zeros",
"torch.linspace",
"torch.ones"
] |
[((224, 241), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (234, 241), True, 'import numpy as np\n'), ((286, 313), 'numpy.array', 'np.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (294, 313), True, 'import numpy as np\n'), ((2140, 2187), 'torch.distributions.multivariate_normal.MultivariateNormal', 'MultivariateNormal', (['gaussian_center', 'cov_matrix'], {}), '(gaussian_center, cov_matrix)\n', (2158, 2187), False, 'from torch.distributions.multivariate_normal import MultivariateNormal\n'), ((2878, 2928), 'numpy.random.randint', 'np.random.randint', (['centers.shape[0]'], {'size': 'data_num'}), '(centers.shape[0], size=data_num)\n', (2895, 2928), True, 'import numpy as np\n'), ((3457, 3495), 'torch.distributions.Normal', 'torch.distributions.Normal', (['loc', 'scale'], {}), '(loc, scale)\n', (3483, 3495), False, 'import torch\n'), ((3965, 4016), 'torch.Tensor', 'torch.Tensor', (['[[-1.0 / 2 ** 0.5], [1.0 / 2 ** 0.5]]'], {}), '([[-1.0 / 2 ** 0.5], [1.0 / 2 ** 0.5]])\n', (3977, 4016), False, 'import torch\n'), ((4106, 4118), 'torch.eye', 'torch.eye', (['(1)'], {}), '(1)\n', (4115, 4118), False, 'import torch\n'), ((4677, 4710), 'torch.linspace', 'torch.linspace', (['(-1)', '(1)'], {'steps': '(1000)'}), '(-1, 1, steps=1000)\n', (4691, 4710), False, 'import torch\n'), ((253, 264), 'numpy.cos', 'np.cos', (['rad'], {}), '(rad)\n', (259, 264), True, 'import numpy as np\n'), ((266, 277), 'numpy.sin', 'np.sin', (['rad'], {}), '(rad)\n', (272, 277), True, 'import numpy as np\n'), ((2322, 2339), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2333, 2339), False, 'import random\n'), ((2348, 2368), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2362, 2368), True, 'import numpy as np\n'), ((2377, 2400), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2394, 2400), False, 'import torch\n'), ((2727, 2744), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2738, 2744), False, 'import random\n'), ((2753, 2773), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2767, 2773), True, 'import numpy as np\n'), ((2782, 2805), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2799, 2805), False, 'import torch\n'), ((3161, 3200), 'torch.cat', 'torch.cat', (['(data_clusters, cur_data)', '(0)'], {}), '((data_clusters, cur_data), 0)\n', (3170, 3200), False, 'import torch\n'), ((3567, 3584), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3578, 3584), False, 'import random\n'), ((3593, 3613), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3607, 3613), True, 'import numpy as np\n'), ((3622, 3645), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3639, 3645), False, 'import torch\n'), ((592, 609), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (603, 609), False, 'import random\n'), ((622, 642), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (636, 642), True, 'import numpy as np\n'), ((655, 678), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (672, 678), False, 'import torch\n'), ((1173, 1194), 'torch.ones', 'torch.ones', (['n_centers'], {}), '(n_centers)\n', (1183, 1194), False, 'import torch\n'), ((1374, 1397), 'torch.sum', 'torch.sum', (['(classes == i)'], {}), '(classes == i)\n', (1383, 1397), False, 'import torch\n'), ((1654, 1709), 'torch.full', 'torch.full', (['(n_samples_class, n_centers)', '(-target_label)'], {}), '((n_samples_class, n_centers), -target_label)\n', (1664, 1709), 
False, 'import torch\n'), ((2553, 2575), 'torch.Size', 'torch.Size', (['[data_num]'], {}), '([data_num])\n', (2563, 2575), False, 'import torch\n'), ((3266, 3286), 'torch.sin', 'torch.sin', (['(12 * data)'], {}), '(12 * data)\n', (3275, 3286), False, 'import torch\n'), ((849, 863), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (860, 863), False, 'import torch\n'), ((1087, 1135), 'torch.tensor', 'torch.tensor', (['([radius, 0] @ R)'], {'dtype': 'torch.float'}), '([radius, 0] @ R, dtype=torch.float)\n', (1099, 1135), False, 'import torch\n'), ((1847, 1862), 'torch.cat', 'torch.cat', (['data'], {}), '(data)\n', (1856, 1862), False, 'import torch\n'), ((1893, 1910), 'torch.cat', 'torch.cat', (['target'], {}), '(target)\n', (1902, 1910), False, 'import torch\n'), ((3292, 3312), 'torch.cos', 'torch.cos', (['(25 * data)'], {}), '(25 * data)\n', (3301, 3312), False, 'import torch\n'), ((1550, 1562), 'torch.eye', 'torch.eye', (['(2)'], {}), '(2)\n', (1559, 1562), False, 'import torch\n'), ((1788, 1816), 'torch.randn', 'torch.randn', (['n_samples_class'], {}), '(n_samples_class)\n', (1799, 1816), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 23 11:28:30 2020
@author: rener
"""
import numpy as np
import pandas as pd
import os
from datetime import date
import time
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)
#%% For the various companies we have data going back differently far.
#
#
frames=[]
for file in os.listdir('Stocks'):
frames.append(
pd.read_csv('Stocks/' +file,index_col=0))
# For the various companies we have data going back differently far.
# So there is a decision to make: we could look for the shortest
# available timeseries and trim all other datasets to the same length.
# But then whenever we compute a covariance for two longer datasets
# we will not use all available information.
# So we only trim every pair in the covariance computing function.
df=pd.concat(frames)
# Add column with Estimated Average of the day
df['EstAvg'] = df[['open','high','low','close']].apply(np.mean,axis=1)
df.to_csv('fulltable.csv')
#%%
pivot = df.pivot(columns = 'symbol', values = 'EstAvg')
# Note that we are taking the symbols from the Pivot Table.
# This is the case, because when the Alphavantage API does not give
# us a dataset for some symbol, it does not appear in the pivot table,
# so we avoid a Key Error.
symbols = pivot.columns
# Next we initialize an 'empty' dataframe, and start filling it.
CovMatrix = pd.DataFrame(index=symbols,columns=symbols)
#%%
def covariance(a, b):
    # E[(a - E[a]) * (b - E[b])]; note the parentheses around both centred factors.
    return np.mean((a - np.mean(a)) * (b - np.mean(b)))
for col in CovMatrix:
    for row in CovMatrix.index:
        # .loc avoids chained-assignment issues when writing single cells
        CovMatrix.loc[row, col] = covariance(pivot[row], pivot[col])
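# Added note (illustrative alternative, not part of the original script): pandas can build a
# covariance matrix directly via
# CovMatrix_pairwise = pivot.cov()
# DataFrame.cov() uses, for each pair of columns, only their overlapping (non-NaN)
# observations -- the pairwise trimming described in the comments above -- and applies the
# sample (ddof=1) normalisation, so its values differ slightly from the np.mean-based estimate.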
|
[
"numpy.mean",
"os.listdir",
"pandas.read_csv",
"os.chdir",
"os.path.realpath",
"pandas.DataFrame",
"pandas.concat"
] |
[((238, 256), 'os.chdir', 'os.chdir', (['dir_path'], {}), '(dir_path)\n', (246, 256), False, 'import os\n'), ((362, 382), 'os.listdir', 'os.listdir', (['"""Stocks"""'], {}), "('Stocks')\n", (372, 382), False, 'import os\n'), ((856, 873), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (865, 873), True, 'import pandas as pd\n'), ((1411, 1455), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'symbols', 'columns': 'symbols'}), '(index=symbols, columns=symbols)\n', (1423, 1455), True, 'import pandas as pd\n'), ((210, 236), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (226, 236), False, 'import os\n'), ((411, 453), 'pandas.read_csv', 'pd.read_csv', (["('Stocks/' + file)"], {'index_col': '(0)'}), "('Stocks/' + file, index_col=0)\n", (422, 453), True, 'import pandas as pd\n'), ((1503, 1513), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (1510, 1513), True, 'import numpy as np\n'), ((1517, 1527), 'numpy.mean', 'np.mean', (['b'], {}), '(b)\n', (1524, 1527), True, 'import numpy as np\n')]
|
__copyright__ = "Copyright (C) 2019 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import loopy as lp
from pystella.field import Field, index_fields
from pystella.elementwise import ElementWiseMap
from pymbolic import var
from pymbolic.primitives import Subscript, Variable
__doc__ = """
.. currentmodule:: pystella.step
.. autoclass:: Stepper
.. currentmodule:: pystella
Low-storage Runge-Kutta methods
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. currentmodule:: pystella.step
.. autoclass:: LowStorageRKStepper
.. currentmodule:: pystella
.. autoclass:: LowStorageRK54
.. autoclass:: LowStorageRK3Williamson
.. autoclass:: LowStorageRK3Inhomogeneous
.. autoclass:: LowStorageRK3SSP
Classical Runge-Kutta methods
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"Classical" Runge-Kutta methods are also implemented, though are not recommended
over the low-storage methods above.
.. currentmodule:: pystella.step
.. autoclass:: RungeKuttaStepper
.. currentmodule:: pystella
.. autoclass:: RungeKutta4
.. autoclass:: RungeKutta3SSP
.. autoclass:: RungeKutta3Heun
.. autoclass:: RungeKutta3Nystrom
.. autoclass:: RungeKutta3Ralston
.. autoclass:: RungeKutta2Midpoint
.. autoclass:: RungeKutta2Ralston
"""
class Stepper:
"""
The base class for time steppers, with no implementation of a particular time
stepper.
:arg input: May be one of the following:
* a :class:`dict` whose values represent the right-hand side
of the ODEs to solve, i.e., `(key, value)` pairs corresponding to
:math:`(y, f)` such that
.. math::
\\frac{\\mathrm{d} y}{\\mathrm{d} t} = f,
where :math:`f` is an arbitrary function of kernel data.
Both keys and values must be :mod:`pymbolic` expressions.
* a :class:`~pystella.Sector`. In this case, the right-hand side
dictionary will be obtained from :attr:`~pystella.Sector.rhs_dict`.
* a :class:`list` of :class:`~pystella.Sector`\\ s. In this case,
the input obtained from each :class:`~pystella.Sector`
(as described above) will be combined.
The following keyword arguments are recognized:
:arg MapKernel: The kernel class which each substep/stage will be an
instance of---i.e., one of :class:`~pystella.ElementWiseMap` or its
subclasses. Defaults to :class:`~pystella.ElementWiseMap`.
:arg dt: A :class:`float` fixing the value of the timestep interval.
Defaults to *None*, in which case it is not fixed at kernel creation.
The remaining arguments are passed to :meth:`MapKernel` for
each substep of the timestepper (i.e., see the documentation of
:class:`~pystella.ElementWiseMap`).
.. automethod:: __call__
.. attribute:: num_stages
The number of substeps/stages per timestep.
.. attribute:: expected_order
The expected convergence order of *global* error, i.e.
:math:`n` such that the global error is :math:`\\mathcal{O}(\\Delta t^n)`.
.. attribute:: num_unknowns
The number of unknown degrees of freedom which are evolved.
"""
num_stages = None
expected_order = None
num_copies = None
def make_steps(self, MapKernel=ElementWiseMap, **kwargs):
raise NotImplementedError
def __init__(self, input, MapKernel=ElementWiseMap, **kwargs):
single_stage = kwargs.pop("single_stage", True)
from pystella import Sector
if isinstance(input, Sector):
self.rhs_dict = input.rhs_dict
elif isinstance(input, list):
self.rhs_dict = dict(i for s in input for i in s.rhs_dict.items())
elif isinstance(input, dict):
self.rhs_dict = input
if not single_stage:
prepend_with = (self.num_copies,)
else:
prepend_with = None
args = kwargs.pop("args", [...])
args = args + [lp.ValueArg("dt")]
from pystella import get_field_args
inferred_args = get_field_args(self.rhs_dict, prepend_with=prepend_with)
from pystella.elementwise import append_new_args
self.args = append_new_args(args, inferred_args)
dt = kwargs.pop("dt", None)
fixed_parameters = kwargs.pop("fixed_parameters", dict())
if dt is not None:
fixed_parameters.update(dict(dt=dt))
self.num_unknowns = len(self.rhs_dict.keys())
self.steps = self.make_steps(**kwargs, fixed_parameters=fixed_parameters)
def __call__(self, stage, queue=None, **kwargs):
"""
Calls substep/stage ``stage`` (:attr:`steps[stage]`) of the timestepper,
i.e., :func:`pystella.ElementWiseMap.__call__` for the kernel for
substep/stage ``stage``.
:arg stage: The substep/stage of time timestepper to call.
:returns: The :class:`pyopencl.Event` associated with the kernel invocation.
"""
evt, _ = self.steps[stage](queue, **kwargs)
return evt
class RungeKuttaStepper(Stepper):
"""
The base implementation of classical, explicit Runge-Kutta time steppers,
which operate by storing and operating on multiple copies of each unknown
array. Subclasses must provide an implementation of :meth:`step_statements`
which returns a key-value pair implementing a specific substep of the
particular timestepper.
.. warning::
To minimize the required storage per unknown (i.e., number of
        temporaries), the implementation of most subclasses overwrites arrays that
are being read as input to compute right-hand sides. This means that any
non-local (stencil-type) operations must be precomputed and cached
*globally* (unless otherwise noted).
:raises ValueError: if the keys of :attr:`rhs_dict` are not
:class:`~pystella.Field`\\ s (or :class:`pymbolic.primitives.Subscript`\\ s
thereof). This is required for :meth:`make_steps` to be able to prepend
unknown arrays' subscripts with the index corresponding to the temporary
storage axis.
"""
def __init__(self, input, **kwargs):
super().__init__(input, single_stage=False, **kwargs)
def step_statements(self, stage, f, dt, rhs):
raise NotImplementedError
def make_steps(self, MapKernel=ElementWiseMap, **kwargs):
rhs = var("rhs")
dt = var("dt")
q = var("q")
fixed_parameters = kwargs.pop("fixed_parameters", dict())
rhs_statements = {rhs[i]: index_fields(value, prepend_with=(q,))
for i, value in enumerate(self.rhs_dict.values())}
steps = []
for stage in range(self.num_stages):
RK_dict = {}
for i, f in enumerate(self.rhs_dict.keys()):
# ensure that key is either a Field or a Subscript of a Field
# so that index_fields can prepend the q index
key_has_field = False
if isinstance(f, Field):
key_has_field = True
elif isinstance(f, Subscript):
if isinstance(f.aggregate, Field):
key_has_field = True
if not key_has_field:
raise ValueError("rhs_dict keys must be Field instances")
statements = self.step_statements(stage, f, dt, rhs[i])
for k, v in statements.items():
RK_dict[k] = v
fixed_parameters.update(q=0 if stage == 0 else 1)
options = lp.Options(enforce_variable_access_ordered="no_check")
step = MapKernel(RK_dict, tmp_instructions=rhs_statements,
args=self.args, **kwargs, options=options,
fixed_parameters=fixed_parameters)
steps.append(step)
return steps
class RungeKutta4(RungeKuttaStepper):
"""
The classical, four-stage, fourth-order Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length three.
"""
num_stages = 4
expected_order = 4
num_copies = 3
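    # Storage layout used by step_statements below: copy 0 holds y_n, copy 1 holds the
    # input for the next right-hand-side evaluation, and copy 2 accumulates
    # y_n + dt*(k1 + 2*k2 + 2*k3)/6; the last stage writes y_{n+1} = copy 2 + dt*k4/6
    # back into copy 0.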
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(3)]
if stage == 0:
return {fq[1]: fq[0] + dt/2 * rhs,
fq[2]: fq[0] + dt/6 * rhs}
elif stage == 1:
return {fq[1]: fq[0] + dt/2 * rhs,
fq[2]: fq[2] + dt/3 * rhs}
elif stage == 2:
return {fq[1]: fq[0] + dt * rhs,
fq[2]: fq[2] + dt/3 * rhs}
elif stage == 3:
return {fq[0]: fq[2] + dt/6 * rhs}
class RungeKutta3Heun(RungeKuttaStepper):
"""
Heun's three-stage, third-order Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length three.
"""
num_stages = 3
expected_order = 3
num_copies = 3
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(3)]
if stage == 0:
return {fq[1]: fq[0] + dt/3 * rhs,
fq[2]: fq[0] + dt/4 * rhs}
elif stage == 1:
return {fq[1]: fq[0] + dt*2/3 * rhs}
elif stage == 2:
return {fq[0]: fq[2] + dt*3/4 * rhs}
class RungeKutta3Nystrom(RungeKuttaStepper):
"""
Nystrom's three-stage, third-order Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length three.
"""
num_stages = 3
expected_order = 3
num_copies = 3
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(3)]
if stage == 0:
return {fq[1]: fq[0] + dt*2/3 * rhs,
fq[2]: fq[0] + dt*2/8 * rhs}
elif stage == 1:
return {fq[1]: fq[0] + dt*2/3 * rhs,
fq[2]: fq[2] + dt*3/8 * rhs}
elif stage == 2:
return {fq[0]: fq[2] + dt*3/8 * rhs}
class RungeKutta3Ralston(RungeKuttaStepper):
"""
Ralston's three-stage, third-order Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length three.
"""
num_stages = 3
expected_order = 3
num_copies = 3
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(3)]
if stage == 0:
return {fq[1]: fq[0] + dt/2 * rhs,
fq[2]: fq[0] + dt*2/9 * rhs}
elif stage == 1:
return {fq[1]: fq[0] + dt*3/4 * rhs,
fq[2]: fq[2] + dt*1/3 * rhs}
elif stage == 2:
return {fq[0]: fq[2] + dt*4/9 * rhs}
class RungeKutta3SSP(RungeKuttaStepper):
"""
A three-stage, third-order strong-stability preserving Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length two.
"""
num_stages = 3
expected_order = 3
num_copies = 2
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(3)]
if stage == 0:
return {fq[1]: fq[0] + dt * rhs}
elif stage == 1:
return {fq[1]: 3/4 * fq[0] + 1/4 * fq[1] + dt/4 * rhs}
elif stage == 2:
return {fq[0]: 1/3 * fq[0] + 2/3 * fq[1] + dt*2/3 * rhs}
class RungeKutta2Midpoint(RungeKuttaStepper):
"""
The "midpoint" method, a two-stage, second-order Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length two.
Note that right-hand side operations *can* safely involve non-local computations
of unknown arrays for this method.
"""
num_stages = 2
expected_order = 2
num_copies = 2
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(2)]
if stage == 0:
return {fq[1]: fq[0] + dt/2 * rhs}
elif stage == 1:
return {fq[0]: fq[0] + dt * rhs}
# possible order reduction
class RungeKutta2Heun(RungeKuttaStepper):
num_stages = 2
expected_order = 2
num_copies = 2
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(2)]
if stage == 0:
return {fq[1]: fq[0] + dt * rhs,
fq[0]: fq[0] + dt/2 * rhs}
elif stage == 1:
return {fq[0]: fq[0] + dt/2 * rhs}
class RungeKutta2Ralston(RungeKuttaStepper):
"""
    Ralston's two-stage, second-order Runge-Kutta method.
Requires unknown arrays to have temporary storage axes of length two.
"""
num_stages = 2
expected_order = 2
num_copies = 2
def step_statements(self, stage, f, dt, rhs):
fq = [index_fields(f, prepend_with=(q,)) for q in range(2)]
if stage == 0:
return {fq[1]: fq[0] + dt*2/3 * rhs,
fq[0]: fq[0] + dt/4 * rhs}
elif stage == 1:
return {fq[0]: fq[0] + dt*3/4 * rhs}
def get_name(expr):
if isinstance(expr, Field):
return get_name(expr.child)
elif isinstance(expr, Subscript):
return get_name(expr.aggregate)
elif isinstance(expr, Variable):
return expr.name
elif isinstance(expr, str):
return expr
def gen_tmp_name(expr, prefix="_", suffix="_tmp"):
name = get_name(expr)
return prefix + name + suffix
def copy_and_rename(expr):
if isinstance(expr, Field):
return expr.copy(child=copy_and_rename(expr.child))
elif isinstance(expr, Subscript):
return Subscript(copy_and_rename(expr.aggregate), expr.index)
elif isinstance(expr, Variable):
return Variable(gen_tmp_name(expr))
elif isinstance(expr, str):
return gen_tmp_name(expr)
class LowStorageRKStepper(Stepper):
"""
The base implementation of low-storage, explicit Runge-Kutta time steppers,
which operate by storing and operating on a single copy of each unknown array,
    plus an auxiliary temporary array.
The substeps are expressed in a standard form, drawing coefficients from
a subclass's provided values of :attr:`_A`, :attr:`_B`, and :attr:`_C`.
    Allocation of the auxiliary arrays is handled internally by:
.. automethod:: get_tmp_arrays_like
:meth:`get_tmp_arrays_like` is called the first time
:meth:`__call__` is called, with the result stored in the attribute
:attr:`tmp_arrays`.
These arrays must not be modified between substages of a single timestep,
but may be safely modified in between timesteps.
.. versionchanged:: 2020.2
        Auxiliary arrays handled internally by :meth:`get_tmp_arrays_like`.
Previously, manual allocation (and passing) of a single temporary
array ``k_tmp`` was required.
"""
_A = []
_B = []
_C = []
tmp_arrays = {}
def make_steps(self, MapKernel=ElementWiseMap, **kwargs):
tmp_arrays = [copy_and_rename(key) for key in self.rhs_dict.keys()]
self.dof_names = {get_name(key) for key in self.rhs_dict.keys()}
rhs_statements = {var(gen_tmp_name(key, suffix=f"_rhs_{i}")): val
for i, (key, val) in enumerate(self.rhs_dict.items())}
steps = []
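        # The loop below builds, for each stage i, the standard low-storage (2N) update
        #     k <- A_i * k + dt * rhs(y),    y <- y + B_i * k,
        # so only one auxiliary array k per unknown persists between stages (the C_i
        # coefficients listed by subclasses are the stage times t + C_i * dt).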
for stage in range(self.num_stages):
RK_dict = {}
for i, (f, k) in enumerate(zip(self.rhs_dict.keys(), tmp_arrays)):
rhs = var(gen_tmp_name(f, suffix=f"_rhs_{i}"))
RK_dict[k] = self._A[stage] * k + var("dt") * rhs
RK_dict[f] = f + self._B[stage] * k
step = MapKernel(RK_dict, tmp_instructions=rhs_statements,
args=self.args, **kwargs)
steps.append(step)
return steps
def get_tmp_arrays_like(self, **kwargs):
"""
Allocates required temporary arrays matching those passed via keyword.
:returns: A :class:`dict` of named arrays, suitable for passing via
dictionary expansion.
.. versionadded:: 2020.2
"""
tmp_arrays = {}
for name in self.dof_names:
f = kwargs[name]
tmp_name = gen_tmp_name(name)
import pyopencl.array as cla
if isinstance(f, cla.Array):
tmp_arrays[tmp_name] = cla.empty_like(f)
elif isinstance(f, np.ndarray):
tmp_arrays[tmp_name] = np.empty_like(f)
else:
raise ValueError(f"Could not generate tmp array for {f}"
f"of type {type(f)}")
tmp_arrays[tmp_name][...] = 0.
return tmp_arrays
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for step in self.steps:
step.knl = lp.add_inames_for_unused_hw_axes(step.knl)
def __call__(self, stage, *, queue=None, **kwargs):
if len(self.tmp_arrays) == 0:
self.tmp_arrays = self.get_tmp_arrays_like(**kwargs)
return super().__call__(stage, queue=queue, **kwargs, **self.tmp_arrays)
class LowStorageRK54(LowStorageRKStepper):
"""
A five-stage, fourth-order, low-storage Runge-Kutta method.
See
<NAME>., and <NAME>., Fourth-order-2N-storage
Runge-Kutta schemes, NASA Langley Tech Report TM 109112, 1994
"""
num_stages = 5
expected_order = 4
_A = [
0,
-567301805773 / 1357537059087,
-2404267990393 / 2016746695238,
-3550918686646 / 2091501179385,
-1275806237668 / 842570457699,
]
_B = [
1432997174477 / 9575080441755,
5161836677717 / 13612068292357,
1720146321549 / 2090206949498,
3134564353537 / 4481467310338,
2277821191437 / 14882151754819,
]
_C = [
0,
1432997174477 / 9575080441755,
2526269341429 / 6820363962896,
2006345519317 / 3224310063776,
2802321613138 / 2924317926251,
]
class LowStorageRK144(LowStorageRKStepper):
"""
A 14-stage, fourth-order low-storage Runge-Kutta method optimized for elliptic
stability regions.
See
Niegemann, Jens & Diehl, Richard & <NAME>. (2012). Efficient low-storage
Runge-Kutta schemes with optimized stability regions. J. Comput. Physics. 231.
364-372. 10.1016/j.jcp.2011.09.003.
"""
num_stages = 14
expected_order = 4
_A = [
0,
-0.7188012108672410,
-0.7785331173421570,
-0.0053282796654044,
-0.8552979934029281,
-3.9564138245774565,
-1.5780575380587385,
-2.0837094552574054,
-0.7483334182761610,
-0.7032861106563359,
0.0013917096117681,
-0.0932075369637460,
-0.9514200470875948,
-7.1151571693922548
]
_B = [
0.0367762454319673,
0.3136296607553959,
0.1531848691869027,
0.0030097086818182,
0.3326293790646110,
0.2440251405350864,
0.3718879239592277,
0.6204126221582444,
0.1524043173028741,
0.0760894927419266,
0.0077604214040978,
0.0024647284755382,
0.0780348340049386,
5.5059777270269628
]
_C = [
0,
0.0367762454319673,
0.1249685262725025,
0.2446177702277698,
0.2476149531070420,
0.2969311120382472,
0.3978149645802642,
0.5270854589440328,
0.6981269994175695,
0.8190890835352128,
0.8527059887098624,
0.8604711817462826,
0.8627060376969976,
0.8734213127600976
]
class LowStorageRK134(LowStorageRKStepper):
"""
A 13-stage, fourth-order low-storage Runge-Kutta method optimized for circular
stability regions.
See
Niegemann, Jens & Diehl, Richard & Busch, Kurt. (2012). Efficient low-storage
Runge-Kutta schemes with optimized stability regions. J. Comput. Physics. 231.
364-372. 10.1016/j.jcp.2011.09.003.
"""
num_stages = 13
expected_order = 4
_A = [
0,
0.6160178650170565,
0.4449487060774118,
1.0952033345276178,
1.2256030785959187,
0.2740182222332805,
0.0411952089052647,
0.179708489915356,
1.1771530652064288,
0.4078831463120878,
0.8295636426191777,
4.789597058425229,
0.6606671432964504
]
_B = [
0.0271990297818803,
0.1772488819905108,
0.0378528418949694,
0.6086431830142991,
0.21543139743161,
0.2066152563885843,
0.0415864076069797,
0.0219891884310925,
0.9893081222650993,
0.0063199019859826,
0.3749640721105318,
1.6080235151003195,
0.0961209123818189
]
_C = [
0,
0.0271990297818803,
0.0952594339119365,
0.1266450286591127,
0.1825883045699772,
0.3737511439063931,
0.5301279418422206,
0.5704177433952291,
0.5885784947099155,
0.6160769826246714,
0.6223252334314046,
0.6897593128753419,
0.9126827615920843
]
class LowStorageRK124(LowStorageRKStepper):
"""
A 12-stage, fourth-order low-storage Runge-Kutta method optimized for inviscid
problems.
See
Niegemann, Jens & <NAME> & <NAME>. (2012). Efficient low-storage
Runge-Kutta schemes with optimized stability regions. J. Comput. Physics. 231.
364-372. 10.1016/j.jcp.2011.09.003.
"""
num_stages = 12
expected_order = 4
_A = [
0,
0.0923311242368072,
0.9441056581158819,
4.327127324757639,
2.155777132902607,
0.9770727190189062,
0.7581835342571139,
1.79775254708255,
2.691566797270077,
4.646679896026814,
0.1539613783825189,
0.5943293901830616
]
_B = [
0.0650008435125904,
0.0161459902249842,
0.5758627178358159,
0.1649758848361671,
0.3934619494248182,
0.0443509641602719,
0.2074504268408778,
0.6914247433015102,
0.3766646883450449,
0.0757190350155483,
0.2027862031054088,
0.2167029365631842
]
_C = [
0,
0.0650008435125904,
0.0796560563081853,
0.1620416710085376,
0.2248877362907778,
0.2952293985641261,
0.3318332506149405,
0.4094724050198658,
0.6356954475753369,
0.6806551557645497,
0.714377371241835,
0.9032588871651854,
]
class LowStorageRK3Williamson(LowStorageRKStepper):
"""
A three-stage, third-order, low-storage Runge-Kutta method.
See
<NAME>., Low-storage Runge-Kutta schemes,
J. Comput. Phys., 35, 48-56, 1980
"""
num_stages = 3
expected_order = 3
_A = [0, -5/9, -153/128]
_B = [1/3, 15/16, 8/15]
_C = [0, 4/9, 15/32]
class LowStorageRK3Inhomogeneous(LowStorageRKStepper):
"""
A three-stage, third-order, low-storage Runge-Kutta method.
"""
num_stages = 3
expected_order = 3
_A = [0, -17/32, -32/27]
_B = [1/4, 8/9, 3/4]
_C = [0, 15/32, 4/9]
# possible order reduction
class LowStorageRK3Symmetric(LowStorageRKStepper):
num_stages = 3
expected_order = 3
_A = [0, -2/3, -1]
_B = [1/3, 1, 1/2]
_C = [0, 1/3, 2/3]
# possible order reduction
class LowStorageRK3PredictorCorrector(LowStorageRKStepper):
num_stages = 3
expected_order = 3
_A = [0, -1/4, -4/3]
_B = [1/2, 2/3, 1/2]
_C = [0, 1/2, 1]
c2 = .924574
z1 = np.sqrt(36 * c2**4 + 36 * c2**3 - 135 * c2**2 + 84 * c2 - 12)
z2 = 2 * c2**2 + c2 - 2
z3 = 12 * c2**4 - 18 * c2**3 + 18 * c2**2 - 11 * c2 + 2
z4 = 36 * c2**4 - 36 * c2**3 + 13 * c2**2 - 8 * c2 + 4
z5 = 69 * c2**3 - 62 * c2**2 + 28 * c2 - 8
z6 = 34 * c2**4 - 46 * c2**3 + 34 * c2**2 - 13 * c2 + 2
B1 = c2
B2 = ((12 * c2 * (c2 - 1) * (3 * z2 - z1) - (3 * z2 - z1)**2)
/ (144 * c2 * (3 * c2 - 2) * (c2 - 1)**2))
B3 = (- 24 * (3 * c2 - 2) * (c2 - 1)**2
/ ((3 * z2 - z1)**2 - 12 * c2 * (c2 - 1) * (3 * z2 - z1)))
A2 = ((- z1 * (6 * c2**2 - 4 * c2 + 1) + 3 * z3)
/ ((2 * c2 + 1) * z1 - 3 * (c2 + 2) * (2 * c2 - 1)**2))
A3 = ((- z4 * z1 + 108 * (2 * c2 - 1) * c2**5 - 3 * (2 * c2 - 1) * z5)
/ (24 * z1 * c2 * (c2 - 1)**4 + 72 * c2 * z6 + 72 * c2**6 * (2 * c2 - 13)))
class LowStorageRK3SSP(LowStorageRKStepper):
"""
A three-stage, third-order, strong-stability preserving, low-storage
Runge-Kutta method.
"""
num_stages = 3
expected_order = 3
_A = [0, A2, A3]
_B = [B1, B2, B3]
_C = [0, B1, B1 + B2 * (A2 + 1)]
all_steppers = [RungeKutta4, RungeKutta3SSP, RungeKutta3Heun, RungeKutta3Nystrom,
RungeKutta3Ralston, RungeKutta2Midpoint,
RungeKutta2Ralston, LowStorageRK54, LowStorageRK144,
LowStorageRK3Williamson, LowStorageRK3Inhomogeneous,
LowStorageRK3SSP]
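# Illustrative usage sketch (added remark; `queue`, `y_array` and any extra kernel keyword
# arguments are assumed to exist in the caller's setup and are not part of this module).
# A single unknown y evolved as dy/dt = -y maps the Field to its right-hand side, and each
# stage is invoked with the unknown array passed by keyword:
#
#     y = Field("y")
#     stepper = LowStorageRK54({y: -y}, dt=1e-2)   # remaining MapKernel kwargs omitted
#     for stage in range(stepper.num_stages):
#         stepper(stage, queue=queue, y=y_array)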
|
[
"pyopencl.array.empty_like",
"loopy.Options",
"loopy.ValueArg",
"numpy.sqrt",
"pymbolic.var",
"pystella.elementwise.append_new_args",
"numpy.empty_like",
"loopy.add_inames_for_unused_hw_axes",
"pystella.field.index_fields",
"pystella.get_field_args"
] |
[((25354, 25421), 'numpy.sqrt', 'np.sqrt', (['(36 * c2 ** 4 + 36 * c2 ** 3 - 135 * c2 ** 2 + 84 * c2 - 12)'], {}), '(36 * c2 ** 4 + 36 * c2 ** 3 - 135 * c2 ** 2 + 84 * c2 - 12)\n', (25361, 25421), True, 'import numpy as np\n'), ((5142, 5198), 'pystella.get_field_args', 'get_field_args', (['self.rhs_dict'], {'prepend_with': 'prepend_with'}), '(self.rhs_dict, prepend_with=prepend_with)\n', (5156, 5198), False, 'from pystella import get_field_args\n'), ((5278, 5314), 'pystella.elementwise.append_new_args', 'append_new_args', (['args', 'inferred_args'], {}), '(args, inferred_args)\n', (5293, 5314), False, 'from pystella.elementwise import append_new_args\n'), ((7538, 7548), 'pymbolic.var', 'var', (['"""rhs"""'], {}), "('rhs')\n", (7541, 7548), False, 'from pymbolic import var\n'), ((7563, 7572), 'pymbolic.var', 'var', (['"""dt"""'], {}), "('dt')\n", (7566, 7572), False, 'from pymbolic import var\n'), ((7586, 7594), 'pymbolic.var', 'var', (['"""q"""'], {}), "('q')\n", (7589, 7594), False, 'from pymbolic import var\n'), ((7699, 7737), 'pystella.field.index_fields', 'index_fields', (['value'], {'prepend_with': '(q,)'}), '(value, prepend_with=(q,))\n', (7711, 7737), False, 'from pystella.field import Field, index_fields\n'), ((8754, 8808), 'loopy.Options', 'lp.Options', ([], {'enforce_variable_access_ordered': '"""no_check"""'}), "(enforce_variable_access_ordered='no_check')\n", (8764, 8808), True, 'import loopy as lp\n'), ((9412, 9446), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (9424, 9446), False, 'from pystella.field import Field, index_fields\n'), ((10237, 10271), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (10249, 10271), False, 'from pystella.field import Field, index_fields\n'), ((10904, 10938), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (10916, 10938), False, 'from pystella.field import Field, index_fields\n'), ((11625, 11659), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (11637, 11659), False, 'from pystella.field import Field, index_fields\n'), ((12358, 12392), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (12370, 12392), False, 'from pystella.field import Field, index_fields\n'), ((13152, 13186), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (13164, 13186), False, 'from pystella.field import Field, index_fields\n'), ((13559, 13593), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (13571, 13593), False, 'from pystella.field import Field, index_fields\n'), ((14144, 14178), 'pystella.field.index_fields', 'index_fields', (['f'], {'prepend_with': '(q,)'}), '(f, prepend_with=(q,))\n', (14156, 14178), False, 'from pystella.field import Field, index_fields\n'), ((18279, 18321), 'loopy.add_inames_for_unused_hw_axes', 'lp.add_inames_for_unused_hw_axes', (['step.knl'], {}), '(step.knl)\n', (18311, 18321), True, 'import loopy as lp\n'), ((5053, 5070), 'loopy.ValueArg', 'lp.ValueArg', (['"""dt"""'], {}), "('dt')\n", (5064, 5070), True, 'import loopy as lp\n'), ((17793, 17810), 'pyopencl.array.empty_like', 'cla.empty_like', (['f'], {}), '(f)\n', (17807, 17810), True, 'import pyopencl.array as cla\n'), ((17896, 17912), 'numpy.empty_like', 
'np.empty_like', (['f'], {}), '(f)\n', (17909, 17912), True, 'import numpy as np\n'), ((16973, 16982), 'pymbolic.var', 'var', (['"""dt"""'], {}), "('dt')\n", (16976, 16982), False, 'from pymbolic import var\n')]
|
# -*- coding: utf-8 -*-
#
# <NAME> 2021 gpSTS
###########################################
###Configuration File######################
###for gpSTS steering of experiments######
###########################################
import os
import numpy as np
from gpsts.NanonisInterface.nanonis_interface import Nanonis
from gpsts.NanonisInterface.data_class import ScanData, SpecCounter, PointList, ImageInfo
from gpsts.NanonisInterface.kernel import kernel_l2
import json
###############################
###Initialize##################
###############################
nanonis_config = {
"Nanonis_Settings": {
"File": "gpSTSinit",
"ExperimentName": "Test Out",
"Version": "0.0.1",
"ImageStart": "test_img001.sxm",
"FolderLocation": "C:\\gpSTS\\src\\",
"DataLocation": "C:\\gpSTS\\src\\data\\",
"Channel": "Z",
"ImDirection": "forward",
"SpectralRange": [-1,1],
"NumSpectralPoints": 1200,
"Center_Point": [174,34],
"Search_Window": 40,
"Feature_Window": 20,
"ScanCurrent": 30e-12,
"SpecCurrent": 200e-12,
"STSbias": "Bias calc (V)",
"STSsignal": "Current (A)"
},
"Neural_Network": {
"TrainingPath": "C:\\gpSTS\\src\\train\\",
"EpochNumber": 2,
"ClassNumber": 4,
"LearningRate": 0.001,
"BatchSizeTrain": 5,
"BatchSizeVal": 1,
"BatchSizeTest": 1
}
}
with open('data/'+str(nanonis_config['Nanonis_Settings']['File'])+'.json','w') as fil:
json.dump(nanonis_config, fil, sort_keys = True, indent = 4, ensure_ascii = False)
Vals = ScanData()
Vals.update_file_info(nanonis_config['Nanonis_Settings']['FolderLocation'],
nanonis_config['Nanonis_Settings']['ImageStart'], nanonis_config['Nanonis_Settings']['Channel'],
nanonis_config['Nanonis_Settings']['ImDirection'])
Vals.update_search_conditions(nanonis_config['Nanonis_Settings']['Center_Point'],
nanonis_config['Nanonis_Settings']['Search_Window'],nanonis_config['Nanonis_Settings']['Feature_Window'],
nanonis_config['Nanonis_Settings']['SpectralRange'])
fil_path, imfile, channel, imdirection = Vals.get_file_info()
try:
imoff, impix, imsize = Nanonis.readheader(fil_path+'data'+'\\',imfile)
except Exception as e:
print('Error. Please save '+str(imfile)+' within '+str(fil_path)+'data\\')
raise e
Vals.update_scan_conditions(imoff, impix, imsize)
imdirectory = fil_path+'data'+'\\'+'impath'
if not os.path.exists(imdirectory):
os.makedirs(imdirectory)
datadirectory = fil_path+'data'
if not os.path.exists(datadirectory):
os.makedirs(datadirectory)
def return_scandata():
return Vals
spec_counter = SpecCounter()
spec_counter.update_maxcnt(10)
def return_cnt():
return spec_counter
recorded_points = PointList()
def return_pntlist():
return recorded_points
imout = Nanonis.readimage(fil_path+'data'+'\\'+imfile,channel,imdirection)
current_image = ImageInfo(imout)
def return_image():
return current_image
Nanonis.sxm_plot(imout,imdirectory,'current',recorded_points.get_list())
center_point, search_window, feature_window, spec_range = Vals.get_search_conditions()
imx1, imx2 = int((center_point[0]-(feature_window/2))), int((center_point[0]+(feature_window/2)))
imy1, imy2 = int((center_point[1]-(feature_window/2))), int((center_point[1]+(feature_window/2)))
imtrack = imout[imx1:imx2,imy1:imy2]
Nanonis.sxm_plot(imtrack,imdirectory,'feature',recorded_points.get_list())
###############################
###General#####################
###############################
from controls import perform_NanonisExp_BiasSpec, perform_experiment_overlap2
from gpsts.NanonisInterface.graph import plot_2d_function
parameters = {
"x1": {
"element interval": [1,int(impix[0][0])],
},
"x2": {
"element interval": [1,int(impix[0][0])],
},
}
###acquisition functions###
def my_ac_func(x,obj):
mean = obj.posterior_mean(x)["f(x)"]
cov = obj.posterior_covariance(x)["v(x)"]
sig = obj.shannon_information_gain(x)["sig"]
ucb = mean + 3.0 * np.sqrt(cov)
return cov
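# Added remark: with "steering mode" set to "covariance" below, my_ac_func returns the
# posterior variance as the acquisition score; the UCB value (mean + 3*sigma) and the
# Shannon information gain are computed above only as alternative scores one could return.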
gaussian_processes = {
"model_1": {
"kernel function": kernel_l2,
"hyperparameters": [1.0,1.0,1.0],
"hyperparameter bounds": [[1.0,100.0],[0.10,100.0],[0.10,100.0]],
"input hyper parameters": [1.0,1.0,1.0],
"output hyper parameters": [1.0],
"input hyper parameter bounds": [[0.01,1000000.0],[0.01,10.0],[0.01,10.0]],
"output hyper parameter bounds":[[0.9,1.1]],
"number of returns": 1,
"dimensionality of return": 1,
"variance optimization tolerance": 0.001,
"adjust optimization threshold": [True,0.1],
"steering mode": "covariance",
"run function in every iteration": None,
"data acquisition function": perform_NanonisExp_BiasSpec,
"acquisition function": my_ac_func,
"objective function": None,
"mean function": None,
"cost function": None,
"cost update function": None,
"cost function parameters": {"offset": 10,"slope":2.0},
"cost function optimization bounds": [[0.0,10.0],[0.0,10.0]],
"cost optimization chance" : 0.1,
"plot function": plot_2d_function,
"acquisition function optimization tolerance": 0.001
},
}
compute_device = "cpu"
sparse = False
compute_inverse = False
initial_likelihood_optimization_method = "global"
training_dask_client = False
prediction_dask_client = False
likelihood_optimization_tolerance = 1e-12
likelihood_optimization_max_iter = 200
automatic_signal_variance_range_determination = True
acquisition_function_optimization_method = "global"
chance_for_local_acquisition_function_optimization = 0.5
acquisition_function_optimization_population_size = 20
acquisition_function_optimization_max_iter = 20
global_likelihood_optimization_at = [200]
hgdl_likelihood_optimization_at = []
local_likelihood_optimization_at = []
breaking_error = 1e-18
########################################
###Variance Optimization################
########################################
objective_function_optimization_population_size = 20
likelihood_optimization_population_size = 20
number_of_suggested_measurements = 1
########################################
###Computation Parameters###############
########################################
global_kernel_optimization_frequency = 0.2
local_kernel_optimization_frequency = 0.5
gpu_acceleration = False
rank_n_update = [False,0.2]
gp_system_solver = "inv" # "inv", "cg" or "minres"
switch_system_solver_to_after = [True, "cg", 5000]
###############################
###DATA ACQUISITION############
###############################
initial_data_set_size = 1
max_number_of_measurements = 10
#####################################################################
###############END###################################################
#####################################################################
|
[
"gpsts.NanonisInterface.data_class.PointList",
"os.path.exists",
"gpsts.NanonisInterface.data_class.SpecCounter",
"numpy.sqrt",
"os.makedirs",
"json.dump",
"gpsts.NanonisInterface.nanonis_interface.Nanonis.readheader",
"gpsts.NanonisInterface.nanonis_interface.Nanonis.readimage",
"gpsts.NanonisInterface.data_class.ImageInfo",
"gpsts.NanonisInterface.data_class.ScanData"
] |
[((1551, 1561), 'gpsts.NanonisInterface.data_class.ScanData', 'ScanData', ([], {}), '()\n', (1559, 1561), False, 'from gpsts.NanonisInterface.data_class import ScanData, SpecCounter, PointList, ImageInfo\n'), ((2616, 2629), 'gpsts.NanonisInterface.data_class.SpecCounter', 'SpecCounter', ([], {}), '()\n', (2627, 2629), False, 'from gpsts.NanonisInterface.data_class import ScanData, SpecCounter, PointList, ImageInfo\n'), ((2721, 2732), 'gpsts.NanonisInterface.data_class.PointList', 'PointList', ([], {}), '()\n', (2730, 2732), False, 'from gpsts.NanonisInterface.data_class import ScanData, SpecCounter, PointList, ImageInfo\n'), ((2790, 2864), 'gpsts.NanonisInterface.nanonis_interface.Nanonis.readimage', 'Nanonis.readimage', (["(fil_path + 'data' + '\\\\' + imfile)", 'channel', 'imdirection'], {}), "(fil_path + 'data' + '\\\\' + imfile, channel, imdirection)\n", (2807, 2864), False, 'from gpsts.NanonisInterface.nanonis_interface import Nanonis\n'), ((2873, 2889), 'gpsts.NanonisInterface.data_class.ImageInfo', 'ImageInfo', (['imout'], {}), '(imout)\n', (2882, 2889), False, 'from gpsts.NanonisInterface.data_class import ScanData, SpecCounter, PointList, ImageInfo\n'), ((1461, 1537), 'json.dump', 'json.dump', (['nanonis_config', 'fil'], {'sort_keys': '(True)', 'indent': '(4)', 'ensure_ascii': '(False)'}), '(nanonis_config, fil, sort_keys=True, indent=4, ensure_ascii=False)\n', (1470, 1537), False, 'import json\n'), ((2139, 2191), 'gpsts.NanonisInterface.nanonis_interface.Nanonis.readheader', 'Nanonis.readheader', (["(fil_path + 'data' + '\\\\')", 'imfile'], {}), "(fil_path + 'data' + '\\\\', imfile)\n", (2157, 2191), False, 'from gpsts.NanonisInterface.nanonis_interface import Nanonis\n'), ((2402, 2429), 'os.path.exists', 'os.path.exists', (['imdirectory'], {}), '(imdirectory)\n', (2416, 2429), False, 'import os\n'), ((2435, 2459), 'os.makedirs', 'os.makedirs', (['imdirectory'], {}), '(imdirectory)\n', (2446, 2459), False, 'import os\n'), ((2499, 2528), 'os.path.exists', 'os.path.exists', (['datadirectory'], {}), '(datadirectory)\n', (2513, 2528), False, 'import os\n'), ((2534, 2560), 'os.makedirs', 'os.makedirs', (['datadirectory'], {}), '(datadirectory)\n', (2545, 2560), False, 'import os\n'), ((3996, 4008), 'numpy.sqrt', 'np.sqrt', (['cov'], {}), '(cov)\n', (4003, 4008), True, 'import numpy as np\n')]
|
import numpy as np
from numpy import array
from numpy.linalg import det
from numpy.linalg import matrix_rank
from numpy.linalg import solve
"""
*** remember the following useful tools***
from numpy import transpose
from numpy import dot
from numpy import argmax
from numpy import abs
from numpy.linalg import eig
from numpy.linalg import inv
"""
def linear_equations(matrix, vector) -> array:
"""
    This function solves a system of linear equations
:param matrix: matrix of coefficients
:param vector: vector of constant terms
>>> linear_equations(np.eye(2),np.array([1,1]))
The system has a single unique solution.
[1. 1.]
>>> linear_equations(np.array([[1,0],[1,0]]),np.array([1,0]))
The system has no solution.
"""
B = np.c_[matrix, vector]
rank_A = matrix_rank(matrix)
rank_B = matrix_rank(B)
if rank_A == rank_B:
if rank_A == len(matrix):
print(f'\n The system has a single unique solution.\n {solve(matrix, vector)}\n ')
return solve(matrix, vector)
else:
print('\n The system has infinitely many solutions. \n')
            if input('Do you want the matrix after the Gauss elimination? [y/n]\n') == 'y':
S = gauss_elimination(B)
print(S)
return S
else:
print('\n The system has no solution.\n')
return None
# exercise 2
def linear_dependence(matrix: array) -> int:
"""
    This function answers the question "Are these vectors linearly independent?"
:param matrix: matrix with vectors as rows
:return: the number of linearly independent vectors
"""
rank = matrix_rank(matrix)
if rank == matrix.shape[0]:
        print('The vectors are linearly independent')
    else:
        print(f'The vectors are linearly dependent and only {rank} of them are linearly independent')
        if input('Do you want the matrix after the Gauss elimination? [y/n]\n') == 'y':
S = gauss_elimination(matrix)
print(S)
return rank
# exercise 3
def cartesian_representation_line(vec_1: np.array, vec_2: np.array, type: int = 1) -> None:
"""
    This function prints the cartesian representation of a line
:param vec_1: first point
:param vec_2: direction (type = 0) or the second point (type = 1)
:param type: it switches between two points and one point and a direction
"""
if type:
vec_2 = vec_2 - vec_1
for i in range(len(vec_1)):
print(f' x_{i + 1} = {vec_1[i]} + {vec_2[i]}t')
return None
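# Example (illustrative, not part of the original module): the line through the points
# (0, 0) and (1, 2) is printed as
#   x_1 = 0 + 1t
#   x_2 = 0 + 2t
# cartesian_representation_line(np.array([0, 0]), np.array([1, 2]))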
def gauss_elimination(matrix) -> np.array:
"""
    This function performs the Gauss elimination process (via sympy's rref)
    :param matrix: generic matrix
    :return: matrix after Gauss elimination (reduced row echelon form)
"""
import sympy
return np.array(sympy.Matrix(matrix).rref()[0])
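# Example (illustrative): gauss_elimination(np.array([[1, 2], [2, 4]])) returns the
# reduced row echelon form [[1, 2], [0, 0]], showing that the second row is a multiple
# of the first.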
def conic_section_classification(coeff: list) -> None:
"""
This function provides a classification of a conic section
:param coeff: list of the coefficient of the equation of the conic section
if the equation is
A x^2 + B xy + C y^2 + D x + E y + F = 0
then the array coeff is
[A,B,C,D,E,F]
"""
    # symmetric matrix of the conic: [[A, B/2, D/2], [B/2, C, E/2], [D/2, E/2, F]]
    A = array([[coeff[0], coeff[1] / 2, coeff[3] / 2], [coeff[1] / 2, coeff[2], coeff[4] / 2],
               [coeff[3] / 2, coeff[4] / 2, coeff[5]]])
rank = matrix_rank(A)
if rank == 3:
d = det(A[:2, :2])
# remember that we have a finite precision on floats, for this reason we consider 1e-09 as tolerance
if d > 1e-09:
print('This conic section is an ellipse')
elif d < -1e-09:
print('This conic section is a hyperbola')
else:
print('This conic section is a parabola')
elif rank == 2:
print('This conic section is a degenerate conic, ', end="")
d = det(A[:2, :2])
if d > 1e-09:
print('in particular we have one point')
elif d < -1e-09:
print('in particular we have two incident lines')
else:
print('in particular we have two parallel lines')
else:
print('This conic section is a degenerate conic, in particular we have two coincident lines')
return None
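# Example (illustrative): the unit circle x^2 + y^2 - 1 = 0 corresponds to
# coeff = [1, 0, 1, 0, 0, -1]; conic_section_classification([1, 0, 1, 0, 0, -1])
# prints that the conic section is an ellipse.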
|
[
"numpy.linalg.matrix_rank",
"numpy.linalg.solve",
"sympy.Matrix",
"numpy.linalg.det",
"numpy.array"
] |
[((811, 830), 'numpy.linalg.matrix_rank', 'matrix_rank', (['matrix'], {}), '(matrix)\n', (822, 830), False, 'from numpy.linalg import matrix_rank\n'), ((844, 858), 'numpy.linalg.matrix_rank', 'matrix_rank', (['B'], {}), '(B)\n', (855, 858), False, 'from numpy.linalg import matrix_rank\n'), ((1689, 1708), 'numpy.linalg.matrix_rank', 'matrix_rank', (['matrix'], {}), '(matrix)\n', (1700, 1708), False, 'from numpy.linalg import matrix_rank\n'), ((3262, 3390), 'numpy.array', 'array', (['[[coeff[0], coeff[1] / 2, coeff[3] / 2], [coeff[1] / 2, coeff[2], coeff[4] /\n 2], [coeff[3], coeff[4] / 2, coeff[5]]]'], {}), '([[coeff[0], coeff[1] / 2, coeff[3] / 2], [coeff[1] / 2, coeff[2], \n coeff[4] / 2], [coeff[3], coeff[4] / 2, coeff[5]]])\n', (3267, 3390), False, 'from numpy import array\n'), ((3412, 3426), 'numpy.linalg.matrix_rank', 'matrix_rank', (['A'], {}), '(A)\n', (3423, 3426), False, 'from numpy.linalg import matrix_rank\n'), ((3457, 3471), 'numpy.linalg.det', 'det', (['A[:2, :2]'], {}), '(A[:2, :2])\n', (3460, 3471), False, 'from numpy.linalg import det\n'), ((1032, 1053), 'numpy.linalg.solve', 'solve', (['matrix', 'vector'], {}), '(matrix, vector)\n', (1037, 1053), False, 'from numpy.linalg import solve\n'), ((3907, 3921), 'numpy.linalg.det', 'det', (['A[:2, :2]'], {}), '(A[:2, :2])\n', (3910, 3921), False, 'from numpy.linalg import det\n'), ((2885, 2905), 'sympy.Matrix', 'sympy.Matrix', (['matrix'], {}), '(matrix)\n', (2897, 2905), False, 'import sympy\n'), ((985, 1006), 'numpy.linalg.solve', 'solve', (['matrix', 'vector'], {}), '(matrix, vector)\n', (990, 1006), False, 'from numpy.linalg import solve\n')]
|
"""
Module to execute the simulation for a given instance.
"""
""" import packages """
import logging
from importlib import import_module
import numpy.random as rdm
import copy
import numpy as np
""" import project configurations """
import configurations.settings_simulation as config
""" import project libraries """
import modules.data.datamgm as dtm
from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log
# Global logger
logger = dtm.initialise_logger(__name__)
"""
GLOBAL VARIABLES
----------------
- These variables must be reset after every simulation run
"""
#: now Simulation Clock
now = -1
#: last_now Last event
last_now = 0
#:event_queue Event queue
event_queue = []
#:trams List of running trams
trams = []
#:stops List of stops
stops = []
#:cargo List of cargo
cargo = []
#:updates List of updates
updates = set()
#:numEvents Number of total events
numEvents = 0
def reset_variables():
"""
Function to reset all global variables
"""
global now, last_now, numEvents, trams, stops, event_queue, cargo, updates
now = -1
last_now = 0
numEvents = 0
if trams:
trams[0].reset()
trams.clear()
for stop in stops:
stop.reset()
stops.clear()
event_queue.clear()
Passengers.reset()
if cargo:
cargo[0].reset()
cargo.clear()
updates.clear()
"""
SIMULATION LOGGING
------------------
- Simulation log (Text File): Includes all information about the events in the simulation
- Entities Log (csv file): Includes the relevant data information of single entities
"""
# "Simulation Log": What does in a single simulation run happen? (Descriptive)
sim_log = logging.getLogger("simulation")
# "Entities Log": How do the variables change during one simulation run?
ent_log = logging.getLogger("entities")
"""
SIMULATION METHODS
------------------
"""
def run(instance, passengerData, seed=False, index_child_seed=False):
"""
Run the simulation
:param instance: Path to the instance file
:param passengerData: Path to the passenger data file
:param seed: Seed to replicate the simulation
    :param index_child_seed: Index of the child of the global seedsequence
"""
# Used global variables
global inst, now, last_now, event_queue, numEvents
""" Initialise random generator """
# Check seed for random generator
if seed:
# seed sequence
entropy = seed.entropy
else:
seed = rdm.SeedSequence()
entropy = seed.entropy
# Import instance (from .py-file)
inst = dtm.import_instance(instance)
# Initialize the simulation
passenger = initialize(seed, passengerData)
# Run the simulation
running = True
while running:
# sort the upcoming events according to the time they occur
event_queue = sorted(event_queue,key = lambda i: i['time'])
if event_queue:
if event_queue[0]['time'] != now:
if now >= 0:
status(now)
for entity in updates:
if entity == "passenger":
entity = passenger
entity.last_event = now
write_entities_log(entity,now)
updates.clear()
last_now = now
now = event_queue[0]['time']
sim_log.info("\n-----------------------------------------------------------------------------------")
sim_log.info(f"Events at {now}:")
sim_log.info("***")
next_event()
numEvents+= 1
event_queue.pop(0)
# No more events
else:
last_time_period(inst.numPeriods-1,passenger)
running = False
# Save values for replicability
sim_log.info(f"\nentropy:\n{entropy}\n")
sim_log.info(f"index_child_seed:\n{entropy}\n")
# Reset after simulation run
reset_variables()
# Initialisation
def initialize(seed, passengerData):
"""
This function initialises the simulation run, i.e., creates the needed variables and adds the first events to the event log.
:param seed: Seed for replicability
:type seed: int
:param passengerData: Path to passenger data file
:type passengerData: string or path
:return: Global passenger object to track number of passengers
:rtype: Passengers object
"""
global event_queue
sim_log.info("Initialisation...\n--------------------------------------")
# Create child seedsequence per entity
seeds = seed.spawn(10)
# Entities Log
init_entities_log()
# initialize stops
for s in range(inst.numStops):
#sim_log.info("Creating Stop {}.".format(s))
distance_to = {"Stop": inst.stops_distance[s],"Customer": [0]}
distance_from = {"Stop": [inst.stops_distance[j][s] for j in range(inst.numStops)], "Customer": [0]}
if s == 0:
stops.append(Stop(distance_to,distance_from,True))
else:
stops.append(Stop(distance_to,distance_from))
pas = dtm.import_instance(passengerData)
""" Initialize passengers """
passenger_seeds = seeds[0].spawn(6)
if config.random_passenger_arrival:
arriving = pas.arriving_intensity
config.random_passenger_arrival = passenger_seeds[0]
else:
arriving = pas.passenger_arriving
# instantiate passenger arrivals
nonzero = np.nonzero(arriving)
for i in range(len(nonzero[0])):
p = nonzero[0][i]
s = nonzero[1][i]
create_event(p, 6, [s])
if config.random_passenger_boarding:
config.random_passenger_boarding = passenger_seeds[1]
if config.random_passenger_alighting:
        config.random_passenger_alighting = passenger_seeds[2]
if config.random_passenger_changing:
config.random_passenger_changing = passenger_seeds[3]
if config.random_boarding_time:
config.random_boarding_time = passenger_seeds[4]
if config.random_alighting_time:
config.random_alighting_time = passenger_seeds[5]
""" Global passenger variables """
passenger = Passengers(
# passenger arrival
random_arrival = config.random_passenger_arrival,
arriving_passengers = arriving,
arriving_passengers_cum = pas.passenger_arriving_acc,
# passenger boarding
random_boarding = config.random_passenger_boarding,
boarding_rate = [1 for tram in range(inst.numTrams)],
# passenger alighting
random_alighting = config.random_passenger_alighting,
alighting_rate = pas.passenger_allighting_rate,
# passenger changing
random_changing = config.random_passenger_changing,
changing_rate = [0 for tram in range(inst.numStops)],
# time
random_boarding_time = config.random_boarding_time,
random_alighting_time = config.random_alighting_time,
service_time = inst.passenger_service_time_board,
service_time_alight = inst.passenger_service_time_alight,
)
# Initialize the starting times of each tram
tram_seeds = seeds[1].spawn(inst.numTrams)
for t in range(inst.numTrams):
sim_log.info(f"Tram {t} will start at {inst.tram_time_arrival[t][0]}.")
Tram.numTotal += 1
create_event(inst.tram_time_arrival[t][0],1,[t,tram_seeds[t]])
# Initialize the cargo release
cargo_seeds = seeds[2].spawn(inst.numCargo)
for c in range(inst.numCargo):
sim_log.info(f"Cargo request {c} will start at {inst.cargo_release[c]}.")
create_event(inst.cargo_release[c],5,[c,cargo_seeds[c]])
# sort the event queue according to the time
event_queue = sorted(event_queue,key = lambda i: i['time'])
sim_log.info("\n-----------------------------------------------------------------------------------\n")
return passenger
def last_time_period(time,passenger):
"""
Write the log for the last period of the simulation
:param time: last period
:type time: float
:param passenger: passenger object
:type passenger: Passengers object
"""
status(time)
for t in trams:
write_entities_log(t,time)
for s in stops:
write_entities_log(s,time)
write_entities_log(passenger,time)
for c in cargo:
c.estimate_delay(time)
write_entities_log(c,time)
def status(time):
"""
Add the status of all entities to the simulation log
:param time: Time of update
:type time: float
"""
global updates
sim_log.info("\n*~* Status *~*")
for t in trams:
t.info()
if len(t.sequences) < t.stopped:
t.sequences.append( {"time": time, "cargo": t.cargosize, "passengers": t.passengers, "delay": t.delay} )
for t in stops:
t.info()
if len(t.sequences) < t.stopped:
t.sequences.append( {"time": time, "cargo": t.cargosize, "passengers": t.passengers} )
CargoRequest.info()
Passengers.info()
"""
METHODS FOR HANDLING EVENTS
---------------------------
"""
def create_event(t,event_id,par):
"""
Creating a new event given an event id and a list of parameters (if the event is within the time horizon)
:param t: time
:type t: float
:param event_id: event id
:type event_id: int
:param par: event parameters
:type par: list
"""
if np.ceil(t) < inst.numPeriods:
event_queue.append({"time": t, "id":event_id,"par":par})
def next_event():
"""
Execute the next event in the event queue
"""
# Choose the next event
event = event_queue[0]
# Extract event id and parameters
event_id = event["id"]
par = event["par"]
# Event-id: 1
# Description: Starting a new tram
if event_id == 1:
starting_tram(par[0],seed=par[1])
# Event-id: 2
# Description: Tram reaches stop (but does not enter yet)
if event_id == 2:
tram_reaches_stop(par[0])
# Event-id: 3
# Description: Tram enters stop
if event_id == 3:
tram_entering_stop(par[0])
# Event-id: 4
# Description: Tram leaves stop (and next tram can enter this stop)
if event_id == 4:
tram_leaves_stop(par[0])
# Event-id: 5
# Description: Cargo is released
if event_id == 5:
starting_cargo(par[0], seed=par[1])
# Event-id 6:
# Description: Update passengers
if event_id == 6:
passenger_update(par[0])
"""
EVENT METHODS
-----------------------------------
"""
def starting_tram(index,seed):
"""
Event no. 1: Starting a tram
:param index: Index of the tram
:type index: int
:param seed: Seed for replicability
:type seed: int
"""
global now, updates
tram_id = len(trams)
if config.random_travel_time:
config.random_travel_time = seed
# debugging
#logger.debug(f"tram_travel_deviation: {config.tram_travel_deviation}")
# if passengers and cargo share vehicles
if inst.scheme == "SV":
trams.append(Tram(
tour = inst.tram_tour[index],
capacity_passenger = inst.tram_capacity-inst.tram_capacity_min_cargo,
capacity_cargo = inst.tram_capacity-inst.tram_capacity_min_passenger,
capacity_total = inst.tram_capacity,
schedule_arrival = inst.tram_time_arrival[index],
schedule_departure = inst.tram_time_departure[index],
speed = inst.tram_speed,
# Simulation deterministic by default
random_travel_time = config.random_travel_time,
travel_deviation = config.tram_travel_deviation,
max_service = inst.tram_max_service
)
)
# if passengers and cargo have dedicated vehicles
elif inst.scheme == "SI":
if index in inst.cargo_tram_assignment:
# cargo tram
trams.append(Tram(
tour = inst.tram_tour[index],
capacity_passenger = 0,
capacity_cargo = inst.tram_capacity_cargo,
capacity_total = inst.tram_capacity,
schedule_arrival = inst.tram_time_arrival[index],
schedule_departure = inst.tram_time_departure[index],
speed = inst.tram_speed,
# Simulation deterministic by default
random_travel_time = config.random_travel_time,
travel_deviation = config.tram_travel_deviation,
max_service = inst.tram_max_service
)
)
else:
# passenger tram
trams.append(Tram(
tour = inst.tram_tour[index],
capacity_passenger = inst.tram_capacity,
capacity_cargo = 0,
capacity_total = inst.tram_capacity,
schedule_arrival = inst.tram_time_arrival[index],
schedule_departure = inst.tram_time_departure[index],
speed = inst.tram_speed,
# Simulation deterministic by default
random_travel_time = config.random_travel_time,
travel_deviation = config.tram_travel_deviation,
max_service = inst.tram_max_service
)
)
tram = trams[-1]
if tram.is_operating:
tram_reaches_stop(tram_id)
else:
updates.add(tram)
def tram_reaches_stop(tram_id):
"""
Event no. 2: Tram reaches stop. It either queues up or enters the stop.
:param tram_id: tram id
:type tram_id: int
"""
global now
tram = trams[tram_id]
tram.reach_next_location(now)
stop = stops[tram.tour[tram.position]]
if stop.check_queue(tram):
tram_entering_stop(tram_id)
else:
updates.add(tram)
def tram_entering_stop(tram_id):
"""
Event no. 3: Tram enters the platform of the stop.
:param tram_id: tram id
:type tram_id: int
"""
global now, updates
tram = trams[tram_id]
stop=stops[tram.tour[tram.position]]
tram.enter_next_stop(stop,now)
boarding_time = 0
alighting_time = 0
# Update passengers
if tram.passenger_transport:
boarding_time, alighting_time = passenger_update(stop.index,True,True)
# Compute leaving time with passengers only
leaving_time = tram.compute_leaving_time(now,boarding_time,alighting_time)
new_leaving_time = False
if tram.cargo_transport:
# unloading
tram_cargoload = copy.copy(tram.cargoload)
for c in tram_cargoload:
request = cargo[c]
if request.end_stop == stop.index:
unloading_time = request.unload(tram,stop,now)
new_leaving_time = tram.compute_leaving_time(now,unloading_time=unloading_time)
updates.add(request)
tram_cargoload.clear()
# loading
stop_cargoload = copy.copy(stop.cargoload)
for c in stop_cargoload:
request = cargo[c]
if request.assigned_vehicle == tram.index:
loading_time = request.load(tram,stop)
new_leaving_time = tram.compute_leaving_time(now,loading_time=loading_time)
updates.add(request)
stop_cargoload.clear()
updates.add(tram)
create_event(tram.leaving_time, 4, [tram_id])
return updates
def tram_leaves_stop(tram_id):
"""
Event no. 4: Tram leaves the stop.
:param tram_id: tram id
:type tram_id: int
"""
global now
tram = trams[tram_id]
stop = stops[tram.tour[tram.position]]
if tram.leaving_time == now:
travel_time = tram.leave_location(stop,now)
updates.add(tram)
updates.add(stop)
if tram.is_operating:
create_event(now + travel_time, 2, [tram_id])
next_tram = stop.next_tram_in_queue(tram)
if next_tram >= 0:
create_event(now + inst.min_time_next_tram , 3, [next_tram])
def starting_cargo(index,seed):
"""
Event no. 5: New cargo request arrives
:param index: cargo index
:type index: int
:param seed: seed for randomisation
:type seed: int
"""
global now, updates, trams
# Generate new cargo request
cargo.append(CargoRequest(
release = inst.cargo_release[index],
deadline = inst.cargo_station_deadline[index],
end_stop = inst.cargo_station_destination[index],
assigned_vehicle = inst.cargo_tram_assignment[index],
stop = stops[0],
service_time = inst.cargo_service_time_load,
service_time_unload = inst.cargo_service_time_unload,
size = inst.cargo_size,
random_service_time = seed,
)
)
request = cargo[-1]
# Check if tram is currently at platform
stop = stops[request.start_stop]
# Update the log of stop and request
updates.add(stop)
updates.add(request)
# If the assigned vehicle is currently at the depot
if stop.current_tram == request.assigned_vehicle:
# load tram
tram = trams[request.assigned_vehicle]
# update the current loading and leaving time of the tram
loading_time = request.load(tram, stop)
leaving_time = tram.compute_leaving_time(now,loading_time = loading_time)
# update the log of the tram
updates.add(tram)
# Did the leaving time change?
if leaving_time:
# -> Create a new event for leaving the stop
create_event(leaving_time, 4, [tram.index])
def passenger_update(stop_id,recent_tram_arrival = False, consider_tram=False):
"""
Event no. 6: New passengers arrive and/or alight and board a vehicle
:param stop_id: Index of the stop
:type stop_id: int
:param recent_tram_arrival: New arrival of tram (True) or update while tram is waiting (False)?, defaults to False
:type recent_tram_arrival: bool, optional
:param consider_tram: Consider boarding and alighting process (True) or only arrival (False), defaults to False
:type consider_tram: bool, optional
:return: boarding and alighting time
:rtype: tuple
"""
global now, updates
stop = stops[stop_id]
if consider_tram:
tram_id = stop.current_tram
else:
tram_id = -1
# Update arriving passengers
Passengers.arrival(now,stop)
boarding_time = 0
alighting_time = 0
# if currently a tram waits at the platform
if tram_id >= 0:
tram = trams[tram_id]
if recent_tram_arrival or tram.leaving_time != now:
if recent_tram_arrival:
# compute number and time for alighting passengers
alighting_passengers, alighting_time = Passengers.alighting(stop,tram,now)
# compute number and time for boarding passengers
boarding_passengers, boarding_time = Passengers.boarding(stop,tram,now)
if recent_tram_arrival:
# compute number and time for changing passengers
changing_passengers = Passengers.changing(stop,alighting_passengers,now)
# Update leaving time
if not recent_tram_arrival:
leaving_time = tram.compute_leaving_time(now,boarding_time,alighting_time, 0, 0)
updates.add(tram)
#write_entities_log(tram,now)
# Did the leaving time change?
if leaving_time:
create_event(leaving_time, 4, [tram_id])
#next_arrival = Passengers.compute_next_arrival_time(now,stop,tram)
#if next_arrival:
# create new event (for passengers that may arrive before the current tram leaves)
#create_event(next_arrival, 6, [stop_id])
updates.add(stop)
updates.add("passenger")
return boarding_time, alighting_time
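# --- Illustrative sketch, not part of the original module ---
# The event handlers above schedule follow-up work through create_event(time,
# event_no, args), which is defined elsewhere in this module and not shown here.
# A minimal discrete-event queue of the kind such simulations typically rely on
# could look like the sketch below; every name prefixed with _sketch_ is
# hypothetical and only illustrates the pattern.
import heapq

_sketch_event_queue = []  # min-heap of (time, event_no, args) tuples, ordered by time

def _sketch_create_event(time, event_no, args):
    """Schedule an event; events with earlier times are processed first."""
    heapq.heappush(_sketch_event_queue, (time, event_no, args))

def _sketch_run(handlers):
    """Pop events in time order and dispatch them, e.g. handlers = {2: tram_reaches_stop}."""
    global now
    while _sketch_event_queue:
        now, event_no, args = heapq.heappop(_sketch_event_queue)
        handlers[event_no](*args)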
|
[
"logging.getLogger",
"modules.simulation.entities.Tram",
"modules.simulation.entities.Passengers.reset",
"modules.simulation.entities.Passengers.boarding",
"modules.simulation.entities.Passengers.alighting",
"modules.simulation.entities.Stop",
"modules.data.datamgm.initialise_logger",
"modules.simulation.entities.Passengers.arrival",
"copy.copy",
"modules.data.datamgm.import_instance",
"modules.simulation.entities.Passengers.info",
"numpy.random.SeedSequence",
"modules.simulation.entities.write_entities_log",
"numpy.ceil",
"modules.simulation.entities.CargoRequest",
"modules.simulation.entities.CargoRequest.info",
"numpy.nonzero",
"modules.simulation.entities.Passengers.changing",
"modules.simulation.entities.init_entities_log"
] |
[((506, 537), 'modules.data.datamgm.initialise_logger', 'dtm.initialise_logger', (['__name__'], {}), '(__name__)\n', (527, 537), True, 'import modules.data.datamgm as dtm\n'), ((1746, 1777), 'logging.getLogger', 'logging.getLogger', (['"""simulation"""'], {}), "('simulation')\n", (1763, 1777), False, 'import logging\n'), ((1863, 1892), 'logging.getLogger', 'logging.getLogger', (['"""entities"""'], {}), "('entities')\n", (1880, 1892), False, 'import logging\n'), ((1335, 1353), 'modules.simulation.entities.Passengers.reset', 'Passengers.reset', ([], {}), '()\n', (1351, 1353), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((2674, 2703), 'modules.data.datamgm.import_instance', 'dtm.import_instance', (['instance'], {}), '(instance)\n', (2693, 2703), True, 'import modules.data.datamgm as dtm\n'), ((4872, 4891), 'modules.simulation.entities.init_entities_log', 'init_entities_log', ([], {}), '()\n', (4889, 4891), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((5355, 5389), 'modules.data.datamgm.import_instance', 'dtm.import_instance', (['passengerData'], {}), '(passengerData)\n', (5374, 5389), True, 'import modules.data.datamgm as dtm\n'), ((8672, 8707), 'modules.simulation.entities.write_entities_log', 'write_entities_log', (['passenger', 'time'], {}), '(passenger, time)\n', (8690, 8707), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((9408, 9427), 'modules.simulation.entities.CargoRequest.info', 'CargoRequest.info', ([], {}), '()\n', (9425, 9427), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((9441, 9458), 'modules.simulation.entities.Passengers.info', 'Passengers.info', ([], {}), '()\n', (9456, 9458), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((19199, 19228), 'modules.simulation.entities.Passengers.arrival', 'Passengers.arrival', (['now', 'stop'], {}), '(now, stop)\n', (19217, 19228), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((2565, 2583), 'numpy.random.SeedSequence', 'rdm.SeedSequence', ([], {}), '()\n', (2581, 2583), True, 'import numpy.random as rdm\n'), ((5734, 5754), 'numpy.nonzero', 'np.nonzero', (['arriving'], {}), '(arriving)\n', (5744, 5754), True, 'import numpy as np\n'), ((8575, 8602), 'modules.simulation.entities.write_entities_log', 'write_entities_log', (['t', 'time'], {}), '(t, time)\n', (8593, 8602), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((8636, 8663), 'modules.simulation.entities.write_entities_log', 'write_entities_log', (['s', 'time'], {}), '(s, time)\n', (8654, 8663), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((8772, 8799), 'modules.simulation.entities.write_entities_log', 'write_entities_log', (['c', 'time'], {}), '(c, time)\n', (8790, 8799), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((9852, 9862), 'numpy.ceil', 'np.ceil', (['t'], {}), '(t)\n', (9859, 9862), True, 'import numpy as np\n'), ((15133, 15158), 
'copy.copy', 'copy.copy', (['tram.cargoload'], {}), '(tram.cargoload)\n', (15142, 15158), False, 'import copy\n'), ((15608, 15633), 'copy.copy', 'copy.copy', (['stop.cargoload'], {}), '(stop.cargoload)\n', (15617, 15633), False, 'import copy\n'), ((17054, 17424), 'modules.simulation.entities.CargoRequest', 'CargoRequest', ([], {'release': 'inst.cargo_release[index]', 'deadline': 'inst.cargo_station_deadline[index]', 'end_stop': 'inst.cargo_station_destination[index]', 'assigned_vehicle': 'inst.cargo_tram_assignment[index]', 'stop': 'stops[0]', 'service_time': 'inst.cargo_service_time_load', 'service_time_unload': 'inst.cargo_service_time_unload', 'size': 'inst.cargo_size', 'random_service_time': 'seed'}), '(release=inst.cargo_release[index], deadline=inst.\n cargo_station_deadline[index], end_stop=inst.cargo_station_destination[\n index], assigned_vehicle=inst.cargo_tram_assignment[index], stop=stops[\n 0], service_time=inst.cargo_service_time_load, service_time_unload=inst\n .cargo_service_time_unload, size=inst.cargo_size, random_service_time=seed)\n', (17066, 17424), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((11590, 12076), 'modules.simulation.entities.Tram', 'Tram', ([], {'tour': 'inst.tram_tour[index]', 'capacity_passenger': '(inst.tram_capacity - inst.tram_capacity_min_cargo)', 'capacity_cargo': '(inst.tram_capacity - inst.tram_capacity_min_passenger)', 'capacity_total': 'inst.tram_capacity', 'schedule_arrival': 'inst.tram_time_arrival[index]', 'schedule_departure': 'inst.tram_time_departure[index]', 'speed': 'inst.tram_speed', 'random_travel_time': 'config.random_travel_time', 'travel_deviation': 'config.tram_travel_deviation', 'max_service': 'inst.tram_max_service'}), '(tour=inst.tram_tour[index], capacity_passenger=inst.tram_capacity -\n inst.tram_capacity_min_cargo, capacity_cargo=inst.tram_capacity - inst.\n tram_capacity_min_passenger, capacity_total=inst.tram_capacity,\n schedule_arrival=inst.tram_time_arrival[index], schedule_departure=inst\n .tram_time_departure[index], speed=inst.tram_speed, random_travel_time=\n config.random_travel_time, travel_deviation=config.\n tram_travel_deviation, max_service=inst.tram_max_service)\n', (11594, 12076), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((19796, 19832), 'modules.simulation.entities.Passengers.boarding', 'Passengers.boarding', (['stop', 'tram', 'now'], {}), '(stop, tram, now)\n', (19815, 19832), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((5230, 5268), 'modules.simulation.entities.Stop', 'Stop', (['distance_to', 'distance_from', '(True)'], {}), '(distance_to, distance_from, True)\n', (5234, 5268), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((5307, 5339), 'modules.simulation.entities.Stop', 'Stop', (['distance_to', 'distance_from'], {}), '(distance_to, distance_from)\n', (5311, 5339), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((19632, 19669), 'modules.simulation.entities.Passengers.alighting', 'Passengers.alighting', (['stop', 'tram', 'now'], {}), '(stop, tram, now)\n', (19652, 19669), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, 
init_entities_log\n'), ((19989, 20041), 'modules.simulation.entities.Passengers.changing', 'Passengers.changing', (['stop', 'alighting_passengers', 'now'], {}), '(stop, alighting_passengers, now)\n', (20008, 20041), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((3371, 3402), 'modules.simulation.entities.write_entities_log', 'write_entities_log', (['entity', 'now'], {}), '(entity, now)\n', (3389, 3402), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((12460, 12865), 'modules.simulation.entities.Tram', 'Tram', ([], {'tour': 'inst.tram_tour[index]', 'capacity_passenger': '(0)', 'capacity_cargo': 'inst.tram_capacity_cargo', 'capacity_total': 'inst.tram_capacity', 'schedule_arrival': 'inst.tram_time_arrival[index]', 'schedule_departure': 'inst.tram_time_departure[index]', 'speed': 'inst.tram_speed', 'random_travel_time': 'config.random_travel_time', 'travel_deviation': 'config.tram_travel_deviation', 'max_service': 'inst.tram_max_service'}), '(tour=inst.tram_tour[index], capacity_passenger=0, capacity_cargo=inst.\n tram_capacity_cargo, capacity_total=inst.tram_capacity,\n schedule_arrival=inst.tram_time_arrival[index], schedule_departure=inst\n .tram_time_departure[index], speed=inst.tram_speed, random_travel_time=\n config.random_travel_time, travel_deviation=config.\n tram_travel_deviation, max_service=inst.tram_max_service)\n', (12464, 12865), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n'), ((13191, 13590), 'modules.simulation.entities.Tram', 'Tram', ([], {'tour': 'inst.tram_tour[index]', 'capacity_passenger': 'inst.tram_capacity', 'capacity_cargo': '(0)', 'capacity_total': 'inst.tram_capacity', 'schedule_arrival': 'inst.tram_time_arrival[index]', 'schedule_departure': 'inst.tram_time_departure[index]', 'speed': 'inst.tram_speed', 'random_travel_time': 'config.random_travel_time', 'travel_deviation': 'config.tram_travel_deviation', 'max_service': 'inst.tram_max_service'}), '(tour=inst.tram_tour[index], capacity_passenger=inst.tram_capacity,\n capacity_cargo=0, capacity_total=inst.tram_capacity, schedule_arrival=\n inst.tram_time_arrival[index], schedule_departure=inst.\n tram_time_departure[index], speed=inst.tram_speed, random_travel_time=\n config.random_travel_time, travel_deviation=config.\n tram_travel_deviation, max_service=inst.tram_max_service)\n', (13195, 13590), False, 'from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg as la
def PCA(dat, center=False, percentage=0.8):
M, N = dat.shape
if center:
mu = np.mean(dat,0)
dat -= mu
U, L, Vh = la.svd(dat, full_matrices=False)
V = Vh.T.conjugate()
SIGMA = np.diag(L)
X = U.dot(SIGMA)
Lam = L**2
normalized_eigenvalues = Lam/Lam.sum(dtype=float)
    csum = [normalized_eigenvalues[:i+1].sum() for i in range(N)]  # cumulative explained variance
n_components = [x < percentage for x in csum].index(False) + 1
return (normalized_eigenvalues,
V[:,0:n_components],
SIGMA[0:n_components,0:n_components],
X[:,0:n_components])
def scree(normalized_eigenvalues):
fig = plt.figure()
plt.plot(normalized_eigenvalues,'b-', normalized_eigenvalues, 'bo')
plt.xlabel("Principal Components")
plt.ylabel("Percentage of Variance")
return fig
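# --- Illustrative usage sketch (synthetic data, not part of the original module) ---
# PCA() returns the normalized eigenvalue spectrum, the leading principal
# directions, the retained singular values and the projected data; scree() then
# plots how much variance each component explains.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = rng.normal(size=(100, 5))  # 100 observations of 5 features (synthetic)
    eigs, V, SIGMA, X = PCA(data, center=True, percentage=0.8)
    print("components kept:", V.shape[1])
    scree(eigs)
    plt.show()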
|
[
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.diag",
"matplotlib.pyplot.figure",
"scipy.linalg.svd"
] |
[((225, 257), 'scipy.linalg.svd', 'la.svd', (['dat'], {'full_matrices': '(False)'}), '(dat, full_matrices=False)\n', (231, 257), True, 'from scipy import linalg as la\n'), ((300, 310), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (307, 310), True, 'import numpy as np\n'), ((738, 750), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (748, 750), True, 'import matplotlib.pyplot as plt\n'), ((755, 823), 'matplotlib.pyplot.plot', 'plt.plot', (['normalized_eigenvalues', '"""b-"""', 'normalized_eigenvalues', '"""bo"""'], {}), "(normalized_eigenvalues, 'b-', normalized_eigenvalues, 'bo')\n", (763, 823), True, 'import matplotlib.pyplot as plt\n'), ((827, 861), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Principal Components"""'], {}), "('Principal Components')\n", (837, 861), True, 'import matplotlib.pyplot as plt\n'), ((866, 902), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage of Variance"""'], {}), "('Percentage of Variance')\n", (876, 902), True, 'import matplotlib.pyplot as plt\n'), ((176, 191), 'numpy.mean', 'np.mean', (['dat', '(0)'], {}), '(dat, 0)\n', (183, 191), True, 'import numpy as np\n')]
|
# --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import os
import torch
import torch.nn.functional as F
import numpy as np
from core import networks
from core.utils import *
from core.loss import *
import IPython
import time
class Agent(object):
"""
A general agent class
"""
def __init__(self, num_inputs, action_space, args, name):
for key, val in args.items():
setattr(self, key, val)
self.name = name
self.device = "cuda"
self.update_step = 1
self.init_step = 1
self.action_dim = action_space.shape[0]
self.has_critic = self.name != "BC"
self.action_space = action_space
self.num_inputs = num_inputs + self.num_input_extra
self.traj_feat = None
self.latent_sample = None
self.test_mode = False
self.use_debug_latent = False
self.gaddpg_pred = 0.
if has_check(self, 'traj_goal_mutual_conditioned') :
self.num_inputs += self.policy_traj_latent_size
self.policy, self.policy_optim, self.policy_scheduler, self.policy_target = get_policy_class('GaussianPolicy', self)
def unpack_batch(
self,
state,
point_state=None,
vis=False,
gt_goal=None,
val=False,
grasp_set=None,
vis_image=False,
repeat=False,
traj_latent=None,
separate=True
):
"""
Extract features from point cloud input
"""
if type(point_state) is list or type(point_state) is np.ndarray:
point_state = torch.cuda.FloatTensor(point_state )
if type(state) is list or type(state) is np.ndarray:
state = torch.cuda.FloatTensor(state)
state_feature, network_input = self.state_feature_extractor(
point_state,
feature_2=val,
traj_latent=traj_latent,
train=not self.test_mode)
if len(state_feature) != 2 or type(state_feature) is torch.Tensor: state_feature = [state_feature, None]
return state_feature
def gaddpg_step(self, state, remain_timestep, curr_joint ):
""" use GADDPG to forward pass """
state = select_target_point(state)
gaddpg_remain_step = max(min(remain_timestep + 1, 25), 1)
return self.gaddpg.select_action(state, remain_timestep=gaddpg_remain_step, curr_joint=curr_joint)
@torch.no_grad()
def batch_select_action(
self,
state,
actions=None,
goal_state=None,
vis=False,
remain_timestep=0,
repeat=False,
curr_joint=None,
gt_traj=None,
sample_num=None
):
"""
run policy forward pass in batch simulation
"""
self.set_mode(True)
traj = None
curr_joint_th = torch.cuda.FloatTensor(curr_joint)[:, :7]
img_state = torch.cuda.FloatTensor(state[0][1])
point_state = torch.cuda.FloatTensor(state[0][0])
timestep = remain_timestep
self.timestep = timestep
agent = self
feature, extra = agent.extract_feature( img_state,
point_state,
time_batch=timestep,
goal_batch=goal_state,
vis=vis,
value=False,
train=False,
repeat=repeat,
curr_joint=curr_joint_th )
actions = agent.policy.sample(feature)
action = actions[0].detach().cpu().numpy()
extra_pred = actions[1].detach().cpu().numpy()
action_sample = actions[2].detach().cpu().numpy()
aux_pred = actions[3].detach().cpu().numpy()
return action, traj, extra_pred, aux_pred
@torch.no_grad()
def select_action(
self,
state,
actions=None,
goal_state=None,
vis=False,
remain_timestep=0,
repeat=False,
curr_joint=None,
gt_traj=None,
sample_num=None
):
"""
        Policy output at test time
"""
self.set_mode(True)
multi_sample = has_check(self, 'multi_traj_sample') and gt_traj is None
if multi_sample and hasattr(self, 'critic') and self.train_traj_sampler and self.critic_mpc:
return self.critic_select_action(state, remain_timestep, curr_joint, vis=vis)
if self.name == 'DQN_HRL' and gt_traj is None and vis:
return self.critic_select_action(state, remain_timestep, curr_joint, vis=vis)
curr_joint_th = torch.Tensor([curr_joint.flatten()]).float().cuda()[:, :7]
img_state = torch.cuda.FloatTensor(state[0][1])[None]
point_state = torch.cuda.FloatTensor(state[0][0])[None]
timestep = torch.cuda.FloatTensor([remain_timestep])
self.timestep = timestep
if has_check(self, 'train_traj_sampler') and gt_traj is None and has_check(self, 'train_traj_feature'):
if multi_sample: # multiple traj samples
traj = self.select_traj(img_state,
point_state.repeat((self.test_traj_num, 1, 1)),
goal_state,
vis=vis,
remain_timestep=remain_timestep,
curr_joint=curr_joint_th.repeat((self.test_traj_num, 1)))
timestep = torch.Tensor([remain_timestep]).float().cuda()
opt_idx = 0
self.traj_feat = self.traj_feat[[opt_idx]]
else:
traj = self.select_traj(img_state, point_state, goal_state,
vis=vis, remain_timestep=remain_timestep,
curr_joint=curr_joint_th )
else:
traj = None
# policy
feature, extra = self.extract_feature( img_state,
point_state,
time_batch=timestep,
goal_batch=goal_state,
value=False,
train=False,
repeat=repeat,
curr_joint=curr_joint_th[:,:7] )
if self.name == 'DQN_HRL' and vis and hasattr(self, 'sampler_traj_feat'):
self.compute_critic_value( img_state, point_state, timestep, curr_joint_th, goal_state)
actions = self.policy.sample(feature)
action = actions[0].detach().cpu().numpy()[0]
extra_pred = actions[1].detach().cpu().numpy()[0]
action_sample = actions[2].detach().cpu().numpy()[0]
aux_pred = actions[3].detach().cpu().numpy()[0]
return action, traj, extra_pred, aux_pred
def update_parameters(self, batch_data, updates, k):
"""
To be inherited
"""
return {}
def compute_loss(self):
"""
compute loss for policy and trajectory embedding
"""
self.policy_grasp_aux_loss = goal_pred_loss(self.aux_pred[self.target_goal_reward_mask, :7], self.target_grasp_batch[self.target_goal_reward_mask, :7] )
self.bc_loss = traj_action_loss(self, self.pi, self.traj_expert_action_batch, self.target_expert_mask)
return sum([getattr(self, name) for name in self.loss_info if name.endswith('loss') and not name.startswith('critic')])
def prepare_data(self, batch_data):
"""
load batch data dictionary and compute extra data
"""
update_step = self.update_step - self.init_step
self.loss_info = list(get_loss_info_dict().keys())
for name in self.loss_info:
setattr(self, name, torch.zeros(1, device=torch.device('cuda')))
for k, v in batch_data.items():
setattr(self, k, torch.cuda.FloatTensor(v))
self.traj_time_batch = self.traj_idx_batch[:, 1, None]
self.cont_traj_inbatch_index = self.traj_idx_batch[:, 0].cuda().long()
self.traj_feat = None
self.reward_mask = (self.return_batch > 0).view(-1)
self.expert_mask = (self.expert_flag_batch >= 1).view(-1)
self.expert_reward_mask = self.reward_mask * (self.expert_flag_batch >= 1).squeeze()
self.perturb_flag_batch = self.perturb_flag_batch.bool()
self.traj_expert_reward_mask = self.expert_reward_mask[self.cont_traj_inbatch_index]
self.train_traj_idx_batch = self.cont_traj_inbatch_index
self.sparsify_sim_traj_time_batch = self.sparsify_sim_traj_idx_batch[:, 1, None]
self.sparsify_sim_cont_traj_inbatch_index = self.sparsify_sim_traj_idx_batch[:, 0].cuda().long()
self.sparsify_sim_traj_expert_reward_mask = self.expert_reward_mask[self.sparsify_sim_cont_traj_inbatch_index]
self.goal_reward_mask = torch.ones_like(self.time_batch).bool()
self.traj_goal_reward_mask = torch.ones_like(self.traj_integer_time_batch).bool()
self.target_grasp_batch = self.traj_goal_batch[:, :7] if self.full_traj_embedding else self.goal_batch[:, :7]
self.target_goal_reward_mask = self.goal_reward_mask[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.goal_reward_mask
self.target_reward_mask = self.reward_mask[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.reward_mask
self.target_return = self.return_batch[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.return_batch
self.target_expert_mask = self.expert_mask[self.cont_traj_inbatch_index] if self.full_traj_embedding else self.expert_mask
self.target_gaddpg_batch = (self.gaddpg_batch * self.reward_mask)
self.target_expert_reward_mask = self.traj_expert_reward_mask if self.full_traj_embedding else self.expert_reward_mask
self.next_time_batch = self.time_batch - 1
self.next_traj_time_batch = self.traj_integer_time_batch - 1
self.target_reward_batch = self.traj_reward_batch if self.full_traj_embedding else self.reward_batch
self.target_mask_batch = self.traj_mask_batch if self.full_traj_embedding else self.mask_batch
def log_stat(self):
"""
log grad and param statistics for tensorboard
"""
self.policy_grad = module_max_gradient(self.policy)
self.feat_grad = module_max_gradient(self.state_feature_extractor.module.encoder)
self.feat_param = module_max_param(self.state_feature_extractor.module.encoder)
self.val_feat_grad = module_max_gradient(self.state_feature_extractor.module.value_encoder)
self.val_feat_param = module_max_param(self.state_feature_extractor.module.value_encoder)
self.policy_param = module_max_param(self.policy)
self.reward_mask_num = self.reward_mask.float().sum()
self.max_traj_sample_len = torch.unique(self.cont_traj_inbatch_index, return_counts=True)[1].max()
self.traj_num = len(self.reward_mask)
self.train_batch_size = len(self.target_expert_reward_mask)
if hasattr(self, 'traj_feature_extractor'):
self.traj_grad = module_max_gradient(self.traj_feature_extractor)
self.traj_param = module_max_param(self.traj_feature_extractor)
if hasattr(self, 'sampler_gaussian'):
self.sampler_mean = self.sampler_gaussian[0].mean().item()
self.sampler_logsigma = self.sampler_gaussian[1].mean().item()
if self.train_traj_sampler and hasattr(self, 'sampler_traj_feat'):
self.traj_sampler_grad = module_max_gradient(self.traj_feature_sampler)
self.traj_sampler_param = module_max_param(self.traj_feature_sampler)
if self.has_critic:
self.value_mean, self.value_mean_2 = self.qf1.mean(), self.qf2.mean()
self.target_mean = self.next_q_value.mean()
self.return_mean = self.traj_return_batch.mean()
self.value_min, self.value_max = self.qf1.min(), self.qf1.max()
self.expert_reward_mask_num = self.expert_reward_mask.sum()
self.goal_reward_mask_num = self.goal_reward_mask.sum()
self.reward_mask_num = self.reward_mask.sum()
self.return_min, self.return_max = self.return_batch.min(), self.return_batch.max()
self.critic_grad = module_max_gradient(self.critic)
self.critic_param = module_max_param(self.critic)
def set_mode(self, test):
"""
set training or test mode for network
"""
self.test_mode = test
if not test:
self.state_feature_extractor.train()
self.policy.train()
if hasattr(self, "critic"):
self.critic.train()
self.critic_optim.zero_grad()
self.state_feat_val_encoder_optim.zero_grad()
if hasattr(self, 'traj_feature_extractor'):
if self.train_traj_feature and not self.fix_traj_feature:
self.traj_feature_extractor.train()
else:
self.traj_feature_extractor.eval()
if self.train_traj_sampler:
self.traj_feature_sampler.train()
else:
torch.no_grad()
self.policy.eval()
self.state_feature_extractor.eval()
if hasattr(self, "critic"): self.critic.eval()
if hasattr(self, "traj_feature_extractor"): self.traj_feature_extractor.eval()
if hasattr(self, "traj_feature_sampler"): self.traj_feature_sampler.eval()
def setup_feature_extractor(self, net_dict, test_time=False):
"""
Load networks
"""
if "traj_feature_extractor" in net_dict:
self.traj_feature_extractor = net_dict["traj_feature_extractor"]["net"]
self.traj_feature_extractor_opt = net_dict["traj_feature_extractor"]["opt"]
self.traj_feature_extractor_sch = net_dict["traj_feature_extractor"]["scheduler"]
else:
self.traj_feature_extractor = net_dict["state_feature_extractor"]["net"]
if 'traj_feature_sampler' in net_dict:
self.traj_feature_sampler = net_dict["traj_feature_sampler"]["net"]
self.traj_feature_sampler_opt = net_dict["traj_feature_sampler"]["opt"]
self.traj_feature_sampler_sch = net_dict["traj_feature_sampler"]["scheduler"]
self.state_feature_extractor = net_dict["state_feature_extractor"]["net"]
self.state_feature_extractor_optim = net_dict["state_feature_extractor"]["opt"]
self.state_feature_extractor_scheduler = net_dict["state_feature_extractor"]["scheduler"]
self.state_feat_encoder_optim = net_dict["state_feature_extractor"][ "encoder_opt" ]
self.state_feat_encoder_scheduler = net_dict["state_feature_extractor"][ "encoder_scheduler" ]
self.state_feat_val_encoder_optim = net_dict["state_feature_extractor"][ "val_encoder_opt" ]
self.state_feat_val_encoder_scheduler = net_dict["state_feature_extractor"][ "val_encoder_scheduler" ]
self.test_time = test_time
def get_mix_ratio(self, update_step):
"""
Get a mixed schedule for supervised learning and RL
"""
idx = int((self.update_step > np.array(self.mix_milestones)).sum())
mix_policy_ratio = get_valid_index(self.mix_policy_ratio_list, idx)
mix_policy_ratio = min(mix_policy_ratio, self.ddpg_coefficients[4])
mix_value_ratio = get_valid_index(self.mix_value_ratio_list, idx)
mix_value_ratio = min(mix_value_ratio, self.ddpg_coefficients[3])
return mix_value_ratio, mix_policy_ratio
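    # Worked example of the milestone indexing above (illustrative numbers only):
    # with mix_milestones = [1000, 4000, 8000] and update_step = 5000,
    # (update_step > np.array(mix_milestones)) evaluates to [True, True, False],
    # whose sum is 2, so get_valid_index (imported from core.utils, not shown here)
    # presumably selects the entry at index 2 of each ratio list before it is
    # clipped against the corresponding ddpg_coefficients bound.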
def get_lr(self):
"""
Get network learning rates
"""
lrs = {
"policy_lr": self.policy_optim.param_groups[0]["lr"],
"feature_lr": self.state_feature_extractor_optim.param_groups[0]["lr"],
}
if self.train_traj_feature:
lrs["traj_feature_lr"] = self.traj_feature_extractor_opt.param_groups[0]["lr"]
if self.train_traj_sampler:
lrs["traj_sampler_lr"] = self.traj_feature_sampler_opt.param_groups[0]["lr"]
if hasattr(self, 'critic_optim'):
lrs["value_lr"] = self.critic_optim.param_groups[0]["lr"]
lrs["val_feat_lr"] = self.state_feat_val_encoder_optim.param_groups[0]["lr"]
headers = ["network", "learning rate"]
data = [(name, lr) for name, lr in lrs.items()]
return lrs
def optimize(self, loss, update_step):
"""
Backward loss and update optimizer
"""
self.state_feat_encoder_optim.zero_grad()
self.policy_optim.zero_grad()
if self.train_traj_feature:
self.traj_feature_extractor_opt.zero_grad()
if self.train_traj_sampler:
self.traj_feature_sampler_opt.zero_grad()
loss.backward(retain_graph=self.re_sampler_step)
self.policy_optim.step()
if self.train_feature:
self.state_feat_encoder_optim.step()
if self.train_traj_feature:
self.traj_feature_extractor_opt.step()
if self.train_traj_sampler:
self.traj_feature_sampler_opt.step()
def step_scheduler(self, step=None):
"""
Update network scheduler
"""
if self.train_traj_sampler:
self.traj_feature_sampler_sch.step()
if self.train_traj_feature:
self.traj_feature_extractor_sch.step()
if hasattr(self, "critic"):
self.critic_scheduler.step()
if hasattr(self, "policy"):
self.policy_scheduler.step()
if self.train_feature or self.train_value_feature:
self.state_feature_extractor_scheduler.step()
self.state_feat_encoder_scheduler.step()
if self.train_value_feature and hasattr(self, 'state_feat_val_encoder_scheduler'):
self.state_feat_val_encoder_scheduler.step()
def save_model(
self,
step,
output_dir="",
surfix="latest",
actor_path=None,
critic_path=None,
traj_feat_path=None,
state_feat_path=None,
):
"""
save model
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
actor_path, critic_path, traj_feat_path, traj_sampler_path, state_feat_path = get_model_path(output_dir,
self.name, self.env_name, surfix)
print("Saving models to {} and {}".format(actor_path, critic_path))
if hasattr(self, "policy"):
torch.save(
{
"net": self.policy.state_dict(),
"opt": self.policy_optim.state_dict(),
"sch": self.policy_scheduler.state_dict(),
},
actor_path,
)
if hasattr(self, "critic"):
torch.save(
{
"net": self.critic.state_dict(),
"opt": self.critic_optim.state_dict(),
"sch": self.critic_scheduler.state_dict(),
},
critic_path,
)
if hasattr(self, 'traj_feature_extractor_opt'):
torch.save(
{
"net": self.traj_feature_extractor.state_dict(),
"opt": self.traj_feature_extractor_opt.state_dict(),
"sch": self.traj_feature_extractor_sch.state_dict(),
},
traj_feat_path,
)
if hasattr(self, 'traj_feature_sampler_opt'):
torch.save(
{
"net": self.traj_feature_sampler.state_dict(),
"opt": self.traj_feature_sampler_opt.state_dict(),
"sch": self.traj_feature_sampler_sch.state_dict(),
},
traj_sampler_path,
)
torch.save(
{
"net": self.state_feature_extractor.state_dict(),
"opt": self.state_feature_extractor_optim.state_dict(),
"encoder_opt": self.state_feat_encoder_optim.state_dict(),
"sch": self.state_feature_extractor_scheduler.state_dict(),
"encoder_sch": self.state_feat_encoder_scheduler.state_dict(),
"val_encoder_opt": self.state_feat_val_encoder_optim.state_dict(),
"val_encoder_sch": self.state_feat_val_encoder_scheduler.state_dict(),
"step": step,
},
state_feat_path,
)
def load_model(
self, output_dir, surfix="latest", set_init_step=False, reinit_value_feat=False
):
"""
Load saved model
"""
actor_path, critic_path, traj_feat_path, traj_sampler_path, state_feat_path = get_model_path(output_dir,
self.name, self.env_name, surfix)
if hasattr(self, "policy") and os.path.exists(actor_path):
net_dict = torch.load(actor_path)
self.policy.load_state_dict(net_dict["net"])
self.policy_optim.load_state_dict(net_dict["opt"])
self.policy_scheduler.load_state_dict(net_dict["sch"])
if self.reinit_optim and set_init_step:
for g in self.policy_optim.param_groups:
g["lr"] = self.reinit_lr
self.policy_scheduler = torch.optim.lr_scheduler.MultiStepLR(
self.policy_optim, milestones=self.policy_milestones, gamma=0.5 )
self.policy_scheduler.initial_lr = self.reinit_lr
self.policy_scheduler.base_lrs[0] = self.reinit_lr
print("reinit policy optim")
print("load policy weight: {:.3f} from {} !!!!".format(module_max_param(self.policy), actor_path))
hard_update(self.policy_target, self.policy, self.tau)
if hasattr(self, "critic") and os.path.exists(critic_path):
net_dict = torch.load(critic_path)
self.critic.load_state_dict(net_dict["net"])
self.critic_optim.load_state_dict(net_dict["opt"])
self.critic_scheduler.load_state_dict(net_dict["sch"])
print("load critic weight: {:.3f} !!!!".format(module_max_param(self.critic)))
hard_update(self.critic_target, self.critic, self.tau)
if hasattr(self, 'traj_feature_extractor') and os.path.exists(traj_feat_path):
net_dict = torch.load(traj_feat_path)
self.traj_feature_extractor.load_state_dict(net_dict["net"], strict=False)
print('load traj feature weight: {:.3f} from {} !!!!'.format(module_max_param(self.traj_feature_extractor), traj_feat_path))
try:
self.traj_feature_extractor_opt.load_state_dict(net_dict["opt"])
self.traj_feature_extractor_sch.load_state_dict(net_dict["sch"])
except:
pass
if hasattr(self, 'train_traj_sampler') and os.path.exists(traj_sampler_path):
net_dict = torch.load(traj_sampler_path)
self.traj_feature_sampler.load_state_dict(net_dict["net"], strict=False)
print('load traj sampler weight: {:.3f} from {} !!!!'.format(module_max_param(self.traj_feature_sampler), traj_sampler_path))
try:
self.traj_feature_sampler_opt.load_state_dict(net_dict["opt"])
self.traj_feature_sampler_sch.load_state_dict(net_dict["sch"])
except:
pass
if os.path.exists(state_feat_path):
net_dict = torch.load(state_feat_path)
if has_check(self, 'reinit_feat_opt'):
self.state_feature_extractor.load_state_dict(dict([(n, p) for n, p in net_dict["net"].items() if 'value' not in n ]),strict=False)
else:
self.state_feature_extractor.load_state_dict(net_dict["net"] )
self.state_feature_extractor_optim.load_state_dict(net_dict["opt"])
self.state_feature_extractor_scheduler.load_state_dict( net_dict["sch"] )
self.state_feat_encoder_optim.load_state_dict( net_dict["encoder_opt"] )
self.state_feat_encoder_scheduler.load_state_dict( net_dict["encoder_sch"] )
if not has_check(self, 'reinit_feat_opt'):
self.state_feat_val_encoder_optim.load_state_dict(
net_dict["val_encoder_opt"] )
self.state_feat_val_encoder_scheduler.load_state_dict(
net_dict["val_encoder_sch"] )
print(
"load feature weight: {} !!!! from: {} step :{}".format(
module_max_param(self.state_feature_extractor), state_feat_path, net_dict["step"]))
self.update_step = net_dict["step"]
self.init_step = self.update_step
return self.update_step
return 0
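# --- Illustrative sketch, not the actual core.utils implementation ---
# load_model() above finishes by calling hard_update(target, source, tau), which
# is imported from core.utils and not reproduced here. Target-network updates in
# PyTorch commonly follow one of the two patterns below; the function names are
# hypothetical.
def _sketch_hard_update(target, source):
    """Copy every parameter of `source` into `target`."""
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        t_param.data.copy_(s_param.data)

def _sketch_soft_update(target, source, tau):
    """Polyak averaging: target = tau * source + (1 - tau) * target."""
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        t_param.data.copy_(tau * s_param.data + (1.0 - tau) * t_param.data)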
|
[
"os.path.exists",
"torch.ones_like",
"torch.unique",
"torch.cuda.FloatTensor",
"torch.optim.lr_scheduler.MultiStepLR",
"os.makedirs",
"torch.load",
"torch.Tensor",
"numpy.array",
"torch.no_grad",
"torch.device"
] |
[((2639, 2654), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2652, 2654), False, 'import torch\n'), ((4199, 4214), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4212, 4214), False, 'import torch\n'), ((3116, 3151), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['state[0][1]'], {}), '(state[0][1])\n', (3138, 3151), False, 'import torch\n'), ((3174, 3209), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['state[0][0]'], {}), '(state[0][0])\n', (3196, 3209), False, 'import torch\n'), ((5202, 5243), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['[remain_timestep]'], {}), '([remain_timestep])\n', (5224, 5243), False, 'import torch\n'), ((24157, 24188), 'os.path.exists', 'os.path.exists', (['state_feat_path'], {}), '(state_feat_path)\n', (24171, 24188), False, 'import os\n'), ((1706, 1741), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['point_state'], {}), '(point_state)\n', (1728, 1741), False, 'import torch\n'), ((1824, 1853), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['state'], {}), '(state)\n', (1846, 1853), False, 'import torch\n'), ((3054, 3088), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['curr_joint'], {}), '(curr_joint)\n', (3076, 3088), False, 'import torch\n'), ((5077, 5112), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['state[0][1]'], {}), '(state[0][1])\n', (5099, 5112), False, 'import torch\n'), ((5141, 5176), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['state[0][0]'], {}), '(state[0][0])\n', (5163, 5176), False, 'import torch\n'), ((13765, 13780), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13778, 13780), False, 'import torch\n'), ((18770, 18796), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (18784, 18796), False, 'import os\n'), ((18810, 18833), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (18821, 18833), False, 'import os\n'), ((21583, 21609), 'os.path.exists', 'os.path.exists', (['actor_path'], {}), '(actor_path)\n', (21597, 21609), False, 'import os\n'), ((21634, 21656), 'torch.load', 'torch.load', (['actor_path'], {}), '(actor_path)\n', (21644, 21656), False, 'import torch\n'), ((22560, 22587), 'os.path.exists', 'os.path.exists', (['critic_path'], {}), '(critic_path)\n', (22574, 22587), False, 'import os\n'), ((22612, 22635), 'torch.load', 'torch.load', (['critic_path'], {}), '(critic_path)\n', (22622, 22635), False, 'import torch\n'), ((23039, 23069), 'os.path.exists', 'os.path.exists', (['traj_feat_path'], {}), '(traj_feat_path)\n', (23053, 23069), False, 'import os\n'), ((23094, 23120), 'torch.load', 'torch.load', (['traj_feat_path'], {}), '(traj_feat_path)\n', (23104, 23120), False, 'import torch\n'), ((23618, 23651), 'os.path.exists', 'os.path.exists', (['traj_sampler_path'], {}), '(traj_sampler_path)\n', (23632, 23651), False, 'import os\n'), ((23676, 23705), 'torch.load', 'torch.load', (['traj_sampler_path'], {}), '(traj_sampler_path)\n', (23686, 23705), False, 'import torch\n'), ((24213, 24240), 'torch.load', 'torch.load', (['state_feat_path'], {}), '(state_feat_path)\n', (24223, 24240), False, 'import torch\n'), ((8394, 8419), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['v'], {}), '(v)\n', (8416, 8419), False, 'import torch\n'), ((9385, 9417), 'torch.ones_like', 'torch.ones_like', (['self.time_batch'], {}), '(self.time_batch)\n', (9400, 9417), False, 'import torch\n'), ((9462, 9507), 'torch.ones_like', 'torch.ones_like', (['self.traj_integer_time_batch'], {}), '(self.traj_integer_time_batch)\n', (9477, 9507), 
False, 'import torch\n'), ((22039, 22145), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['self.policy_optim'], {'milestones': 'self.policy_milestones', 'gamma': '(0.5)'}), '(self.policy_optim, milestones=self.\n policy_milestones, gamma=0.5)\n', (22075, 22145), False, 'import torch\n'), ((11399, 11461), 'torch.unique', 'torch.unique', (['self.cont_traj_inbatch_index'], {'return_counts': '(True)'}), '(self.cont_traj_inbatch_index, return_counts=True)\n', (11411, 11461), False, 'import torch\n'), ((8301, 8321), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (8313, 8321), False, 'import torch\n'), ((15804, 15833), 'numpy.array', 'np.array', (['self.mix_milestones'], {}), '(self.mix_milestones)\n', (15812, 15833), True, 'import numpy as np\n'), ((5863, 5894), 'torch.Tensor', 'torch.Tensor', (['[remain_timestep]'], {}), '([remain_timestep])\n', (5875, 5894), False, 'import torch\n')]
|
import gym
import datetime
import os
import numpy as np
from agent import DeepQAgent
def main():
env = gym.make("LunarLander-v2")
timestamp = '{:%Y-%m-%d-%H:%M}'.format(datetime.datetime.now())
o_dir = "LunarLander-v2/{}/models".format(timestamp)
if not os.path.exists(o_dir):
os.makedirs(o_dir)
nof_episodes = 500
# 8 values in [0, 1]
state_size = env.observation_space.shape[0]
# 0, 1, 2, 3
action_size = env.action_space.n
agent = DeepQAgent(state_size, action_size, model=2)
batch_size = 32
for episode in range(nof_episodes):
state = env.reset()
state = np.reshape(state, [1, state_size])
done = False
t = 0
episode_reward = 0
# Iterate over the timesteps
while not done:
env.render()
# Instruct the agent to choose an action based on the current state of the environment
# This may be a random action depending on the value of the exploration_rate(epsilon)
action = agent.act(state)
# Execute said action
next_state, reward, done, _ = env.step(action)
episode_reward += reward
next_state = np.reshape(next_state, [1, state_size])
agent.memorize(state, action, reward, next_state, done)
state = next_state
if done:
print("episode: {}/{}, time: {}, total_reward: {}"
.format(episode, nof_episodes - 1, t, episode_reward))
t += 1
if len(agent.memory) / batch_size > 1:
agent.train(batch_size)
# Save model after training
if episode % batch_size == 1:
agent.save(o_dir + "/model_" + str(episode) + ".hdf5")
if __name__ == "__main__":
main()
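# --- Illustrative stub, not the actual agent module ---
# main() above relies only on the interface sketched below; the real DeepQAgent
# lives in agent.py and is not reproduced here. The class name and method bodies
# are hypothetical and merely document the contract the script depends on.
class _SketchDeepQAgent:
    def __init__(self, state_size, action_size, model=2):
        self.memory = []  # replay buffer of (state, action, reward, next_state, done)

    def act(self, state):
        return 0  # the real agent chooses epsilon-greedily over Q-values

    def memorize(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def train(self, batch_size):
        pass  # the real agent replays a minibatch from self.memory

    def save(self, path):
        pass  # the real agent serialises its model, e.g. to HDF5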
|
[
"os.path.exists",
"numpy.reshape",
"os.makedirs",
"agent.DeepQAgent",
"datetime.datetime.now",
"gym.make"
] |
[((110, 136), 'gym.make', 'gym.make', (['"""LunarLander-v2"""'], {}), "('LunarLander-v2')\n", (118, 136), False, 'import gym\n'), ((487, 531), 'agent.DeepQAgent', 'DeepQAgent', (['state_size', 'action_size'], {'model': '(2)'}), '(state_size, action_size, model=2)\n', (497, 531), False, 'from agent import DeepQAgent\n'), ((181, 204), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (202, 204), False, 'import datetime\n'), ((274, 295), 'os.path.exists', 'os.path.exists', (['o_dir'], {}), '(o_dir)\n', (288, 295), False, 'import os\n'), ((305, 323), 'os.makedirs', 'os.makedirs', (['o_dir'], {}), '(o_dir)\n', (316, 323), False, 'import os\n'), ((637, 671), 'numpy.reshape', 'np.reshape', (['state', '[1, state_size]'], {}), '(state, [1, state_size])\n', (647, 671), True, 'import numpy as np\n'), ((1212, 1251), 'numpy.reshape', 'np.reshape', (['next_state', '[1, state_size]'], {}), '(next_state, [1, state_size])\n', (1222, 1251), True, 'import numpy as np\n')]
|
import glob
import os
import sys
from sgfmill.sgfmill import sgf
import global_vars_go as gvg
import loader
import utils
import board3d as go_board
import numpy as np
kifuPath = "./kifu"
num_games = gvg.num_games
from_game = gvg.from_test_games
lb_size = 250.
correct = 0
total = 0
num_lb = int((num_games-1)/lb_size) + 1 # Number of loading batches
model = loader.load_model_from_file(gvg.nn_type)
for lb in range(num_lb):
games = []
print("Loading game data...")
i = 0
for filename in glob.glob(os.path.join(kifuPath, "*.sgf")):
load_limit = min((lb+1) * lb_size, num_games)
if from_game + (lb) * lb_size <= i < from_game + load_limit:
with open(filename, "rb") as f:
games.append(sgf.Sgf_game.from_bytes(f.read()))
i += 1
print("Done loading {} games".format(len(games)))
print("Being data processing...")
train_boards = []
train_next_moves = []
for game_index in range(len(games)):
board = go_board.setup_board(games[game_index])
for node in games[game_index].get_main_sequence():
board = go_board.switch_player_perspec(board) # Changes player perspective, black becomes white and vice versa
node_move = node.get_move()[1]
if node_move is not None:
train_boards.append(go_board.get_encoded_board(board))
next_move = np.zeros(gvg.board_size * gvg.board_size).reshape(gvg.board_size, gvg.board_size)
next_move[node_move[0], node_move[1]] = gvg.filled # y = an array in the form [board_x_position, board_y_position]
train_next_moves.append(next_move.reshape(gvg.board_size * gvg.board_size))
board = go_board.make_move(board, node_move, gvg.bot_channel, gvg.player_channel) # Update board with new move
if board is None:
print("ERROR! Illegal move, {}, while training".format(node_move))
print("Finished data processing...")
print("Begin testing...")
for i in range(len(train_boards)):
pred = np.asarray(model.predict(train_boards[i].reshape(1, gvg.board_size, gvg.board_size, gvg.enc_board_channels))) \
.reshape(gvg.board_size * gvg.board_size)
if pred.argmax() == train_next_moves[i].argmax():
correct += 1
total += 1
print("Accuracy: {}".format(correct/total))
print("Finished testing")
|
[
"board3d.make_move",
"os.path.join",
"board3d.get_encoded_board",
"board3d.setup_board",
"numpy.zeros",
"board3d.switch_player_perspec",
"loader.load_model_from_file"
] |
[((380, 420), 'loader.load_model_from_file', 'loader.load_model_from_file', (['gvg.nn_type'], {}), '(gvg.nn_type)\n', (407, 420), False, 'import loader\n'), ((544, 575), 'os.path.join', 'os.path.join', (['kifuPath', '"""*.sgf"""'], {}), "(kifuPath, '*.sgf')\n", (556, 575), False, 'import os\n'), ((1036, 1075), 'board3d.setup_board', 'go_board.setup_board', (['games[game_index]'], {}), '(games[game_index])\n', (1056, 1075), True, 'import board3d as go_board\n'), ((1157, 1194), 'board3d.switch_player_perspec', 'go_board.switch_player_perspec', (['board'], {}), '(board)\n', (1187, 1194), True, 'import board3d as go_board\n'), ((1780, 1853), 'board3d.make_move', 'go_board.make_move', (['board', 'node_move', 'gvg.bot_channel', 'gvg.player_channel'], {}), '(board, node_move, gvg.bot_channel, gvg.player_channel)\n', (1798, 1853), True, 'import board3d as go_board\n'), ((1382, 1415), 'board3d.get_encoded_board', 'go_board.get_encoded_board', (['board'], {}), '(board)\n', (1408, 1415), True, 'import board3d as go_board\n'), ((1446, 1487), 'numpy.zeros', 'np.zeros', (['(gvg.board_size * gvg.board_size)'], {}), '(gvg.board_size * gvg.board_size)\n', (1454, 1487), True, 'import numpy as np\n')]
|
import os
import numpy as np
from allennlp.predictors import Predictor
from isanlp.annotation_rst import DiscourseUnit
from symbol_map import SYMBOL_MAP
class AllenNLPSegmenter:
def __init__(self, model_dir_path, cuda_device=-1):
self._model_path = os.path.join(model_dir_path, 'segmenter_neural', 'model.tar.gz')
self._cuda_device = cuda_device
self.predictor = Predictor.from_path(self._model_path, cuda_device=self._cuda_device)
self._separator = 'U-S'
self._symbol_map = SYMBOL_MAP
def __call__(self, annot_text, annot_tokens, annot_sentences, annot_lemma, annot_postag, annot_synt_dep_tree,
start_id=0):
return self._build_discourse_units(annot_text, annot_tokens,
self._predict(annot_tokens, annot_sentences), start_id)
def _predict(self, tokens, sentences):
"""
:return: numbers of tokens predicted as EDU left boundaries
"""
_sentences = []
for sentence in sentences:
text = ' '.join([self._prepare_token(token.text) for token in tokens[sentence.begin:sentence.end]]).strip()
if text:
_sentences.append(text)
predictions = self.predictor.predict_batch_json([{'sentence': sentence} for sentence in _sentences])
result = []
for i, prediction in enumerate(predictions):
pred = np.array(prediction['tags'][:sentences[i].end - sentences[i].begin]) == self._separator
            # The first token of a sentence starts a new EDU,
            # unless the previous sentence starts with a list-item number (a digit followed by '.')
if len(pred) > 0:
if i > 0:
if predictions[i - 1]['words'][1] == '.' and predictions[i - 1]['words'][0] in "0123456789":
pred[0] = False
else:
pred[0] = True
# No single-token EDUs
for j, token in enumerate(pred[:-1]):
if token and pred[j + 1]:
if j == 0:
pred[j + 1] = False
else:
pred[j] = False
result += list(pred)
return np.argwhere(np.array(result) == True)[:, 0]
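    # Worked example for the conversion above (illustrative values only):
    # result = [True, False, False, True, False]
    # np.argwhere(np.array(result) == True)[:, 0] -> array([0, 3]),
    # i.e. the token positions predicted as EDU left boundaries.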
def _build_discourse_units(self, text, tokens, numbers, start_id):
"""
:param text: original text
:param list tokens: isanlp.annotation.Token
:param numbers: positions of tokens predicted as EDU left boundaries (beginners)
:return: list of DiscourseUnit
"""
edus = []
if numbers.shape[0]:
for i in range(0, len(numbers) - 1):
new_edu = DiscourseUnit(start_id + i,
start=tokens[numbers[i]].begin,
end=tokens[numbers[i + 1]].begin - 1,
text=text[tokens[numbers[i]].begin:tokens[numbers[i + 1]].begin],
relation='elementary',
nuclearity='_')
edus.append(new_edu)
if numbers.shape[0] == 1:
i = -1
new_edu = DiscourseUnit(start_id + i + 1,
start=tokens[numbers[-1]].begin,
end=tokens[-1].end,
text=text[tokens[numbers[-1]].begin:tokens[-1].end],
relation='elementary',
nuclearity='_')
edus.append(new_edu)
return edus
def _prepare_token(self, token):
for key, value in self._symbol_map.items():
token = token.replace(key, value)
for keyword in ['www', 'http']:
if keyword in token:
return '_html_'
return token
|
[
"allennlp.predictors.Predictor.from_path",
"numpy.array",
"os.path.join",
"isanlp.annotation_rst.DiscourseUnit"
] |
[((265, 329), 'os.path.join', 'os.path.join', (['model_dir_path', '"""segmenter_neural"""', '"""model.tar.gz"""'], {}), "(model_dir_path, 'segmenter_neural', 'model.tar.gz')\n", (277, 329), False, 'import os\n'), ((395, 463), 'allennlp.predictors.Predictor.from_path', 'Predictor.from_path', (['self._model_path'], {'cuda_device': 'self._cuda_device'}), '(self._model_path, cuda_device=self._cuda_device)\n', (414, 463), False, 'from allennlp.predictors import Predictor\n'), ((3207, 3393), 'isanlp.annotation_rst.DiscourseUnit', 'DiscourseUnit', (['(start_id + i + 1)'], {'start': 'tokens[numbers[-1]].begin', 'end': 'tokens[-1].end', 'text': 'text[tokens[numbers[-1]].begin:tokens[-1].end]', 'relation': '"""elementary"""', 'nuclearity': '"""_"""'}), "(start_id + i + 1, start=tokens[numbers[-1]].begin, end=tokens\n [-1].end, text=text[tokens[numbers[-1]].begin:tokens[-1].end], relation\n ='elementary', nuclearity='_')\n", (3220, 3393), False, 'from isanlp.annotation_rst import DiscourseUnit\n'), ((1425, 1493), 'numpy.array', 'np.array', (["prediction['tags'][:sentences[i].end - sentences[i].begin]"], {}), "(prediction['tags'][:sentences[i].end - sentences[i].begin])\n", (1433, 1493), True, 'import numpy as np\n'), ((2682, 2894), 'isanlp.annotation_rst.DiscourseUnit', 'DiscourseUnit', (['(start_id + i)'], {'start': 'tokens[numbers[i]].begin', 'end': '(tokens[numbers[i + 1]].begin - 1)', 'text': 'text[tokens[numbers[i]].begin:tokens[numbers[i + 1]].begin]', 'relation': '"""elementary"""', 'nuclearity': '"""_"""'}), "(start_id + i, start=tokens[numbers[i]].begin, end=tokens[\n numbers[i + 1]].begin - 1, text=text[tokens[numbers[i]].begin:tokens[\n numbers[i + 1]].begin], relation='elementary', nuclearity='_')\n", (2695, 2894), False, 'from isanlp.annotation_rst import DiscourseUnit\n'), ((2215, 2231), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (2223, 2231), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Sum over the rectangle [x1, x2) × [y1, y2), given the 2-D cumulative-sum table S
def ac2(s, x1, x2, y1, y2):
return s[x2][y2] - s[x1][y2] - s[x2][y1] + s[x1][y1]
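# Worked example (illustrative): for a 2x2 grid of ones the cumulative-sum table is
#   S = [[0, 0, 0],
#        [0, 1, 2],
#        [0, 2, 4]]
# and ac2(S, 1, 2, 0, 2) = S[2][2] - S[1][2] - S[2][0] + S[1][0] = 4 - 2 - 0 + 0 = 2,
# i.e. the sum over the second row of the grid.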
import numpy as np
_, *d = open(0)
n, k = map(int, _.split())
B = np.zeros((2*k, 2*k))
for e in d:
*z, c = e.split()
x, y = map(int, z)
    B[x % (2*k)][(y + k * (c == "W")) % (2*k)] += 1  # compare the colour c, not the coordinate list z
B = B.cumsum(axis=0)  # cumsum returns a new array, so the result must be assigned back
B = B.cumsum(axis=1)
B = np.tile(B, (2,2))
print(B)
# Work in progress
|
[
"numpy.tile",
"numpy.zeros"
] |
[((211, 235), 'numpy.zeros', 'np.zeros', (['(2 * k, 2 * k)'], {}), '((2 * k, 2 * k))\n', (219, 235), True, 'import numpy as np\n'), ((383, 401), 'numpy.tile', 'np.tile', (['B', '(2, 2)'], {}), '(B, (2, 2))\n', (390, 401), True, 'import numpy as np\n')]
|
from __future__ import print_function, absolute_import, division # makes these scripts backward compatible with python 2.6 and 2.7
# pyKratos imports
from .Element import Element
# Other imports
import numpy as np
class TriangleElement(Element):
def __init__(self, elem_id, nodes):
super(TriangleElement, self).__init__(elem_id, nodes)
if(len(self.GetNodes()) != 3):
raise Exception("wrong number of nodes! should be 3!")
for node in self.GetNodes():
if(node.Id < 0):
raise Exception("node with Id smaller than 0 found")
def ShapeFunctions(self, order=1):
'''this function provides the shape function values, derivatives and integration_weight
at the location of the gauss points. Order of integration is controlled
by the optional parameter "order".
N[gauss][i] contains the shape function of node i computed at the position of "gauss"
derivatives[gauss][i,k] contains the derivative of node i, component k at the position of gauss
weights[gauss] includes the integration weights, including the det of the jacobian, to be used
at the gauss point'''
derivatives = []
weights = []
Ncontainer = []
x10 = self.nodes[1].coordinates[0] - self.nodes[0].coordinates[0]
y10 = self.nodes[1].coordinates[1] - self.nodes[0].coordinates[1]
x20 = self.nodes[2].coordinates[0] - self.nodes[0].coordinates[0]
y20 = self.nodes[2].coordinates[1] - self.nodes[0].coordinates[1]
detJ = x10 * y20 - y10 * x20
DN_DX = np.zeros((3, 2), dtype=float)
DN_DX[0, 0] = -y20 + y10
DN_DX[0, 1] = x20 - x10
DN_DX[1, 0] = y20
DN_DX[1, 1] = -x20
DN_DX[2, 0] = -y10
DN_DX[2, 1] = x10
DN_DX /= detJ
if(order == 1): # give back 1 single integration point
one_third = 1.0 / 3.0
Ncontainer = [np.array([one_third, one_third, one_third])]
Area = 0.5 * detJ
weights = [Area]
derivatives = [DN_DX]
elif(order == 2): # gives back 3 integration points
one_sixt = 1.0 / 6.0
two_third = 2.0 / 3.0
Ncontainer.append(np.array([one_sixt, one_sixt, two_third]))
Ncontainer.append(np.array([one_sixt, two_third, one_sixt]))
Ncontainer.append(np.array([two_third, one_sixt, one_sixt]))
weights = [one_sixt * detJ, one_sixt * detJ, one_sixt * detJ]
derivatives = [DN_DX, DN_DX, DN_DX]
else:
raise Exception("integration order not implemented")
return [Ncontainer, derivatives, weights]
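# --- Illustrative check, not part of the original module ---
# For the reference unit triangle with nodes at (0,0), (1,0) and (0,1) the code
# above gives detJ = 1, a single order-1 integration weight (area) of 0.5, and
# constant shape-function gradients
#   DN_DX = [[-1, -1],
#            [ 1,  0],
#            [ 0,  1]],
# which are exactly the gradients of N0 = 1 - x - y, N1 = x and N2 = y.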
|
[
"numpy.array",
"numpy.zeros"
] |
[((1609, 1638), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'float'}), '((3, 2), dtype=float)\n', (1617, 1638), True, 'import numpy as np\n'), ((1958, 2001), 'numpy.array', 'np.array', (['[one_third, one_third, one_third]'], {}), '([one_third, one_third, one_third])\n', (1966, 2001), True, 'import numpy as np\n'), ((2256, 2297), 'numpy.array', 'np.array', (['[one_sixt, one_sixt, two_third]'], {}), '([one_sixt, one_sixt, two_third])\n', (2264, 2297), True, 'import numpy as np\n'), ((2329, 2370), 'numpy.array', 'np.array', (['[one_sixt, two_third, one_sixt]'], {}), '([one_sixt, two_third, one_sixt])\n', (2337, 2370), True, 'import numpy as np\n'), ((2402, 2443), 'numpy.array', 'np.array', (['[two_third, one_sixt, one_sixt]'], {}), '([two_third, one_sixt, one_sixt])\n', (2410, 2443), True, 'import numpy as np\n')]
|
import os
import tempfile
from glob import glob
import json
from collections import OrderedDict
import numpy as np
from .adding_features import adding_no_features
def iterate_json_data(filepath,
columns_to_keep=None,
feature_adder=adding_no_features,
data_filter=lambda datum: True,
missing_val_default={}):
inputfile = open(filepath, 'r')
for line in inputfile:
datum = json.loads(line)
datum = feature_adder(datum)
if not data_filter(datum):
continue
if columns_to_keep is not None:
filtered_datum = OrderedDict()
for column in columns_to_keep:
filtered_datum[column] = datum[column]
if column in missing_val_default.keys() and datum[column] is None:
filtered_datum[column] = missing_val_default[column]
yield filtered_datum
else:
yield OrderedDict(datum)
def iterate_json_files_directory(dir,
columns_to_keep=None,
feature_adder=adding_no_features,
data_filter=lambda datum: True,
missing_val_default={}
):
print('\tReading {}'.format(dir))
print('\tColumns: {}'.format(', '.join(columns_to_keep) if columns_to_keep is not None else 'ALL'))
for filepath in glob(os.path.join(dir, '*.json')):
for datum in iterate_json_data(filepath,
columns_to_keep=columns_to_keep,
feature_adder=feature_adder,
data_filter=data_filter,
missing_val_default=missing_val_default):
yield datum
def process_data(traindatafilepath, qual_features, binary_features, quant_features,
target_label,
feature_adder=adding_no_features,
nb_lines_per_tempfile=10000,
data_filter=lambda datum: True,
missing_val_default={},
filename_fmt='data_{0:09d}.json'):
tempdir = tempfile.TemporaryDirectory()
fileid = 0
tmpfile = None
nbdata = 0
for i, datum in enumerate(iterate_json_data(traindatafilepath,
columns_to_keep=qual_features+binary_features+quant_features+[target_label],
feature_adder=feature_adder,
data_filter=data_filter,
missing_val_default=missing_val_default)):
if i % nb_lines_per_tempfile == 0:
if tmpfile is not None:
tmpfile.close()
tmpfile = open(os.path.join(tempdir.name, filename_fmt.format(fileid)), 'w')
fileid += 1
print('\tRead {} lines...'.format(i))
nbdata += 1
tmpfile.write(json.dumps(datum)+'\n')
    if tmpfile is not None:
        tmpfile.close()
return tempdir, nbdata
def assign_partitions(nbdata, cv_nfold, heldout_fraction, seed=None):
if seed is not None:
np.random.seed(seed)
return np.random.choice([-1] + list(range(cv_nfold)), # -1 indicating hold-out set
p=[heldout_fraction] + [(1 - heldout_fraction) / cv_nfold] * cv_nfold,
size=nbdata)
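# Hedged usage sketch (variable names below are illustrative only):
#   partitions = assign_partitions(nbdata, cv_nfold=5, heldout_fraction=0.1, seed=42)
#   heldout_mask = partitions == -1   # rows reserved as the hold-out set
#   fold0_valid = partitions == 0     # validation rows of fold 0
#   fold0_train = partitions > 0      # rows from the remaining folds used for training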
|
[
"tempfile.TemporaryDirectory",
"json.loads",
"collections.OrderedDict",
"json.dumps",
"os.path.join",
"numpy.random.seed"
] |
[((2255, 2284), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2282, 2284), False, 'import tempfile\n'), ((480, 496), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (490, 496), False, 'import json\n'), ((1497, 1524), 'os.path.join', 'os.path.join', (['dir', '"""*.json"""'], {}), "(dir, '*.json')\n", (1509, 1524), False, 'import os\n'), ((3259, 3279), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3273, 3279), True, 'import numpy as np\n'), ((659, 672), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (670, 672), False, 'from collections import OrderedDict\n'), ((992, 1010), 'collections.OrderedDict', 'OrderedDict', (['datum'], {}), '(datum)\n', (1003, 1010), False, 'from collections import OrderedDict\n'), ((3083, 3100), 'json.dumps', 'json.dumps', (['datum'], {}), '(datum)\n', (3093, 3100), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
"""
Combination of
http://scipy-central.org/item/52/1/zplane-function
and
http://www.dsprelated.com/showcode/244.php
with my own modifications
"""
# Copyright (c) 2011 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The following is derived from the slides presented by
# <NAME> for CS506/606 "Special Topics: Speech Signal Processing"
# CSLU / OHSU, Spring Term 2011.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib.pyplot import axvline, axhline
from collections import defaultdict
def zplane(z, p, filename=None):
"""Plot the complex z-plane given zeros and poles.
"""
# get a figure/plot
ax = plt.subplot(2, 2, 1)
# TODO: should just inherit whatever subplot it's called in?
# Add unit circle and zero axes
unit_circle = patches.Circle((0,0), radius=1, fill=False,
color='black', ls='solid', alpha=0.1)
ax.add_patch(unit_circle)
axvline(0, color='0.7')
axhline(0, color='0.7')
# Plot the poles and set marker properties
poles = plt.plot(p.real, p.imag, 'x', markersize=9, alpha=0.5)
# Plot the zeros and set marker properties
zeros = plt.plot(z.real, z.imag, 'o', markersize=9,
color='none', alpha=0.5,
markeredgecolor=poles[0].get_color(), # same color as poles
)
# Scale axes to fit
r = 1.5 * np.amax(np.concatenate((abs(z), abs(p), [1])))
plt.axis('scaled')
plt.axis([-r, r, -r, r])
# ticks = [-1, -.5, .5, 1]
# plt.xticks(ticks)
# plt.yticks(ticks)
"""
If there are multiple poles or zeros at the same point, put a
superscript next to them.
TODO: can this be made to self-update when zoomed?
"""
# Finding duplicates by same pixel coordinates (hacky for now):
poles_xy = ax.transData.transform(np.vstack(poles[0].get_data()).T)
zeros_xy = ax.transData.transform(np.vstack(zeros[0].get_data()).T)
# dict keys should be ints for matching, but coords should be floats for
# keeping location of text accurate while zooming
# TODO make less hacky, reduce duplication of code
d = defaultdict(int)
coords = defaultdict(tuple)
for xy in poles_xy:
key = tuple(np.rint(xy).astype('int'))
d[key] += 1
coords[key] = xy
print(d)
for key, value in d.items():
if value > 1:
x, y = ax.transData.inverted().transform(coords[key])
plt.text(x, y,
r' ${}^{' + str(value) + '}$',
fontsize=13,
)
d = defaultdict(int)
coords = defaultdict(tuple)
for xy in zeros_xy:
key = tuple(np.rint(xy).astype('int'))
d[key] += 1
coords[key] = xy
for key, value in d.items():
if value > 1:
x, y = ax.transData.inverted().transform(coords[key])
plt.text(x, y,
r' ${}^{' + str(value) + '}$',
fontsize=13,
)
if filename is None:
plt.show()
else:
plt.savefig(filename)
print( 'Pole-zero plot saved to ' + str(filename))
if __name__ == "__main__":
from scipy.signal import (freqz, butter, bessel, cheby1, cheby2, ellip,
tf2zpk, zpk2tf, lfilter, buttap, bilinear, cheb2ord, cheb2ap
)
from numpy import asarray, tan, array, pi, arange, cos, log10, unwrap, angle
from matplotlib.pyplot import (stem, title, grid, show, plot, xlabel,
ylabel, subplot, xscale, figure, xlim,
margins)
# # Cosine function
# omega = pi/4
# b = array([1.0, -cos(omega)])
# a = array([1, -2*cos(omega), 1.0])
b, a = butter(2, [0.06, 0.7], 'bandpass')
# Get the poles and zeros
z, p, k = tf2zpk(b, a)
# Create zero-pole plot
figure(figsize=(16, 9))
subplot(2, 2, 1)
zplane(z, p)
grid(True, color='0.9', linestyle='-', which='both', axis='both')
title('Poles and zeros')
# Display zeros, poles and gain
print( str(len(z)) + " zeros: " + str(z))
print( str(len(p)) + " poles: " + str(p))
print( "gain: " + str(k))
# Impulse response
index = arange(0,20)
u = 1.0*(index==0)
y = lfilter(b, a, u)
subplot(2, 2, 3)
stem(index,y)
title('Impulse response')
margins(0, 0.1)
grid(True, color='0.9', linestyle='-', which='both', axis='both')
show()
# Frequency response
w, h = freqz(b, a)
subplot(2, 2, 2)
plot(w/pi, 20*log10(abs(h)))
xscale('log')
title('Frequency response')
xlabel('Normalized frequency')
ylabel('Amplitude [dB]')
margins(0, 0.1)
grid(True, color = '0.7', linestyle='-', which='major', axis='both')
grid(True, color = '0.9', linestyle='-', which='minor', axis='both')
show()
# Phase
subplot(2, 2, 4)
plot(w/pi, 180/pi * unwrap(angle(h)))
xscale('log')
xlabel('Normalized frequency')
ylabel('Phase [degrees]')
grid(True, color = '0.7', linestyle='-', which='major')
grid(True, color = '0.9', linestyle='-', which='minor')
show()
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.margins",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"numpy.rint",
"scipy.signal.tf2zpk",
"matplotlib.pyplot.axis",
"matplotlib.patches.Circle",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.stem",
"matplotlib.pyplot.title",
"scipy.signal.freqz",
"matplotlib.pyplot.show",
"scipy.signal.butter",
"numpy.angle",
"matplotlib.pyplot.figure",
"scipy.signal.lfilter",
"collections.defaultdict",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.xscale"
] |
[((1321, 1341), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (1332, 1341), True, 'import matplotlib.pyplot as plt\n'), ((1466, 1552), 'matplotlib.patches.Circle', 'patches.Circle', (['(0, 0)'], {'radius': '(1)', 'fill': '(False)', 'color': '"""black"""', 'ls': '"""solid"""', 'alpha': '(0.1)'}), "((0, 0), radius=1, fill=False, color='black', ls='solid',\n alpha=0.1)\n", (1480, 1552), False, 'from matplotlib import patches\n'), ((1615, 1638), 'matplotlib.pyplot.axvline', 'axvline', (['(0)'], {'color': '"""0.7"""'}), "(0, color='0.7')\n", (1622, 1638), False, 'from matplotlib.pyplot import axvline, axhline\n'), ((1643, 1666), 'matplotlib.pyplot.axhline', 'axhline', (['(0)'], {'color': '"""0.7"""'}), "(0, color='0.7')\n", (1650, 1666), False, 'from matplotlib.pyplot import axvline, axhline\n'), ((1731, 1785), 'matplotlib.pyplot.plot', 'plt.plot', (['p.real', 'p.imag', '"""x"""'], {'markersize': '(9)', 'alpha': '(0.5)'}), "(p.real, p.imag, 'x', markersize=9, alpha=0.5)\n", (1739, 1785), True, 'import matplotlib.pyplot as plt\n'), ((2112, 2130), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (2120, 2130), True, 'import matplotlib.pyplot as plt\n'), ((2135, 2159), 'matplotlib.pyplot.axis', 'plt.axis', (['[-r, r, -r, r]'], {}), '([-r, r, -r, r])\n', (2143, 2159), True, 'import matplotlib.pyplot as plt\n'), ((2818, 2834), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2829, 2834), False, 'from collections import defaultdict\n'), ((2848, 2866), 'collections.defaultdict', 'defaultdict', (['tuple'], {}), '(tuple)\n', (2859, 2866), False, 'from collections import defaultdict\n'), ((3281, 3297), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3292, 3297), False, 'from collections import defaultdict\n'), ((3311, 3329), 'collections.defaultdict', 'defaultdict', (['tuple'], {}), '(tuple)\n', (3322, 3329), False, 'from collections import defaultdict\n'), ((4496, 4530), 'scipy.signal.butter', 'butter', (['(2)', '[0.06, 0.7]', '"""bandpass"""'], {}), "(2, [0.06, 0.7], 'bandpass')\n", (4502, 4530), False, 'from scipy.signal import freqz, butter, bessel, cheby1, cheby2, ellip, tf2zpk, zpk2tf, lfilter, buttap, bilinear, cheb2ord, cheb2ap\n'), ((4576, 4588), 'scipy.signal.tf2zpk', 'tf2zpk', (['b', 'a'], {}), '(b, a)\n', (4582, 4588), False, 'from scipy.signal import freqz, butter, bessel, cheby1, cheby2, ellip, tf2zpk, zpk2tf, lfilter, buttap, bilinear, cheb2ord, cheb2ap\n'), ((4622, 4645), 'matplotlib.pyplot.figure', 'figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (4628, 4645), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((4650, 4666), 'matplotlib.pyplot.subplot', 'subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (4657, 4666), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((4688, 4753), 'matplotlib.pyplot.grid', 'grid', (['(True)'], {'color': '"""0.9"""', 'linestyle': '"""-"""', 'which': '"""both"""', 'axis': '"""both"""'}), "(True, color='0.9', linestyle='-', which='both', axis='both')\n", (4692, 4753), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((4758, 4782), 'matplotlib.pyplot.title', 'title', (['"""Poles and zeros"""'], {}), "('Poles and zeros')\n", (4763, 4782), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((4986, 4999), 'numpy.arange', 'arange', (['(0)', '(20)'], {}), '(0, 20)\n', (4992, 4999), False, 'from numpy import asarray, tan, array, pi, arange, cos, log10, unwrap, angle\n'), ((5030, 5046), 'scipy.signal.lfilter', 'lfilter', (['b', 'a', 'u'], {}), '(b, a, u)\n', (5037, 5046), False, 'from scipy.signal import freqz, butter, bessel, cheby1, cheby2, ellip, tf2zpk, zpk2tf, lfilter, buttap, bilinear, cheb2ord, cheb2ap\n'), ((5051, 5067), 'matplotlib.pyplot.subplot', 'subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (5058, 5067), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5072, 5086), 'matplotlib.pyplot.stem', 'stem', (['index', 'y'], {}), '(index, y)\n', (5076, 5086), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5090, 5115), 'matplotlib.pyplot.title', 'title', (['"""Impulse response"""'], {}), "('Impulse response')\n", (5095, 5115), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5120, 5135), 'matplotlib.pyplot.margins', 'margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (5127, 5135), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5140, 5205), 'matplotlib.pyplot.grid', 'grid', (['(True)'], {'color': '"""0.9"""', 'linestyle': '"""-"""', 'which': '"""both"""', 'axis': '"""both"""'}), "(True, color='0.9', linestyle='-', which='both', axis='both')\n", (5144, 5205), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5210, 5216), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (5214, 5216), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5258, 5269), 'scipy.signal.freqz', 'freqz', (['b', 'a'], {}), '(b, a)\n', (5263, 5269), False, 'from scipy.signal import freqz, butter, bessel, cheby1, cheby2, ellip, tf2zpk, zpk2tf, lfilter, buttap, bilinear, cheb2ord, cheb2ap\n'), ((5274, 5290), 'matplotlib.pyplot.subplot', 'subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (5281, 5290), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5328, 5341), 'matplotlib.pyplot.xscale', 'xscale', (['"""log"""'], {}), "('log')\n", (5334, 5341), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5347, 5374), 'matplotlib.pyplot.title', 'title', (['"""Frequency response"""'], {}), "('Frequency response')\n", (5352, 5374), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5379, 5409), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""Normalized frequency"""'], {}), "('Normalized frequency')\n", (5385, 5409), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5414, 5438), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""Amplitude [dB]"""'], {}), "('Amplitude [dB]')\n", (5420, 5438), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5443, 5458), 'matplotlib.pyplot.margins', 'margins', (['(0)', '(0.1)'], {}), '(0, 0.1)\n', (5450, 5458), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5463, 5529), 'matplotlib.pyplot.grid', 'grid', (['(True)'], {'color': '"""0.7"""', 'linestyle': '"""-"""', 'which': '"""major"""', 'axis': '"""both"""'}), "(True, color='0.7', linestyle='-', which='major', axis='both')\n", (5467, 5529), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5536, 5602), 'matplotlib.pyplot.grid', 'grid', (['(True)'], {'color': '"""0.9"""', 'linestyle': '"""-"""', 'which': '"""minor"""', 'axis': '"""both"""'}), "(True, color='0.9', linestyle='-', which='minor', axis='both')\n", (5540, 5602), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5609, 5615), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (5613, 5615), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5633, 5649), 'matplotlib.pyplot.subplot', 'subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (5640, 5649), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5696, 5709), 'matplotlib.pyplot.xscale', 'xscale', (['"""log"""'], {}), "('log')\n", (5702, 5709), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5714, 5744), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""Normalized frequency"""'], {}), "('Normalized frequency')\n", (5720, 5744), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5749, 5774), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""Phase [degrees]"""'], {}), "('Phase [degrees]')\n", (5755, 5774), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5779, 5832), 'matplotlib.pyplot.grid', 'grid', (['(True)'], {'color': '"""0.7"""', 'linestyle': '"""-"""', 'which': '"""major"""'}), "(True, color='0.7', linestyle='-', which='major')\n", (5783, 5832), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5839, 5892), 'matplotlib.pyplot.grid', 'grid', (['(True)'], {'color': '"""0.9"""', 'linestyle': '"""-"""', 'which': '"""minor"""'}), "(True, color='0.9', linestyle='-', which='minor')\n", (5843, 5892), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((5899, 5905), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (5903, 5905), False, 'from matplotlib.pyplot import stem, title, grid, show, plot, xlabel, ylabel, subplot, xscale, figure, xlim, margins\n'), ((3747, 3757), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3755, 3757), True, 'import matplotlib.pyplot as plt\n'), ((3776, 3797), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (3787, 3797), True, 'import matplotlib.pyplot as plt\n'), ((5681, 5689), 'numpy.angle', 'angle', (['h'], {}), '(h)\n', (5686, 5689), False, 'from numpy import asarray, tan, array, pi, arange, cos, log10, unwrap, angle\n'), ((2911, 2922), 'numpy.rint', 'np.rint', (['xy'], {}), '(xy)\n', (2918, 2922), True, 'import numpy as np\n'), ((3374, 3385), 'numpy.rint', 'np.rint', (['xy'], {}), '(xy)\n', (3381, 3385), True, 'import numpy as np\n')]
|
import os
import numpy as np
from constants import DATABASE_FILE_NAME, PLAYER_ONE, PLAYER_TWO, POSITION_TO_DATABASE
from Agents.random import Random
from othello import Othello
import multiprocessing as mp
class Database:
def __init__(self):
"""
load database data and store them in self._db_data
self._db_data = 3 dim array:
60 turns
9 game categories
[0] : won games of player1
[1] : won games of player2
[2] : total played games
"""
# check if database file exists
if not os.path.isfile(DATABASE_FILE_NAME):
self._create_new_database()
# load csv in self_data as 3 dim. array
csv = np.loadtxt(DATABASE_FILE_NAME, delimiter=';', dtype='int64')
self._db_data = csv.reshape((60, 9, 3))
def _create_new_database(self):
"""
Reset stored played / won games
change self._db_data to array of 0
"""
self._db_data = np.zeros(shape=(60, 9, 3), dtype='int64')
# save modified array in file
self.store_database()
def store_database(self):
"""
store database on filesystem
:return:
"""
with open(DATABASE_FILE_NAME, 'w') as outfile:
# write 3 dim. array as list of 2 dim. array's
for row in self._db_data:
# write one row (turn number) of matrix
np.savetxt(outfile, row, fmt='%d', delimiter=';')
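        # Note: each of the 60 per-turn matrices is written as 9 rows of 3 integers,
        # so the file holds 540 lines that __init__ reads back and reshapes to (60, 9, 3).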
def get_change_of_winning(self, move, turn_nr, current_player):
"""
calculate chance of winning for given move and turn_number
:param move: move is a pair <row, column> in available_moves
:param turn_nr: actual turn_number
:param current_player: use constants PLAYER_ONE and PLAYER_TWO
:return: chance of winning for given field at the given turn number
"""
# translate move to category in array
category = POSITION_TO_DATABASE[move]
# access data of one category in one turn number of the database to compute statistic
won_games_pl1, won_games_pl2, total_games_played = self._db_data[turn_nr][category]
# avoid dividing with 0
if total_games_played == 0:
return 0
# return win probability
if current_player == PLAYER_ONE:
return won_games_pl1 / total_games_played
return won_games_pl2 / total_games_played
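        # Worked example (illustrative numbers only): if the entry for this turn and
        # category were (won_pl1, won_pl2, total) = (30, 20, 60), PLAYER_ONE would get
        # 30 / 60 = 0.5 and any other player 20 / 60, roughly 0.33.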
def update_field_stat(self, turn_nr, field_type, winner):
"""
update database with new played move
:param turn_nr: turn number of move to store
:param field_type: field category of move
:param winner: winner of whole played game
:return: nothing
update self._db_data at given turn number and field type
"""
# get actual database entry
(won_games_pl1, won_games_pl2, total_games_played) = self._db_data[turn_nr][field_type]
if winner == PLAYER_ONE:
won_games_pl1 += 1
elif winner == PLAYER_TWO:
won_games_pl2 += 1
# store updated entry at same position in database
self._db_data[turn_nr][field_type] = (won_games_pl1, won_games_pl2, total_games_played + 1)
def update_fields_stats_for_single_game(self, moves, winner):
"""
update statistics of each taken move in game
:param moves: list of taken moves
:param winner: PLAYER_ONE or PLAYER_TWO
"""
        for turn_nr, move in enumerate(moves):
            # translate a move such as (1, 0) to its database position category (e.g. 8)
            position = POSITION_TO_DATABASE[move]
# update array at position position
self.update_field_stat(turn_nr, position, winner)
@staticmethod
def _play_n_random_games(count):
"""
play count random games
:param count: number of played games
:return: winning statistics
statistics = list of pair <taken moves, winner of this game>
"""
multi_stats = []
for i in range(count):
            # print the current game number every 100 games
if i % 100 == 0:
print(f"Game No: {i}")
g = Othello()
g.init_game()
# play whole game
while not g.game_is_over():
g.play_position(Random.get_move(g))
winner = g.get_winner()
# add winner and taken moves to statistic
multi_stats.append((g.get_taken_mv(), winner))
return multi_stats
def train_db_multi_threaded(self, count):
"""
play count random games and update database winning statistics
:param count: number of games to play
:return:
"""
# Create a pool of worker processes.
# Workload can be distributed equally on the processes when their number is known
number_of_processes = mp.cpu_count()
pool = mp.Pool()
# Use Worker processes asynchronous
# split calculation in number_of_processes parts to calculate multi threaded
list_of_stats = [pool.apply_async(self._play_n_random_games, args=(count // number_of_processes,))
for _ in range(number_of_processes)]
# Collect the result of the first worker
# update statistics of number_of_processes results sequential
for single_process_list in list_of_stats:
list_of_games = single_process_list.get()
for single_game in list_of_games:
moves, winner = single_game
self.update_fields_stats_for_single_game(moves, winner)
# Close the worker pool.
pool.close()
db = Database()
|
[
"othello.Othello",
"Agents.random.Random.get_move",
"multiprocessing.cpu_count",
"os.path.isfile",
"numpy.zeros",
"multiprocessing.Pool",
"numpy.savetxt",
"numpy.loadtxt"
] |
[((765, 825), 'numpy.loadtxt', 'np.loadtxt', (['DATABASE_FILE_NAME'], {'delimiter': '""";"""', 'dtype': '"""int64"""'}), "(DATABASE_FILE_NAME, delimiter=';', dtype='int64')\n", (775, 825), True, 'import numpy as np\n'), ((1042, 1083), 'numpy.zeros', 'np.zeros', ([], {'shape': '(60, 9, 3)', 'dtype': '"""int64"""'}), "(shape=(60, 9, 3), dtype='int64')\n", (1050, 1083), True, 'import numpy as np\n'), ((4957, 4971), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (4969, 4971), True, 'import multiprocessing as mp\n'), ((4987, 4996), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (4994, 4996), True, 'import multiprocessing as mp\n'), ((627, 661), 'os.path.isfile', 'os.path.isfile', (['DATABASE_FILE_NAME'], {}), '(DATABASE_FILE_NAME)\n', (641, 661), False, 'import os\n'), ((4253, 4262), 'othello.Othello', 'Othello', ([], {}), '()\n', (4260, 4262), False, 'from othello import Othello\n'), ((1485, 1534), 'numpy.savetxt', 'np.savetxt', (['outfile', 'row'], {'fmt': '"""%d"""', 'delimiter': '""";"""'}), "(outfile, row, fmt='%d', delimiter=';')\n", (1495, 1534), True, 'import numpy as np\n'), ((4391, 4409), 'Agents.random.Random.get_move', 'Random.get_move', (['g'], {}), '(g)\n', (4406, 4409), False, 'from Agents.random import Random\n')]
|
import argparse
import json
import os.path
import random
import re
import time
import cv2
import numpy as np
import requests
from PIL import Image
from config import config
class GermanLicensePlateImagesGenerator:
def __init__(self, output):
self.output = output
self.COUNTRY_MARKS = np.asarray([d['CM'] for d in json.loads(open(config.GERMAN_COUNTY_MARKS, encoding='utf-8').read())])
self.LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÜ"
self.DIGITS = "0123456789"
self.COUNTRIES = ['BW', 'BY', 'BE', 'BB', 'HB', 'HH', 'HE', 'MV', 'NI', 'NW', 'RP', 'SL', 'SN', 'ST', 'SH', 'TH']
self.MONTHS = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
self.YEARS = ['06', '07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17']
random.seed()
@staticmethod
def get_image_url(license_number, country, month, year):
license_number = license_number.replace("-", "%3A").replace("Ä", "%C4").replace("Ö", "%D6").replace("Ü", "%DC")
return "http://nummernschild.heisnbrg.net/fe/task?action=startTask&kennzeichen={0}&kennzeichenZeile2=&engschrift=false&pixelHoehe=32&breiteInMM=520&breiteInMMFest=true&sonder=FE&dd=01&mm=01&yy=00&kreis={1}&kreisName=&humm={2}&huyy={3}&sonderKreis=LEER&mm1=01&mm2=01&farbe=SCHWARZ&effekt=KEIN&tgaDownload=false".format(
license_number, country, month, year)
def __generate_license_number(self):
country = random.choice(self.COUNTRY_MARKS)
letter_count = random.randint(1, 2)
letters = "{}".format(random.choice(self.LETTERS)) if letter_count == 1 else "{}{}".format(
random.choice(self.LETTERS), random.choice(self.LETTERS))
min = 1 if letter_count == 2 else 1
digit_count = random.randint(min, max((8 - len(country) - letter_count), 4))
digits = ""
for i in range(digit_count):
digits += random.choice(self.DIGITS)
return "{}-{}{}".format(country, letters, digits)
def __create_license_plate_picture(self, n, license_number, country, front):
file_path = self.output + '/{0}#{1}.png'.format("F" if front else "R", license_number)
if os.path.exists(file_path):
return False
month = random.choice(self.MONTHS) if front else ''
year = random.choice(self.YEARS) if front else ''
create_image_url = GermanLicensePlateImagesGenerator.get_image_url(license_number, country, month, year)
r = requests.get(create_image_url)
if r.status_code != 200:
return False
id = re.compile('<id>(.*?)</id>', re.DOTALL | re.IGNORECASE).findall(
r.content.decode("utf-8"))[0]
status_url = 'http://nummernschild.heisnbrg.net/fe/task?action=status&id=%s' % id
time.sleep(.200)
r = requests.get(status_url)
if r.status_code != 200:
return False
show_image_url = 'http://nummernschild.heisnbrg.net/fe/task?action=showInPage&id=%s'
show_image_url = show_image_url % id
time.sleep(.200)
r = requests.get(show_image_url)
if r.status_code != 200:
return False
# sometimes the web service returns a corrupted image, check the image by getting the size and skip if corrupted
try:
numpyarray = np.fromstring(r.content, np.uint8)
image = cv2.imdecode(numpyarray, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
im = Image.fromarray(image) # don't use cv2.imwrite() because there is a bug with utf-8 encoded filepaths
im.save(file_path)
print("{0:06d} : {1}".format(n, file_path))
return True
except:
return False
def generate(self, items):
for n in range(items):
while True:
license_number = self.__generate_license_number()
country = random.choice(self.COUNTRIES)
if not self.__create_license_plate_picture(n, license_number, country, True):
break
time.sleep(.200)
self.__create_license_plate_picture(n, license_number, country, False)
time.sleep(.200)
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--items", default="60000", help="Number of items to generate")
ap.add_argument("-o", "--output", default=config.PLATE_IMAGES, help="Output path")
args = vars(ap.parse_args())
lpdg = GermanLicensePlateImagesGenerator(os.path.abspath(args["output"]))
lpdg.generate(int(args["items"]))
|
[
"PIL.Image.fromarray",
"random.choice",
"argparse.ArgumentParser",
"re.compile",
"requests.get",
"random.seed",
"time.sleep",
"cv2.imdecode",
"cv2.cvtColor",
"numpy.fromstring",
"random.randint"
] |
[((4263, 4288), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4286, 4288), False, 'import argparse\n'), ((819, 832), 'random.seed', 'random.seed', ([], {}), '()\n', (830, 832), False, 'import random\n'), ((1470, 1503), 'random.choice', 'random.choice', (['self.COUNTRY_MARKS'], {}), '(self.COUNTRY_MARKS)\n', (1483, 1503), False, 'import random\n'), ((1528, 1548), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (1542, 1548), False, 'import random\n'), ((2499, 2529), 'requests.get', 'requests.get', (['create_image_url'], {}), '(create_image_url)\n', (2511, 2529), False, 'import requests\n'), ((2808, 2823), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2818, 2823), False, 'import time\n'), ((2837, 2861), 'requests.get', 'requests.get', (['status_url'], {}), '(status_url)\n', (2849, 2861), False, 'import requests\n'), ((3067, 3082), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (3077, 3082), False, 'import time\n'), ((3096, 3124), 'requests.get', 'requests.get', (['show_image_url'], {}), '(show_image_url)\n', (3108, 3124), False, 'import requests\n'), ((1928, 1954), 'random.choice', 'random.choice', (['self.DIGITS'], {}), '(self.DIGITS)\n', (1941, 1954), False, 'import random\n'), ((2271, 2297), 'random.choice', 'random.choice', (['self.MONTHS'], {}), '(self.MONTHS)\n', (2284, 2297), False, 'import random\n'), ((2330, 2355), 'random.choice', 'random.choice', (['self.YEARS'], {}), '(self.YEARS)\n', (2343, 2355), False, 'import random\n'), ((3343, 3377), 'numpy.fromstring', 'np.fromstring', (['r.content', 'np.uint8'], {}), '(r.content, np.uint8)\n', (3356, 3377), True, 'import numpy as np\n'), ((3398, 3440), 'cv2.imdecode', 'cv2.imdecode', (['numpyarray', 'cv2.IMREAD_COLOR'], {}), '(numpyarray, cv2.IMREAD_COLOR)\n', (3410, 3440), False, 'import cv2\n'), ((3461, 3500), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (3473, 3500), False, 'import cv2\n'), ((3518, 3540), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (3533, 3540), False, 'from PIL import Image\n'), ((1579, 1606), 'random.choice', 'random.choice', (['self.LETTERS'], {}), '(self.LETTERS)\n', (1592, 1606), False, 'import random\n'), ((1661, 1688), 'random.choice', 'random.choice', (['self.LETTERS'], {}), '(self.LETTERS)\n', (1674, 1688), False, 'import random\n'), ((1690, 1717), 'random.choice', 'random.choice', (['self.LETTERS'], {}), '(self.LETTERS)\n', (1703, 1717), False, 'import random\n'), ((3952, 3981), 'random.choice', 'random.choice', (['self.COUNTRIES'], {}), '(self.COUNTRIES)\n', (3965, 3981), False, 'import random\n'), ((4119, 4134), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (4129, 4134), False, 'import time\n'), ((4239, 4254), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (4249, 4254), False, 'import time\n'), ((2602, 2657), 're.compile', 're.compile', (['"""<id>(.*?)</id>"""', '(re.DOTALL | re.IGNORECASE)'], {}), "('<id>(.*?)</id>', re.DOTALL | re.IGNORECASE)\n", (2612, 2657), False, 'import re\n')]
|
import sys
sys.path.append("/home/ly/workspace/mmsa")
seed = 1938
import numpy as np
import torch
from torch import nn
from torch import optim
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
from models.bigru_rcnn_gate import *
from utils.train import *
from typing import *
from utils.load_raw_yelp import *
from utils.dataset import *
from utils.train import *
from utils.train import *
def main():
train_set, valid_set, test_set = load_glove_data(config)
batch_size = 2
workers = 2
train_loader, valid_loader, test_loader = get_loader(batch_size, workers, get_collate_fn(config),
train_set, valid_set, test_set)
model = Model(config)
#X, y = iter(valid_loader).next()
#res = model(X)
loss = nn.CrossEntropyLoss()
# get_parameter_number(model), loss
viz = get_Visdom()
lr = 1e-3
epoches = 20
optimizer = get_regal_optimizer(model, optim.AdamW, lr)
k_batch_train_visdom(model, optimizer, loss, valid_loader, viz, 30, 10, use_cuda=False)
if __name__ == "__main__":
# torch.cuda.set_device(1)
main()
|
[
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.nn.CrossEntropyLoss",
"numpy.random.seed",
"torch.cuda.manual_seed",
"sys.path.append"
] |
[((11, 53), 'sys.path.append', 'sys.path.append', (['"""/home/ly/workspace/mmsa"""'], {}), "('/home/ly/workspace/mmsa')\n", (26, 53), False, 'import sys\n'), ((144, 164), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (158, 164), True, 'import numpy as np\n'), ((165, 188), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (182, 188), False, 'import torch\n'), ((189, 217), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (211, 217), False, 'import torch\n'), ((218, 250), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (244, 250), False, 'import torch\n'), ((820, 841), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (839, 841), False, 'from torch import nn\n')]
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
########################################################################
# GNU General Public License v3.0
# GNU GPLv3
# Copyright (c) 2019, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
########################################################################
"""
Constants for project.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import platform
import numpy as np
DL_FRAMEWORKS = np.array(['caffe', 'tensorflow', 'pytorch', 'keras', 'caffe2'])
DL_FRAMEWORK = None
GPU_CORE_ID = 0
CNN_FEATURE_SIZES = np.array([2048, 2048, 1000, 1024, 1000, 2048, 2048])
CNN_FEATURE_TYPES = np.array(['fc6', 'fc7', 'fc1000', 'fc1024', 'fc365', 'prob', 'pool5', 'fc8a', 'res3b7', 'res4b35', 'res5c'])
CNN_MODEL_TYPES = np.array(['resnet152', 'googlenet1k', 'vgg16', 'places365-resnet152', 'places365-vgg', 'googlenet13k'])
RESIZE_TYPES = np.array(['resize', 'resize_crop', 'resize_crop_scaled', 'resize_keep_aspect_ratio_padded'])
ROOT_PATH_TYPES = np.array(['data', 'project'])
TRAIN_SCHEMES = np.array(['ete', 'tco'])
MODEL_CLASSIFICATION_TYPES = np.array(['ml', 'sl'])
MODEL_MULTISCALE_TYPES = np.array(['dl', 'ks'])
SOLVER_NAMES = np.array(['adam', 'sgd'])
DATASET_NAMES = np.array(['charades', 'kinetics400', 'breakfast_actions', 'you_cook_2', 'multi_thumos'])
DATA_ROOT_PATH = './data'
PROJECT_ROOT_PATH = '../'
MACHINE_NAME = platform.node()
|
[
"numpy.array",
"platform.node"
] |
[((1150, 1213), 'numpy.array', 'np.array', (["['caffe', 'tensorflow', 'pytorch', 'keras', 'caffe2']"], {}), "(['caffe', 'tensorflow', 'pytorch', 'keras', 'caffe2'])\n", (1158, 1213), True, 'import numpy as np\n'), ((1271, 1323), 'numpy.array', 'np.array', (['[2048, 2048, 1000, 1024, 1000, 2048, 2048]'], {}), '([2048, 2048, 1000, 1024, 1000, 2048, 2048])\n', (1279, 1323), True, 'import numpy as np\n'), ((1344, 1456), 'numpy.array', 'np.array', (["['fc6', 'fc7', 'fc1000', 'fc1024', 'fc365', 'prob', 'pool5', 'fc8a',\n 'res3b7', 'res4b35', 'res5c']"], {}), "(['fc6', 'fc7', 'fc1000', 'fc1024', 'fc365', 'prob', 'pool5',\n 'fc8a', 'res3b7', 'res4b35', 'res5c'])\n", (1352, 1456), True, 'import numpy as np\n'), ((1471, 1578), 'numpy.array', 'np.array', (["['resnet152', 'googlenet1k', 'vgg16', 'places365-resnet152',\n 'places365-vgg', 'googlenet13k']"], {}), "(['resnet152', 'googlenet1k', 'vgg16', 'places365-resnet152',\n 'places365-vgg', 'googlenet13k'])\n", (1479, 1578), True, 'import numpy as np\n'), ((1590, 1686), 'numpy.array', 'np.array', (["['resize', 'resize_crop', 'resize_crop_scaled',\n 'resize_keep_aspect_ratio_padded']"], {}), "(['resize', 'resize_crop', 'resize_crop_scaled',\n 'resize_keep_aspect_ratio_padded'])\n", (1598, 1686), True, 'import numpy as np\n'), ((1701, 1730), 'numpy.array', 'np.array', (["['data', 'project']"], {}), "(['data', 'project'])\n", (1709, 1730), True, 'import numpy as np\n'), ((1747, 1771), 'numpy.array', 'np.array', (["['ete', 'tco']"], {}), "(['ete', 'tco'])\n", (1755, 1771), True, 'import numpy as np\n'), ((1801, 1823), 'numpy.array', 'np.array', (["['ml', 'sl']"], {}), "(['ml', 'sl'])\n", (1809, 1823), True, 'import numpy as np\n'), ((1849, 1871), 'numpy.array', 'np.array', (["['dl', 'ks']"], {}), "(['dl', 'ks'])\n", (1857, 1871), True, 'import numpy as np\n'), ((1887, 1912), 'numpy.array', 'np.array', (["['adam', 'sgd']"], {}), "(['adam', 'sgd'])\n", (1895, 1912), True, 'import numpy as np\n'), ((1929, 2021), 'numpy.array', 'np.array', (["['charades', 'kinetics400', 'breakfast_actions', 'you_cook_2', 'multi_thumos']"], {}), "(['charades', 'kinetics400', 'breakfast_actions', 'you_cook_2',\n 'multi_thumos'])\n", (1937, 2021), True, 'import numpy as np\n'), ((2085, 2100), 'platform.node', 'platform.node', ([], {}), '()\n', (2098, 2100), False, 'import platform\n')]
|