code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M) |
---|---|---|
import unittest
import numpy as np
import sklearn_supp.random_coordinates as random_coordinates
class TestRandomCoordinateForestClassifier(unittest.TestCase):
"""These are just some simple sanity checks to make sure we don't get
exceptions.
"""
def test_simple(self):
X = [[0], [1]]
y = [0, 1]
classifier = random_coordinates.RandomCoordinateForestClassifier(
n_estimators=50)
classifier.fit(X, y)
y_pred = classifier.predict(X)
print(y_pred)
eq = np.all(y_pred == y)
self.assertTrue(eq)
def test_transform_dimension(self):
X = [[0, 0], [1, 1]]
X = np.array(X)
y = [0, 1]
classifier = random_coordinates.RandomCoordinateForestClassifier(
n_estimators=50, transform_dimension=2)
classifier.fit(X, y)
y_pred = classifier.predict(X)
print(y_pred)
eq = np.all(y_pred == y)
self.assertTrue(eq)
|
[
"numpy.array",
"numpy.all",
"sklearn_supp.random_coordinates.RandomCoordinateForestClassifier"
] |
[((352, 420), 'sklearn_supp.random_coordinates.RandomCoordinateForestClassifier', 'random_coordinates.RandomCoordinateForestClassifier', ([], {'n_estimators': '(50)'}), '(n_estimators=50)\n', (403, 420), True, 'import sklearn_supp.random_coordinates as random_coordinates\n'), ((537, 556), 'numpy.all', 'np.all', (['(y_pred == y)'], {}), '(y_pred == y)\n', (543, 556), True, 'import numpy as np\n'), ((667, 678), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (675, 678), True, 'import numpy as np\n'), ((719, 814), 'sklearn_supp.random_coordinates.RandomCoordinateForestClassifier', 'random_coordinates.RandomCoordinateForestClassifier', ([], {'n_estimators': '(50)', 'transform_dimension': '(2)'}), '(n_estimators=50,\n transform_dimension=2)\n', (770, 814), True, 'import sklearn_supp.random_coordinates as random_coordinates\n'), ((927, 946), 'numpy.all', 'np.all', (['(y_pred == y)'], {}), '(y_pred == y)\n', (933, 946), True, 'import numpy as np\n')]
|
# Author : <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sequence Ordering of Alignments
"""
import os
import numpy as np
import pandas as pd
from constants import (
folder,
alignment_file,
recipe_folder_name,
)
from utils import (
fetch_parsed_recipe,
fetch_action_ids,
)
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
class SequenceModel:
def test_sequence_model(self):
dish_list = os.listdir(folder)
dish_list = [dish for dish in dish_list if not dish.startswith(".")]
dish_list.sort()
correct_predictions = 0
num_actions = 0
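# Sequence-order baseline: predict that the i-th action in recipe 1 aligns with the
# i-th action in recipe 2; extra positions are padded/filled with 0 (no alignment).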
for dish in dish_list:
data_folder = os.path.join(folder, dish) # dish folder
recipe_folder = os.path.join(data_folder, recipe_folder_name) # recipe folder
alignment_file_path = os.path.join(
data_folder, alignment_file
) # alignment file
# Gold Standard Alignments between all recipes for dish
alignments = pd.read_csv(
alignment_file_path, sep="\t", header=0, skiprows=0, encoding="utf-8"
)
# Group by Recipe pairs
group_alignments = alignments.groupby(["file1", "file2"])
dish_correct_predictions = 0
dish_num_actions = 0
for key in group_alignments.groups.keys():
#print('Recipe Pair: ')
#print(key)
recipe1_filename = os.path.join(recipe_folder, key[0] + ".conllu")
recipe2_filename = os.path.join(recipe_folder, key[1] + ".conllu")
parsed_recipe1 = fetch_parsed_recipe(recipe1_filename)
parsed_recipe2 = fetch_parsed_recipe(recipe2_filename)
action_ids1 = fetch_action_ids(parsed_recipe1)
#print('Actions in Recipe 1: ')
#print(action_ids1)
action_ids2 = fetch_action_ids(parsed_recipe2)
#print('Actions in Recipe 2: ')
#print(action_ids2)
if len(action_ids1) < len(action_ids2):
predictions = action_ids2[:len(action_ids1)]
else:
predictions = action_ids2
predictions.extend([0] * (len(action_ids1) - len(action_ids2)))
predictions = np.array(predictions)
#print('Predictions: ')
#print(predictions)
recipe_pair_alignment = group_alignments.get_group(key)
true_labels = list()
for i in action_ids1:
# True Action Id
action_line = recipe_pair_alignment.loc[
recipe_pair_alignment["token1"] == i
]
if not action_line.empty:
label = action_line["token2"].item()
true_labels.append(label)
else:
true_labels.append(0)
true_labels = np.array(true_labels)
#print('True Labels:')
#print(true_labels)
score = [predictions == true_labels]
dish_correct_predictions += np.sum(score)
dish_num_actions += len(action_ids1)
dish_accuracy = dish_correct_predictions * 100 / dish_num_actions
correct_predictions += dish_correct_predictions
num_actions += dish_num_actions
print("Accuracy of Dish {} : {:.2f}".format(dish, dish_accuracy))
model_accuracy = correct_predictions * 100 / num_actions
print("Model Accuracy: {:.2f}".format(model_accuracy))
|
[
"os.listdir",
"utils.fetch_action_ids",
"pandas.read_csv",
"os.path.join",
"numpy.array",
"numpy.sum",
"warnings.simplefilter",
"utils.fetch_parsed_recipe"
] |
[((824, 886), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (845, 886), False, 'import warnings\n'), ((974, 992), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (984, 992), False, 'import os\n'), ((1241, 1267), 'os.path.join', 'os.path.join', (['folder', 'dish'], {}), '(folder, dish)\n', (1253, 1267), False, 'import os\n'), ((1311, 1356), 'os.path.join', 'os.path.join', (['data_folder', 'recipe_folder_name'], {}), '(data_folder, recipe_folder_name)\n', (1323, 1356), False, 'import os\n'), ((1434, 1475), 'os.path.join', 'os.path.join', (['data_folder', 'alignment_file'], {}), '(data_folder, alignment_file)\n', (1446, 1475), False, 'import os\n'), ((1634, 1721), 'pandas.read_csv', 'pd.read_csv', (['alignment_file_path'], {'sep': '"""\t"""', 'header': '(0)', 'skiprows': '(0)', 'encoding': '"""utf-8"""'}), "(alignment_file_path, sep='\\t', header=0, skiprows=0, encoding=\n 'utf-8')\n", (1645, 1721), True, 'import pandas as pd\n'), ((2166, 2213), 'os.path.join', 'os.path.join', (['recipe_folder', "(key[0] + '.conllu')"], {}), "(recipe_folder, key[0] + '.conllu')\n", (2178, 2213), False, 'import os\n'), ((2249, 2296), 'os.path.join', 'os.path.join', (['recipe_folder', "(key[1] + '.conllu')"], {}), "(recipe_folder, key[1] + '.conllu')\n", (2261, 2296), False, 'import os\n'), ((2347, 2384), 'utils.fetch_parsed_recipe', 'fetch_parsed_recipe', (['recipe1_filename'], {}), '(recipe1_filename)\n', (2366, 2384), False, 'from utils import fetch_parsed_recipe, fetch_action_ids\n'), ((2418, 2455), 'utils.fetch_parsed_recipe', 'fetch_parsed_recipe', (['recipe2_filename'], {}), '(recipe2_filename)\n', (2437, 2455), False, 'from utils import fetch_parsed_recipe, fetch_action_ids\n'), ((2503, 2535), 'utils.fetch_action_ids', 'fetch_action_ids', (['parsed_recipe1'], {}), '(parsed_recipe1)\n', (2519, 2535), False, 'from utils import fetch_parsed_recipe, fetch_action_ids\n'), ((2667, 2699), 'utils.fetch_action_ids', 'fetch_action_ids', (['parsed_recipe2'], {}), '(parsed_recipe2)\n', (2683, 2699), False, 'from utils import fetch_parsed_recipe, fetch_action_ids\n'), ((3189, 3210), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (3197, 3210), True, 'import numpy as np\n'), ((4039, 4060), 'numpy.array', 'np.array', (['true_labels'], {}), '(true_labels)\n', (4047, 4060), True, 'import numpy as np\n'), ((4268, 4281), 'numpy.sum', 'np.sum', (['score'], {}), '(score)\n', (4274, 4281), True, 'import numpy as np\n')]
|
from owslib.wms import WebMapService
from owslib import crs
from PIL import Image, ImageEnhance, ImageFilter
import cv2
import numpy as np
from pyspark import SparkContext
from pyproj import Proj
c = crs.Crs('EPSG:3857')
wms = WebMapService('http://www.ign.es/wms-inspire/pnoa-ma', version='1.3.0')
box = 1000 # m?
x=236814#m?
y=5068880 #m?
picsize = 512
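# Fetch a 2*box x 2*box metre orthoimage centred on (x, y) in EPSG:3857 from the
# IGN PNOA WMS layer below, rendered as a picsize x picsize PNG.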
img = wms.getmap(
layers=['OI.OrthoimageCoverage'],
styles=[],
srs='EPSG:3857',
bbox=(x - box, y - box, x + box, y + box),
size=(picsize,picsize), #W,H px
format='image/png',
transparent=False
)
with open('image.png','wb') as out:
out.write(img.read())
def green_detection():
img = cv2.imread('image.png')
# Green fields detection
hsv= cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#Threshold for detecting green
lower = np.array([15, 58, 15])
upper = np.array([100,240,100])
# Apply mask
mask = cv2.inRange(hsv, lower, upper)
output = cv2.bitwise_and(img, img, mask = mask)
hsv_gray= cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
green_edges=cv2.Canny(hsv_gray,100,200)
cv2.imwrite('green_hsv.png',hsv)
cv2.imwrite('green.png',output)
cv2.imwrite('green_edges.png', green_edges)
def contour_detection():
# Get the contours
canny_img= cv2.imread('green_edges.png',0)
img= cv2.imread('image.png',1)
img2= cv2.imread('image.png',1)
_, contours, hierarchy=cv2.findContours(canny_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img,contours,-1,(0,255,0))
cv2.imwrite('image_contours.png',img)
#Filter the contours corresponding to football fields comparing the areas of the contours
k=0
filteredContours=[]
fields_x_positions=[]
fields_y_positions=[]
thresholdarea= 800
while(k < len(contours)):
epsilon = 0.1*cv2.arcLength(contours[k],True)
contours[k] = cv2.approxPolyDP(contours[k],epsilon,True)
area = cv2.contourArea(contours[k])
print("Area; ",area)
if thresholdarea + 200 > area > thresholdarea - 200:
filteredContours.append(contours[k])
k+=1
cv2.drawContours(img2,filteredContours,-1,(0,255,0))
cv2.imwrite('image_contours_filtered.png',img2)
return filteredContours
def location_convertor(contours,xm,ym,box,picsize):
p1 = Proj(init='epsg:3857')
c=[]
for i,e in enumerate(contours):
# We calculate the center point of each contour
print(e.tolist())
bounds= cv2.boundingRect(contours[i])
x1,y1,w,h = bounds
x=x1+w/2
y=y1+h/2
print(x,y)
# Pixel to meter conversion
x = (xm-box) + 2*box*(x/picsize)
y = (ym+box) - 2*box*(y/picsize)
# Meter to lon,lat conversion
lon, lat = p1(x,y,inverse=True)
c+=[[lon,lat]]
return c
if __name__ == "__main__":
green_detection()
c=contour_detection()
centers=location_convertor(c,x,y,box,picsize)
print(centers)
|
[
"cv2.imwrite",
"cv2.drawContours",
"owslib.wms.WebMapService",
"cv2.inRange",
"owslib.crs.Crs",
"cv2.arcLength",
"cv2.bitwise_and",
"cv2.contourArea",
"numpy.array",
"cv2.approxPolyDP",
"cv2.cvtColor",
"pyproj.Proj",
"cv2.findContours",
"cv2.Canny",
"cv2.imread",
"cv2.boundingRect"
] |
[((201, 221), 'owslib.crs.Crs', 'crs.Crs', (['"""EPSG:3857"""'], {}), "('EPSG:3857')\n", (208, 221), False, 'from owslib import crs\n'), ((228, 299), 'owslib.wms.WebMapService', 'WebMapService', (['"""http://www.ign.es/wms-inspire/pnoa-ma"""'], {'version': '"""1.3.0"""'}), "('http://www.ign.es/wms-inspire/pnoa-ma', version='1.3.0')\n", (241, 299), False, 'from owslib.wms import WebMapService\n'), ((682, 705), 'cv2.imread', 'cv2.imread', (['"""image.png"""'], {}), "('image.png')\n", (692, 705), False, 'import cv2\n'), ((744, 780), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (756, 780), False, 'import cv2\n'), ((829, 851), 'numpy.array', 'np.array', (['[15, 58, 15]'], {}), '([15, 58, 15])\n', (837, 851), True, 'import numpy as np\n'), ((864, 889), 'numpy.array', 'np.array', (['[100, 240, 100]'], {}), '([100, 240, 100])\n', (872, 889), True, 'import numpy as np\n'), ((921, 951), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower', 'upper'], {}), '(hsv, lower, upper)\n', (932, 951), False, 'import cv2\n'), ((965, 1001), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (980, 1001), False, 'import cv2\n'), ((1019, 1059), 'cv2.cvtColor', 'cv2.cvtColor', (['output', 'cv2.COLOR_BGR2GRAY'], {}), '(output, cv2.COLOR_BGR2GRAY)\n', (1031, 1059), False, 'import cv2\n'), ((1076, 1105), 'cv2.Canny', 'cv2.Canny', (['hsv_gray', '(100)', '(200)'], {}), '(hsv_gray, 100, 200)\n', (1085, 1105), False, 'import cv2\n'), ((1109, 1142), 'cv2.imwrite', 'cv2.imwrite', (['"""green_hsv.png"""', 'hsv'], {}), "('green_hsv.png', hsv)\n", (1120, 1142), False, 'import cv2\n'), ((1146, 1178), 'cv2.imwrite', 'cv2.imwrite', (['"""green.png"""', 'output'], {}), "('green.png', output)\n", (1157, 1178), False, 'import cv2\n'), ((1182, 1225), 'cv2.imwrite', 'cv2.imwrite', (['"""green_edges.png"""', 'green_edges'], {}), "('green_edges.png', green_edges)\n", (1193, 1225), False, 'import cv2\n'), ((1291, 1323), 'cv2.imread', 'cv2.imread', (['"""green_edges.png"""', '(0)'], {}), "('green_edges.png', 0)\n", (1301, 1323), False, 'import cv2\n'), ((1332, 1358), 'cv2.imread', 'cv2.imread', (['"""image.png"""', '(1)'], {}), "('image.png', 1)\n", (1342, 1358), False, 'import cv2\n'), ((1368, 1394), 'cv2.imread', 'cv2.imread', (['"""image.png"""', '(1)'], {}), "('image.png', 1)\n", (1378, 1394), False, 'import cv2\n'), ((1421, 1488), 'cv2.findContours', 'cv2.findContours', (['canny_img', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(canny_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1437, 1488), False, 'import cv2\n'), ((1491, 1539), 'cv2.drawContours', 'cv2.drawContours', (['img', 'contours', '(-1)', '(0, 255, 0)'], {}), '(img, contours, -1, (0, 255, 0))\n', (1507, 1539), False, 'import cv2\n'), ((1539, 1577), 'cv2.imwrite', 'cv2.imwrite', (['"""image_contours.png"""', 'img'], {}), "('image_contours.png', img)\n", (1550, 1577), False, 'import cv2\n'), ((2131, 2188), 'cv2.drawContours', 'cv2.drawContours', (['img2', 'filteredContours', '(-1)', '(0, 255, 0)'], {}), '(img2, filteredContours, -1, (0, 255, 0))\n', (2147, 2188), False, 'import cv2\n'), ((2188, 2236), 'cv2.imwrite', 'cv2.imwrite', (['"""image_contours_filtered.png"""', 'img2'], {}), "('image_contours_filtered.png', img2)\n", (2199, 2236), False, 'import cv2\n'), ((2329, 2351), 'pyproj.Proj', 'Proj', ([], {'init': '"""epsg:3857"""'}), "(init='epsg:3857')\n", (2333, 2351), False, 'from pyproj import Proj\n'), ((1887, 1931), 'cv2.approxPolyDP', 'cv2.approxPolyDP', 
(['contours[k]', 'epsilon', '(True)'], {}), '(contours[k], epsilon, True)\n', (1903, 1931), False, 'import cv2\n'), ((1945, 1973), 'cv2.contourArea', 'cv2.contourArea', (['contours[k]'], {}), '(contours[k])\n', (1960, 1973), False, 'import cv2\n'), ((2473, 2502), 'cv2.boundingRect', 'cv2.boundingRect', (['contours[i]'], {}), '(contours[i])\n', (2489, 2502), False, 'import cv2\n'), ((1833, 1865), 'cv2.arcLength', 'cv2.arcLength', (['contours[k]', '(True)'], {}), '(contours[k], True)\n', (1846, 1865), False, 'import cv2\n')]
|
from __future__ import annotations
from typing import Union, Tuple, List
import warnings
import numpy as np
class Question:
"""Question is a thershold/matching concept for splitting the node of the Decision Tree
Args:
column_index (int): Column index to be chosen from the array passed at the matching time.
value (Union[int, str, float, np.int64, np.float64]): Threshold value/ matching value.
header (str): column/header name.
"""
def __init__(self, column_index: int, value: Union[int, str, float, np.int64, np.float64], header: str):
"""Constructor
"""
self.column_index = column_index
self.value = value
self.header = header
def match(self, example: Union[list, np.ndarray]) -> bool:
"""Matching function to decide based on example whether result is true or false.
Args:
example (Union[list, np.ndarray]): Example to compare with question parameters.
Returns:
bool: True if the example meets the threshold or matches the value, otherwise False.
"""
if isinstance(example, list):
example = np.array(example, dtype="O")
val = example[self.column_index]
# adding numpy int and float data types as well
if isinstance(val, (int, float, np.int64, np.float64)):
# a condition for question to return True or False for numeric value
return float(val) >= float(self.value)
else:
return str(val) == str(self.value) # categorical data comparison
def __repr__(self):
condition = "=="
if isinstance(self.value, (int, float, np.int64, np.float64)):
condition = ">="
return f"Is {self.header} {condition} {self.value} ?"
class Node:
"""A Tree node either Decision Node or Leaf Node
Args:
question (Question, optional): question object. Defaults to None.
true_branch (Node, optional): connection to node at true side of the branch. Defaults to None.
false_branch (Node, optional): connection to node at false side of the branch. Defaults to None.
uncertainty (float, optional): Uncertainty value like gini,entropy,variance etc. Defaults to None.
leaf_value (Union[dict,int,float], optional): Leaf node/final node's value. Defaults to None.
"""
def __init__(self, question: Question = None, true_branch: Node = None, false_branch: Node = None, uncertainty: float = None, *, leaf_value: Union[dict, int, float] = None):
"""Constructor
"""
self.question = question
self.true_branch = true_branch
self.false_branch = false_branch
self.uncertainty = uncertainty
self.leaf_value = leaf_value
@property
def _is_leaf_node(self) -> bool:
"""Check if this node is leaf node or not.
Returns:
bool: True if leaf node else false.
"""
return self.leaf_value is not None
class DecisionTreeClassifier:
"""Decision Tree Based Classification Model
Args:
max_depth (int, optional): max depth of the tree. Defaults to 100.
min_samples_split (int, optional): min size of the sample at the time of split. Defaults to 2.
criteria (str, optional): which criterion to use for information gain. Defaults to 'gini'. Available: 'gini', 'entropy'.
"""
def __init__(self, max_depth: int = 100, min_samples_split: int = 2, criteria: str = 'gini'):
"""Constructor
"""
self._X = None
self._y = None
self._feature_names = None
self._target_name = None
self._tree = None
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.criteria = criteria
def _count_dict(self, a: np.ndarray) -> dict:
"""Count class frequecies and get a dictionary from it
Args:
a (np.ndarray): input array. shape should be (m,1) for m samples.
Returns:
dict: categories/classes freq dictionary.
"""
unique_values = np.unique(a, return_counts=True)
zipped = zip(*unique_values)
dict_obj = dict(zipped)
return dict_obj
def _gini_impurity(self, arr: np.ndarray) -> float:
"""Calculate Gini Impurity
Args:
arr (np.ndarray): input array.
Returns:
float: gini impurity value.
"""
classes, counts = np.unique(arr, return_counts=True)
gini_score = 1 - np.square(counts / arr.shape[0]).sum(axis=0)
return gini_score
def _entropy(self, arr: np.ndarray) -> float:
"""Calculate Entropy
Args:
arr (np.ndarray): input array.
Returns:
float: entropy result.
"""
classes, counts = np.unique(arr, return_counts=True)
p = counts / arr.shape[0]
entropy_score = (-p * np.log2(p)).sum(axis=0)
return entropy_score
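# Quick sanity check of the two measures above (illustrative values only):
# a perfectly balanced binary target gives gini = 1 - 2 * 0.5**2 = 0.5 and
# entropy = -2 * 0.5 * np.log2(0.5) = 1.0, while a pure target gives 0 for both.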
def _uncertainty(self, a: np.ndarray) -> float:
"""calcualte uncertainty
Args:
a (np.ndarray): input array
Returns:
float: uncertainty value
"""
if self.criteria == "entropy":
value = self._entropy(a)
elif self.criteria == "gini":
value = self._gini_impurity(a)
else:
warnings.warn(f"{self.criteria} is not coded yet. returning to gini.")
value = self._gini_impurity(a)
return value
def _partition(self, rows: np.ndarray, question: Union[Question, None]) -> Tuple[list, list]:
"""partition the rows based on the question
Args:
rows (np.ndarray): input array to split.
question (Question): question object containing splitting concept.
Returns:
Tuple[list,list]: true idxs and false idxs.
"""
true_idx, false_idx = [], []
for idx, row in enumerate(rows):
if question.match(row):
true_idx.append(idx)
else:
false_idx.append(idx)
return true_idx, false_idx
def _info_gain(self, left: np.ndarray, right: np.ndarray, parent_uncertainty: float) -> float:
"""Calculate information gain after splitting
Args:
left (np.ndarray): left side array.
right (np.ndarray): right side array.
parent_uncertainty (float): parent node uncertainty.
Returns:
float: information gain value.
"""
# calculate the weight (proportion of samples) in the left split
pr = left.shape[0] / (left.shape[0] + right.shape[0])
# calculate the weighted average uncertainty of the child nodes
child_uncertainty = pr * \
self._uncertainty(left) + (1 - pr) * self._uncertainty(right)
# calculate information gain
info_gain_value = parent_uncertainty - child_uncertainty
return info_gain_value
def _find_best_split(self, X: np.ndarray, y: np.ndarray) -> Tuple[float, Union[Question, None], float]:
"""method to find best split possible for the sample
Args:
X (np.ndarray): Feature matrix.
y (np.ndarray): target matrix.
Returns:
Tuple[float,Union[Question,None],float]: maximum gain from the split, best question of it, and parent node uncertainty.
"""
max_gain = -1
best_split_question = None
parent_uncertainty = self._uncertainty(y)
m_samples, n_labels = X.shape
for col_index in range(n_labels): # iterate over feature columns
# get unique values from the feature
unique_values = np.unique(X[:, col_index])
for val in unique_values: # check for every value and find maximum info gain
ques = Question(
column_index=col_index,
value=val,
header=self._feature_names[col_index]
)
t_idx, f_idx = self._partition(X, ques)
# if it does not split the data
# skip it
if len(t_idx) == 0 or len(f_idx) == 0:
continue
true_y = y[t_idx, :]
false_y = y[f_idx, :]
# get information gain
gain = self._info_gain(true_y, false_y, parent_uncertainty)
if gain > max_gain:
max_gain, best_split_question = gain, ques
return max_gain, best_split_question, parent_uncertainty
def _build_tree(self, X: np.ndarray, y: np.ndarray, depth: int = 0) -> Node:
"""Recursive funtion to build tree.
Args:
X (np.ndarray): input features matrix.
y (np.ndarray): target matrix.
depth (int, optional): depth count of the recursion. Defaults to 0.
Returns:
Node: either leaf node or decision node
"""
m_samples, n_labels = X.shape
# if depth is greater than max depth defined or labels/features are left to 1
# or number of samples are less than the minimum size of samples to split then
# stop recursion and return a node
if (depth > self.max_depth or n_labels == 1 or m_samples < self.min_samples_split):
return Node(leaf_value=self._count_dict(y))
gain, ques, uncertainty = self._find_best_split(X, y)
# if no split produced a positive gain
# there is no point in going further here
if gain < 0:
return Node(leaf_value=self._count_dict(y))
t_idx, f_idx = self._partition(X, ques) # get partition idxs
true_branch = self._build_tree(
X[t_idx, :], y[t_idx, :], depth + 1) # recog true branch samples
false_branch = self._build_tree(
X[f_idx, :], y[f_idx, :], depth + 1) # recog false branch samples
return Node(
question=ques,
true_branch=true_branch,
false_branch=false_branch,
uncertainty=uncertainty
)
def train(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list], feature_name: list = None, target_name: list = None) -> None:
"""Train the model
Args:
X (Union[np.ndarray,list]): feature matrix.
y (Union[np.ndarray,list]): target matrix.
feature_name (list, optional): feature names list. Defaults to None.
target_name (list, optional): target name list. Defaults to None.
"""
X = np.array(X, dtype='O') if not isinstance(
X, (np.ndarray)) else X # converting to numpy array
y = np.array(y, dtype='O') if not isinstance(
y, (np.ndarray)) else y # converting to numpy array
# reshaping to vectors
self._X = X.reshape(-1, 1) if len(X.shape) == 1 else X
self._y = y.reshape(-1, 1) if len(y.shape) == 1 else y
# creating feature names if not mentioned
self._feature_names = feature_name or [
f"C_{i}" for i in range(self._X.shape[1])]
# creating target name if not mentioned
self._target_name = target_name or ['target']
# BOOOM
# building the tree
self._tree = self._build_tree(
X=self._X,
y=self._y
)
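# Typical usage (hypothetical data, not from the original module):
#   clf = DecisionTreeClassifier(max_depth=5, criteria='gini')
#   clf.train(X=[[2.5, 'a'], [7.1, 'b']], y=[0, 1], feature_name=['f1', 'f2'])
#   clf.predict([[2.5, 'a']])  # -> object array of predicted class labels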
def print_tree(self, node: Union[Node, None] = None, spacing: str = "|-") -> None:
"""print the tree
Args:
node (Union[Node,None], optional): starting node. Defaults to None. then it will go to the root node of the tree.
spacing (str, optional): printing separator. Defaults to "|-".
"""
node = node or self._tree
if node._is_leaf_node:
print(spacing, " Predict :", node.leaf_value)
return
# Print the question at this node
print(spacing + str(node.question) +
" | " + self.criteria + " :" + str(node.uncertainty))
# Call this function recursively on the true branch
print(spacing + '--> True:')
self.print_tree(node.true_branch, " " + spacing + "-")
# Call this function recursively on the false branch
print(spacing + '--> False:')
self.print_tree(node.false_branch, " " + spacing + "-")
def _classification(self, row: np.ndarray, node: Union[Node, None]) -> Union[dict]:
"""Classification recursive function
Args:
row (np.ndarray): input matrix.
node (Union[Node,None]): node to start with. mostly root node. rest will be handled by recursion.
Returns:
Union[dict]: leaf value. classification result.
"""
if node._is_leaf_node:
return node.leaf_value
if node.question.match(row):
return self._classification(row, node.true_branch)
else:
return self._classification(row, node.false_branch)
def _leaf_probabilities(self, results: dict) -> dict:
"""get probabilties
Args:
results (dict): results from _classification.
Returns:
dict: dictionary with categorical probabilities.
"""
total = sum(results.values())
probs = {}
for key in results:
probs[key] = (results[key] / total) * 100
return probs
def predict(self, X: Union[np.ndarray, list]) -> np.ndarray:
"""predict classification results
Args:
X (Union[np.ndarray,list]): testing matrix.
Raises:
ValueError: input X can only be a list or numpy array.
Returns:
np.ndarray: results of classification.
"""
if isinstance(X, (np.ndarray, list)):
X = np.array(X, dtype='O') if not isinstance(X, (np.ndarray)) else X
if len(X.shape) == 1:
max_result = 0
result_dict = self._classification(row=X, node=self._tree)
result = None
for key in result_dict:
if result_dict[key] > max_result:
max_result = result_dict[key]
result = key
return np.array([[result]], dtype='O')
else:
leaf_value = []
# get the most frequent category for each row
for row in X:
max_result = 0
result_dict = self._classification(row=row, node=self._tree)
result = None
for key in result_dict:
if result_dict[key] > max_result:
max_result = result_dict[key]
result = key
leaf_value.append([result])
return np.array(leaf_value, dtype='O')
else:
raise ValueError("X should be list or numpy array")
def predict_probability(self, X: Union[np.ndarray, list]) -> Union[np.ndarray, dict]:
"""predict classfication probabilities
Args:
X (Union[np.ndarray,list]): testing matrix.
Raises:
ValueError: input X can only be a list or numpy array.
Returns:
Union[np.ndarray, dict]: probability results of classification.
"""
if isinstance(X, (np.ndarray, list)):
X = np.array(X, dtype='O') if not isinstance(X, (np.ndarray)) else X
if len(X.shape) == 1:
return self._leaf_probabilities(self._classification(row=X, node=self._tree))
else:
leaf_value = []
for row in X:
leaf_value.append([self._leaf_probabilities(
self._classification(row=row, node=self._tree))])
return np.array(leaf_value, dtype='O')
else:
raise ValueError("X should be list or numpy array")
class DecisionTreeRegressor:
"""Decision Tree Based Regression Model
Args:
max_depth (int, optional): maximum depth of the tree. Defaults to 10.
min_samples_split (int, optional): minimum number of samples while splitting. Defaults to 3.
criteria (str, optional): criteria for best info gain. Defaults to 'variance'.
"""
def __init__(self, max_depth: int = 10, min_samples_split: int = 3, criteria: str = 'variance'):
"""constructor
"""
self._X = None
self._y = None
self._feature_names = None
self._target_name = None
self._tree = None
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.criteria = criteria
def _mean_leaf_value(self, a: np.ndarray) -> float:
"""leaf values mean
Args:
a (np.ndarray): input array.
Returns:
float: mean value
"""
return float(np.mean(a))
def _partition(self, rows: np.ndarray, question: Union[Question, None]) -> Tuple[list, list]:
"""partition the rows based on the question
Args:
rows (np.ndarray): input array to split.
question (Question): question object containing splitting concept.
Returns:
Tuple[list,list]: true idxs and false idxs.
"""
true_idx, false_idx = [], []
for idx, row in enumerate(rows):
if question.match(row):
true_idx.append(idx)
else:
false_idx.append(idx)
return true_idx, false_idx
def _uncertainty(self, a: np.ndarray) -> float:
"""calcualte uncertainty
Args:
a (np.ndarray): input array
Returns:
float: uncertainty value
"""
if self.criteria == "variance":
value = np.var(a)
else:
warnings.warn(f"{self.criteria} is not coded yet. returning to variance.")
value = np.var(a)
return float(value)
def _info_gain(self, left: np.ndarray, right: np.ndarray, parent_uncertainty: float) -> float:
"""Calculate information gain after splitting
Args:
left (np.ndarray): left side array.
right (np.ndarray): right side array.
parent_uncertainty (float): parent node uncertainty.
Returns:
float: information gain value.
"""
pr = left.shape[0] / (left.shape[0] + right.shape[0])
child_uncertainty = pr * \
self._uncertainty(left) + (1 - pr) * self._uncertainty(right)
info_gain_value = parent_uncertainty - child_uncertainty
return info_gain_value
def _find_best_split(self, X: np.ndarray, y: np.ndarray) -> Tuple[float, Union[Question, None], float]:
"""method to find best split possible for the sample
Args:
X (np.ndarray): Feature matrix.
y (np.ndarray): target matrix.
Returns:
Tuple[float,Union[Question,None],float]: maximum gain from the split, best question of it, and parent node uncertainty
"""
max_gain = -1
best_split_question = None
parent_uncertainty = self._uncertainty(y)
m_samples, n_labels = X.shape
for col_index in range(n_labels): # iterate over feature columns
# get unique values from the feature
unique_values = np.unique(X[:, col_index])
for val in unique_values: # check for every value and find maximum info gain
ques = Question(
column_index=col_index,
value=val,
header=self._feature_names[col_index]
)
t_idx, f_idx = self._partition(X, ques)
# if it does not split the data
# skip it
if len(t_idx) == 0 or len(f_idx) == 0:
continue
true_y = y[t_idx, :]
false_y = y[f_idx, :]
gain = self._info_gain(true_y, false_y, parent_uncertainty)
if gain > max_gain:
max_gain, best_split_question = gain, ques
return max_gain, best_split_question, parent_uncertainty
def _build_tree(self, X: np.ndarray, y: np.ndarray, depth: int = 0) -> Node:
"""Recursive funtion to build tree
Args:
X (np.ndarray): input features matrix.
y (np.ndarray): target matrix.
depth (int, optional): depth count of the recursion. Defaults to 0.
Returns:
Node: either leaf node or decision node
"""
m_samples, n_labels = X.shape
# if depth is greater than max depth defined or labels/features are left to 1
# or number of samples are less than the minimum size of samples to split then
# stop recursion and return a node
if (depth > self.max_depth or n_labels == 1 or m_samples < self.min_samples_split):
return Node(leaf_value=y)
gain, ques, uncertainty = self._find_best_split(X, y)
# if no split produced a positive gain, stop and return a leaf
if gain < 0:
return Node(leaf_value=y)
t_idx, f_idx = self._partition(X, ques)
true_branch = self._build_tree(
X[t_idx, :], y[t_idx, :], depth + 1) # get true samples
false_branch = self._build_tree(
X[f_idx, :], y[f_idx, :], depth + 1) # get false samples
return Node(
question=ques,
true_branch=true_branch,
false_branch=false_branch,
uncertainty=uncertainty
)
def train(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list], feature_name: list = None, target_name: list = None) -> None:
"""Train the model
Args:
X (Union[np.ndarray,list]): feature matrix.
y (Union[np.ndarray,list]): target matrix.
feature_name (list, optional): feature names list. Defaults to None.
target_name (list, optional): target name list. Defaults to None.
"""
X = np.array(X, dtype='O') if not isinstance(
X, (np.ndarray)) else X # converting to numpy array
y = np.array(y, dtype='O') if not isinstance(
y, (np.ndarray)) else y # converting to numpy array
# reshaping to vectors
self._X = X.reshape(-1, 1) if len(X.shape) == 1 else X
self._y = y.reshape(-1, 1) if len(y.shape) == 1 else y
# creating feature names if not mentioned
self._feature_names = feature_name or [
f"C_{i}" for i in range(self._X.shape[1])]
# creating target name if not mentioned
self._target_name = target_name or ['target']
# BOOOM
# building the tree
self._tree = self._build_tree(
X=self._X,
y=self._y
)
def print_tree(self, node: Union[Node, None] = None, spacing: str = "|-", mean_preds: bool = True) -> None:
"""print the tree
Args:
node (Union[Node,None], optional): starting node. Defaults to None. then it will go to the root node of the tree.
spacing (str, optional): printing separator. Defaults to "|-".
mean_preds (bool): do the mean of prediction values. Defaults to True.
"""
node = node or self._tree
if node._is_leaf_node:
if mean_preds:
print(spacing, " Predict :", self._mean_leaf_value(node.leaf_value))
else:
print(spacing, " Predict :", node.leaf_value[...,-1])
return
# Print the question at this node
print(spacing + str(node.question) +
" | " + self.criteria + " :" + str(node.uncertainty))
# Call this function recursively on the true branch
print(spacing + '--> True:')
self.print_tree(node.true_branch, " " + spacing + "-", mean_preds)
# Call this function recursively on the false branch
print(spacing + '--> False:')
self.print_tree(node.false_branch, " " + spacing + "-", mean_preds)
def _regression(self, row: np.ndarray, node: Union[Node, None], mean_preds: bool) -> float:
"""regression recursive method
Args:
row (np.ndarray): input matrix.
node (Union[Node,None]): node to start with. mostly root node. rest will be handled by recursion.
mean_preds (bool): do the mean of prediction values.
Returns:
float: regression result.
"""
if node._is_leaf_node:
if mean_preds:
return self._mean_leaf_value(node.leaf_value)
else:
return node.leaf_value[...,-1]
if node.question.match(row):
return self._regression(row, node.true_branch, mean_preds)
else:
return self._regression(row, node.false_branch, mean_preds)
def predict(self, X: np.ndarray, mean_preds: bool = True) -> np.ndarray:
"""predict regresssion
Args:
X (np.ndarray): testing matrix.
mean_preds (bool): do the mean of prediction values. Defaults to True.
Raises:
ValueError: X should be list or numpy array
Returns:
np.ndarray: regression prediction.
"""
if isinstance(X, (np.ndarray, list)):
X = np.array(X, dtype='O') if not isinstance(X, (np.ndarray)) else X
if len(X.shape) == 1:
result = self._regression(row=X, node=self._tree, mean_preds=mean_preds)
return np.array([[result]], dtype='O')
else:
leaf_value = []
for row in X:
result = self._regression(row=row, node=self._tree, mean_preds=mean_preds)
leaf_value.append([result])
return np.array(leaf_value, dtype='O')
else:
raise ValueError("X should be list or numpy array")
|
[
"numpy.mean",
"numpy.unique",
"numpy.square",
"numpy.array",
"warnings.warn",
"numpy.log2",
"numpy.var"
] |
[((4092, 4124), 'numpy.unique', 'np.unique', (['a'], {'return_counts': '(True)'}), '(a, return_counts=True)\n', (4101, 4124), True, 'import numpy as np\n'), ((4464, 4498), 'numpy.unique', 'np.unique', (['arr'], {'return_counts': '(True)'}), '(arr, return_counts=True)\n', (4473, 4498), True, 'import numpy as np\n'), ((4824, 4858), 'numpy.unique', 'np.unique', (['arr'], {'return_counts': '(True)'}), '(arr, return_counts=True)\n', (4833, 4858), True, 'import numpy as np\n'), ((1157, 1185), 'numpy.array', 'np.array', (['example'], {'dtype': '"""O"""'}), "(example, dtype='O')\n", (1165, 1185), True, 'import numpy as np\n'), ((7681, 7707), 'numpy.unique', 'np.unique', (['X[:, col_index]'], {}), '(X[:, col_index])\n', (7690, 7707), True, 'import numpy as np\n'), ((10526, 10548), 'numpy.array', 'np.array', (['X'], {'dtype': '"""O"""'}), "(X, dtype='O')\n", (10534, 10548), True, 'import numpy as np\n'), ((10645, 10667), 'numpy.array', 'np.array', (['y'], {'dtype': '"""O"""'}), "(y, dtype='O')\n", (10653, 10667), True, 'import numpy as np\n'), ((16761, 16771), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (16768, 16771), True, 'import numpy as np\n'), ((17666, 17675), 'numpy.var', 'np.var', (['a'], {}), '(a)\n', (17672, 17675), True, 'import numpy as np\n'), ((17702, 17776), 'warnings.warn', 'warnings.warn', (['f"""{self.criteria} is not coded yet. returning to variance."""'], {}), "(f'{self.criteria} is not coded yet. returning to variance.')\n", (17715, 17776), False, 'import warnings\n'), ((17797, 17806), 'numpy.var', 'np.var', (['a'], {}), '(a)\n', (17803, 17806), True, 'import numpy as np\n'), ((19243, 19269), 'numpy.unique', 'np.unique', (['X[:, col_index]'], {}), '(X[:, col_index])\n', (19252, 19269), True, 'import numpy as np\n'), ((21952, 21974), 'numpy.array', 'np.array', (['X'], {'dtype': '"""O"""'}), "(X, dtype='O')\n", (21960, 21974), True, 'import numpy as np\n'), ((22071, 22093), 'numpy.array', 'np.array', (['y'], {'dtype': '"""O"""'}), "(y, dtype='O')\n", (22079, 22093), True, 'import numpy as np\n'), ((5368, 5438), 'warnings.warn', 'warnings.warn', (['f"""{self.criteria} is not coded yet. returning to gini."""'], {}), "(f'{self.criteria} is not coded yet. 
returning to gini.')\n", (5381, 5438), False, 'import warnings\n'), ((13730, 13752), 'numpy.array', 'np.array', (['X'], {'dtype': '"""O"""'}), "(X, dtype='O')\n", (13738, 13752), True, 'import numpy as np\n'), ((14120, 14151), 'numpy.array', 'np.array', (['[[result]]'], {'dtype': '"""O"""'}), "([[result]], dtype='O')\n", (14128, 14151), True, 'import numpy as np\n'), ((14667, 14698), 'numpy.array', 'np.array', (['leaf_value'], {'dtype': '"""O"""'}), "(leaf_value, dtype='O')\n", (14675, 14698), True, 'import numpy as np\n'), ((15236, 15258), 'numpy.array', 'np.array', (['X'], {'dtype': '"""O"""'}), "(X, dtype='O')\n", (15244, 15258), True, 'import numpy as np\n'), ((15672, 15703), 'numpy.array', 'np.array', (['leaf_value'], {'dtype': '"""O"""'}), "(leaf_value, dtype='O')\n", (15680, 15703), True, 'import numpy as np\n'), ((25266, 25288), 'numpy.array', 'np.array', (['X'], {'dtype': '"""O"""'}), "(X, dtype='O')\n", (25274, 25288), True, 'import numpy as np\n'), ((25478, 25509), 'numpy.array', 'np.array', (['[[result]]'], {'dtype': '"""O"""'}), "([[result]], dtype='O')\n", (25486, 25509), True, 'import numpy as np\n'), ((25756, 25787), 'numpy.array', 'np.array', (['leaf_value'], {'dtype': '"""O"""'}), "(leaf_value, dtype='O')\n", (25764, 25787), True, 'import numpy as np\n'), ((4524, 4556), 'numpy.square', 'np.square', (['(counts / arr.shape[0])'], {}), '(counts / arr.shape[0])\n', (4533, 4556), True, 'import numpy as np\n'), ((4923, 4933), 'numpy.log2', 'np.log2', (['p'], {}), '(p)\n', (4930, 4933), True, 'import numpy as np\n')]
|
"""
Utilities for creating simulated data sets.
"""
from typing import Optional, Sequence
import numpy as np
import pandas as pd
from scipy.linalg import toeplitz
from ..api import AllTracker
__all__ = ["sim_data"]
__tracker = AllTracker(globals())
def sim_data(
n: int = 100,
intercept: float = -5,
two_way_coef: Optional[float] = None,
linear_vars: int = 10,
linear_var_coef: Optional[Sequence[float]] = None,
noise_vars: int = 0,
corr_vars: int = 0,
corr_type: str = "AR1",
corr_value: float = 0,
surg_err: float = 0.05,
bin_var_p: float = 0,
bin_coef: float = 0,
outcome: str = "classification",
regression_err: Optional[float] = None,
seed_val: int = 4763546,
) -> pd.DataFrame:
"""
Simulate data for classification or regression that includes an interaction between
two linear features, and some non-linear and linear features.
Noise variables, correlated variables that are not predictive and surrogate features
which are just derived from features that are predictive are also added.
This function is for the most part a direct translation of the ``twoClassSim``
function from the R package caret -- the option for an ordinal outcome and binary
outcome mis-labelling were omitted. Full credit for the approach used for simulating
binary classification data goes to the authors and contributors of caret
[`<NAME>. (2008). Caret package. Journal of Statistical Software, 28(5).
<https://rdrr.io/cran/caret/man/twoClassSim.html>`_]
Key modifications compared to the *R* implementation:
1. The ordinal outcome option has not been translated
2. Mis-labelling of the binary outcome has not been translated
3. The addition of a linear feature that is a copy of another used in the linear
predictor with a small amount of noise has been added to allow for the study
of variable surrogacy
4. Option for a binary predictor and surrogate has been added
5. Toggle option for regression versus classification has been added
6. Arguments for the coefficients of primary predictors of interest has been added
:param n: number of observations
:param intercept: value for the intercept which can be modified to generate class
imbalance
:param two_way_coef: list of three coefficients: two linear terms and an
interaction effect
:param linear_vars: number of linear features
:param linear_var_coef: an optional list of coefficients for linear features if
the default is not desired
:param noise_vars: number of unrelated independent noise features (do not
contribute to the linear predictor)
:param corr_vars: number of unrelated correlated noise features (do not contribute
to the linear predictor)
:param corr_type: type of correlation (exchangeable or auto-regressive) for
correlated noise features
:param corr_value: correlation for correlated noise features
:param surg_err: degree of noise added to the first linear feature to create its surrogate
:param bin_var_p: prevalence for a binary feature to include in linear predictor
:param bin_coef: coefficient for the impact of binary feature on linear predictor
:param outcome: can be either classification for a binary outcome or regression
for a continuous outcome
:param regression_err: the error to be used in simulating a regression outcome
:param seed_val: set a seed for reproducibility
:return: data frame containing the simulated features and target for classification
"""
# set seed
np.random.seed(seed=seed_val)
# add two correlated normal features for use in creating an interaction term in the
# linear predictor
sigma = np.array([[2, 1.3], [1.3, 2]])
mu = [0, 0]
tmp_data = pd.DataFrame(
np.random.multivariate_normal(mu, sigma, size=n),
columns=["TwoFactor1", "TwoFactor2"],
)
# add independent linear features that contribute to the linear predictor
if linear_vars > 0:
lin_cols = ["Linear" + str(x) for x in range(1, linear_vars + 1)]
tmp_data = pd.concat(
[
tmp_data,
pd.DataFrame(np.random.normal(size=(n, linear_vars)), columns=lin_cols),
],
axis=1,
)
else:
lin_cols = None
# add non-linear features that contribute to the linear predictor
tmp_data["Nonlinear1"] = pd.Series(np.random.uniform(low=-1.0, high=1.0, size=n))
tmp_data = pd.concat(
[
tmp_data,
pd.DataFrame(
np.random.uniform(size=(n, 2)), columns=["Nonlinear2", "Nonlinear3"]
),
],
axis=1,
)
# add independent noise features that do not contribute to the linear predictor
if noise_vars > 0:
noise_cols = ["Noise" + str(x) for x in range(1, noise_vars + 1)]
tmp_data = pd.concat(
[
tmp_data,
pd.DataFrame(
np.random.normal(size=(n, noise_vars)), columns=noise_cols
),
],
axis=1,
)
# add correlated noise features that do not contribute to the linear predictor
if corr_vars > 0:
if corr_type == "exch":
vc = corr_value * np.ones((corr_vars, corr_vars))
np.fill_diagonal(vc, 1)
elif corr_type == "AR1":
vc_values = corr_value ** np.arange(corr_vars)
vc = toeplitz(vc_values)
else:
raise ValueError(
f'arg corr_type must be "exch" or "AR1", but got {repr(corr_type)}'
)
corr_cols = ["Corr" + str(x) for x in range(1, corr_vars + 1)]
tmp_data = pd.concat(
[
tmp_data,
pd.DataFrame(
np.random.multivariate_normal(np.zeros(corr_vars), vc, size=n),
columns=corr_cols,
),
],
axis=1,
)
# add a surrogate linear feature that does not contribute to the linear predictor
if linear_vars > 0:
tmp_data["Linear1_prime"] = tmp_data["Linear1"] + np.random.normal(
0, surg_err, size=n
)
# add a binary feature that contributes to the linear predictor
if bin_var_p > 0:
tmp_data["Binary1"] = np.where(np.random.uniform(size=n) <= bin_var_p, 0, 1)
# generate linear predictor
if two_way_coef is None:
two_way_coef = [4, 4, 2]
lp = (
intercept
- two_way_coef[0] * tmp_data.TwoFactor1
+ two_way_coef[1] * tmp_data.TwoFactor2
+ two_way_coef[2] * tmp_data.TwoFactor1 * tmp_data.TwoFactor2
+ tmp_data.Nonlinear1 ** 3
+ 2 * np.exp(-6 * (tmp_data.Nonlinear1 - 0.3) ** 2)
+ 2 * np.sin(np.pi * tmp_data.Nonlinear2 * tmp_data.Nonlinear3)
)
# add independent linear features to the linear predictor if required
if linear_vars > 0:
if linear_var_coef is None:
lin_coef = np.linspace(linear_vars, 1, num=linear_vars) / 4
neg_idx = list(range(1, linear_vars, 2))
lin_coef[neg_idx] *= -1
lp += tmp_data[lin_cols].dot(lin_coef)
elif linear_var_coef is not None:
if linear_vars != len(linear_var_coef):
raise ValueError(
"User defined linear feature coefficient list must be of length "
f"{linear_vars}"
)
lp += tmp_data[lin_cols].dot(linear_var_coef)
# add binary feature to the linear predictor if required
if bin_var_p > 0:
lp += bin_coef * tmp_data["Binary1"]
tmp_data["Binary1_prime"] = 1 - tmp_data["Binary1"]
# create classification outcome from linear predictor
if outcome == "classification":
# convert to a probability
prob = 1 / (1 + np.exp(-lp))
# generate target
tmp_data["target"] = np.where(prob <= np.random.uniform(size=n), 0, 1)
# create regression outcome
elif outcome == "regression":
# continuous outcome based on linear predictor
tmp_data["target"] = np.random.normal(lp, regression_err, size=n)
return tmp_data
__tracker.validate()
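# Example usage (hypothetical values): simulate a small classification data set with
# two pure-noise features and two AR(1)-correlated noise features:
#   df = sim_data(n=500, noise_vars=2, corr_vars=2, corr_value=0.8)
#   df["target"].mean()  # class prevalence, largely driven by `intercept`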
|
[
"numpy.random.normal",
"numpy.ones",
"numpy.random.multivariate_normal",
"numpy.fill_diagonal",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"scipy.linalg.toeplitz",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.sin",
"numpy.arange"
] |
[((3611, 3640), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'seed_val'}), '(seed=seed_val)\n', (3625, 3640), True, 'import numpy as np\n'), ((3765, 3795), 'numpy.array', 'np.array', (['[[2, 1.3], [1.3, 2]]'], {}), '([[2, 1.3], [1.3, 2]])\n', (3773, 3795), True, 'import numpy as np\n'), ((3849, 3897), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'sigma'], {'size': 'n'}), '(mu, sigma, size=n)\n', (3878, 3897), True, 'import numpy as np\n'), ((4476, 4521), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1.0)', 'high': '(1.0)', 'size': 'n'}), '(low=-1.0, high=1.0, size=n)\n', (4493, 4521), True, 'import numpy as np\n'), ((5377, 5400), 'numpy.fill_diagonal', 'np.fill_diagonal', (['vc', '(1)'], {}), '(vc, 1)\n', (5393, 5400), True, 'import numpy as np\n'), ((6202, 6239), 'numpy.random.normal', 'np.random.normal', (['(0)', 'surg_err'], {'size': 'n'}), '(0, surg_err, size=n)\n', (6218, 6239), True, 'import numpy as np\n'), ((6838, 6895), 'numpy.sin', 'np.sin', (['(np.pi * tmp_data.Nonlinear2 * tmp_data.Nonlinear3)'], {}), '(np.pi * tmp_data.Nonlinear2 * tmp_data.Nonlinear3)\n', (6844, 6895), True, 'import numpy as np\n'), ((8192, 8236), 'numpy.random.normal', 'np.random.normal', (['lp', 'regression_err'], {'size': 'n'}), '(lp, regression_err, size=n)\n', (8208, 8236), True, 'import numpy as np\n'), ((4623, 4653), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n, 2)'}), '(size=(n, 2))\n', (4640, 4653), True, 'import numpy as np\n'), ((5333, 5364), 'numpy.ones', 'np.ones', (['(corr_vars, corr_vars)'], {}), '((corr_vars, corr_vars))\n', (5340, 5364), True, 'import numpy as np\n'), ((5511, 5530), 'scipy.linalg.toeplitz', 'toeplitz', (['vc_values'], {}), '(vc_values)\n', (5519, 5530), False, 'from scipy.linalg import toeplitz\n'), ((6392, 6417), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'n'}), '(size=n)\n', (6409, 6417), True, 'import numpy as np\n'), ((6778, 6823), 'numpy.exp', 'np.exp', (['(-6 * (tmp_data.Nonlinear1 - 0.3) ** 2)'], {}), '(-6 * (tmp_data.Nonlinear1 - 0.3) ** 2)\n', (6784, 6823), True, 'import numpy as np\n'), ((7060, 7104), 'numpy.linspace', 'np.linspace', (['linear_vars', '(1)'], {'num': 'linear_vars'}), '(linear_vars, 1, num=linear_vars)\n', (7071, 7104), True, 'import numpy as np\n'), ((7921, 7932), 'numpy.exp', 'np.exp', (['(-lp)'], {}), '(-lp)\n', (7927, 7932), True, 'import numpy as np\n'), ((8007, 8032), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'n'}), '(size=n)\n', (8024, 8032), True, 'import numpy as np\n'), ((4227, 4266), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, linear_vars)'}), '(size=(n, linear_vars))\n', (4243, 4266), True, 'import numpy as np\n'), ((5042, 5080), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, noise_vars)'}), '(size=(n, noise_vars))\n', (5058, 5080), True, 'import numpy as np\n'), ((5473, 5493), 'numpy.arange', 'np.arange', (['corr_vars'], {}), '(corr_vars)\n', (5482, 5493), True, 'import numpy as np\n'), ((5896, 5915), 'numpy.zeros', 'np.zeros', (['corr_vars'], {}), '(corr_vars)\n', (5904, 5915), True, 'import numpy as np\n')]
|
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses.py."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from rlax._src import losses
class L2LossTest(parameterized.TestCase):
def setUp(self):
super(L2LossTest, self).setUp()
self.xs = jnp.array([-2, -1, -0.5, 0, 0.5, 1, 2])
self.ys = jnp.array([2., 0.5, 0.125, 0, 0.125, 0.5, 2.])
self.dys = jnp.array([-2, -1, -0.5, 0, 0.5, 1, 2])
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_l2_loss_scalar(self, compile_fn, place_fn):
# Optionally compile.
l2_loss = compile_fn(losses.l2_loss)
# Optionally convert to device array.
x = place_fn(jnp.array(0.5))
# Test output.
np.testing.assert_allclose(l2_loss(x), 0.125)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_l2_loss_vector(self, compile_fn, place_fn):
# Optionally compile.
l2_loss = compile_fn(losses.l2_loss)
# Optionally convert to device array.
xs = place_fn(self.xs)
# Test output.
np.testing.assert_allclose(l2_loss(xs), self.ys)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_l2_regularizer(self, compile_fn, place_fn):
# Optionally compile.
l2_loss = compile_fn(losses.l2_loss)
# Optionally convert to device array.
xs = place_fn(self.xs)
# Test output.
np.testing.assert_allclose(l2_loss(xs), l2_loss(xs, jnp.zeros_like(xs)))
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_gradients(self, compile_fn, place_fn):
# Optionally compile.
l2_loss = compile_fn(losses.l2_loss)
# Optionally convert to device array.
xs = place_fn(self.xs)
# Compute gradient in batch
batch_grad_func = jax.vmap(jax.grad(l2_loss), (0))
actual = batch_grad_func(xs)
np.testing.assert_allclose(actual, self.dys)
class LogLossTest(parameterized.TestCase):
def setUp(self):
super(LogLossTest, self).setUp()
self.preds = jnp.array([1., 1., 0., 0., 0.5, 0.5])
self.targets = jnp.array([1., 0., 0., 1., 1., 0])
self.expected = jnp.array([0., np.inf, 0., np.inf, 0.6931472, 0.6931472])
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_log_loss_scalar(self, compile_fn, place_fn):
# Optionally compile.
log_loss = compile_fn(losses.log_loss)
# Optionally convert to device array.
preds = place_fn(self.preds[2])
targets = place_fn(self.targets[2])
# Test output.
np.testing.assert_allclose(
log_loss(preds, targets), self.expected[2], atol=1e-4)
@parameterized.named_parameters(
('JitOnp', jax.jit, lambda t: t),
('NoJitOnp', lambda fn: fn, lambda t: t),
('JitJnp', jax.jit, jax.device_put),
('NoJitJnp', lambda fn: fn, jax.device_put))
def test_log_loss_vector(self, compile_fn, place_fn):
# Optionally compile.
log_loss = compile_fn(losses.log_loss)
# Optionally convert to device array.
preds = place_fn(self.preds)
targets = place_fn(self.targets)
# Test output.
np.testing.assert_allclose(
log_loss(preds, targets), self.expected, atol=1e-4)
if __name__ == '__main__':
absltest.main()
|
[
"numpy.testing.assert_allclose",
"absl.testing.absltest.main",
"absl.testing.parameterized.named_parameters",
"jax.numpy.array",
"jax.grad",
"jax.numpy.zeros_like"
] |
[((1173, 1371), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('JitOnp', jax.jit, lambda t: t)", "('NoJitOnp', lambda fn: fn, lambda t: t)", "('JitJnp', jax.jit, jax.device_put)", "('NoJitJnp', lambda fn: fn, jax.device_put)"], {}), "(('JitOnp', jax.jit, lambda t: t), (\n 'NoJitOnp', lambda fn: fn, lambda t: t), ('JitJnp', jax.jit, jax.\n device_put), ('NoJitJnp', lambda fn: fn, jax.device_put))\n", (1203, 1371), False, 'from absl.testing import parameterized\n'), ((1657, 1855), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('JitOnp', jax.jit, lambda t: t)", "('NoJitOnp', lambda fn: fn, lambda t: t)", "('JitJnp', jax.jit, jax.device_put)", "('NoJitJnp', lambda fn: fn, jax.device_put)"], {}), "(('JitOnp', jax.jit, lambda t: t), (\n 'NoJitOnp', lambda fn: fn, lambda t: t), ('JitJnp', jax.jit, jax.\n device_put), ('NoJitJnp', lambda fn: fn, jax.device_put))\n", (1687, 1855), False, 'from absl.testing import parameterized\n'), ((2138, 2336), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('JitOnp', jax.jit, lambda t: t)", "('NoJitOnp', lambda fn: fn, lambda t: t)", "('JitJnp', jax.jit, jax.device_put)", "('NoJitJnp', lambda fn: fn, jax.device_put)"], {}), "(('JitOnp', jax.jit, lambda t: t), (\n 'NoJitOnp', lambda fn: fn, lambda t: t), ('JitJnp', jax.jit, jax.\n device_put), ('NoJitJnp', lambda fn: fn, jax.device_put))\n", (2168, 2336), False, 'from absl.testing import parameterized\n'), ((2643, 2841), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('JitOnp', jax.jit, lambda t: t)", "('NoJitOnp', lambda fn: fn, lambda t: t)", "('JitJnp', jax.jit, jax.device_put)", "('NoJitJnp', lambda fn: fn, jax.device_put)"], {}), "(('JitOnp', jax.jit, lambda t: t), (\n 'NoJitOnp', lambda fn: fn, lambda t: t), ('JitJnp', jax.jit, jax.\n device_put), ('NoJitJnp', lambda fn: fn, jax.device_put))\n", (2673, 2841), False, 'from absl.testing import parameterized\n'), ((3505, 3703), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('JitOnp', jax.jit, lambda t: t)", "('NoJitOnp', lambda fn: fn, lambda t: t)", "('JitJnp', jax.jit, jax.device_put)", "('NoJitJnp', lambda fn: fn, jax.device_put)"], {}), "(('JitOnp', jax.jit, lambda t: t), (\n 'NoJitOnp', lambda fn: fn, lambda t: t), ('JitJnp', jax.jit, jax.\n device_put), ('NoJitJnp', lambda fn: fn, jax.device_put))\n", (3535, 3703), False, 'from absl.testing import parameterized\n'), ((4080, 4278), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('JitOnp', jax.jit, lambda t: t)", "('NoJitOnp', lambda fn: fn, lambda t: t)", "('JitJnp', jax.jit, jax.device_put)", "('NoJitJnp', lambda fn: fn, jax.device_put)"], {}), "(('JitOnp', jax.jit, lambda t: t), (\n 'NoJitOnp', lambda fn: fn, lambda t: t), ('JitJnp', jax.jit, jax.\n device_put), ('NoJitJnp', lambda fn: fn, jax.device_put))\n", (4110, 4278), False, 'from absl.testing import parameterized\n'), ((4673, 4688), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4686, 4688), False, 'from absl.testing import absltest\n'), ((1013, 1052), 'jax.numpy.array', 'jnp.array', (['[-2, -1, -0.5, 0, 0.5, 1, 2]'], {}), '([-2, -1, -0.5, 0, 0.5, 1, 2])\n', (1022, 1052), True, 'import jax.numpy as jnp\n'), ((1067, 1115), 'jax.numpy.array', 'jnp.array', (['[2.0, 0.5, 0.125, 0, 0.125, 0.5, 2.0]'], {}), '([2.0, 0.5, 0.125, 0, 0.125, 0.5, 2.0])\n', (1076, 1115), True, 'import jax.numpy as jnp\n'), ((1129, 
1168), 'jax.numpy.array', 'jnp.array', (['[-2, -1, -0.5, 0, 0.5, 1, 2]'], {}), '([-2, -1, -0.5, 0, 0.5, 1, 2])\n', (1138, 1168), True, 'import jax.numpy as jnp\n'), ((3167, 3211), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual', 'self.dys'], {}), '(actual, self.dys)\n', (3193, 3211), True, 'import numpy as np\n'), ((3331, 3372), 'jax.numpy.array', 'jnp.array', (['[1.0, 1.0, 0.0, 0.0, 0.5, 0.5]'], {}), '([1.0, 1.0, 0.0, 0.0, 0.5, 0.5])\n', (3340, 3372), True, 'import jax.numpy as jnp\n'), ((3388, 3427), 'jax.numpy.array', 'jnp.array', (['[1.0, 0.0, 0.0, 1.0, 1.0, 0]'], {}), '([1.0, 0.0, 0.0, 1.0, 1.0, 0])\n', (3397, 3427), True, 'import jax.numpy as jnp\n'), ((3443, 3502), 'jax.numpy.array', 'jnp.array', (['[0.0, np.inf, 0.0, np.inf, 0.6931472, 0.6931472]'], {}), '([0.0, np.inf, 0.0, np.inf, 0.6931472, 0.6931472])\n', (3452, 3502), True, 'import jax.numpy as jnp\n'), ((1568, 1582), 'jax.numpy.array', 'jnp.array', (['(0.5)'], {}), '(0.5)\n', (1577, 1582), True, 'import jax.numpy as jnp\n'), ((3106, 3123), 'jax.grad', 'jax.grad', (['l2_loss'], {}), '(l2_loss)\n', (3114, 3123), False, 'import jax\n'), ((2618, 2636), 'jax.numpy.zeros_like', 'jnp.zeros_like', (['xs'], {}), '(xs)\n', (2632, 2636), True, 'import jax.numpy as jnp\n')]
|
#!/usr/bin/env python3
"""
Evolve network architecture on a classification dataset, while at the same time training the weights
with one of several learning algorithms.
"""
import joblib
import time
import torch.utils.data
import logging
import numpy as np
import copy
import os
import pickle
from networks import WeightLearningNetwork
from evolution import rank_by_dominance, reproduce_tournament
from datasets import load_preprocessed_dataset
from learning import train, test, train_and_evaluate, get_performance_value
import utils
# Set up parameters and output dir.
params = utils.load_params(mode='wlnn') # based on terminal input
params['script'] = 'run-wlnn-mnist.py'
writer, out_dir = utils.init_output(params, overwrite=params['overwrite_output'])
os.makedirs(os.path.join(out_dir, 'networks')) # dir to store all networks
if params['use_cuda'] and not torch.cuda.is_available():
logging.info('use_cuda was set but cuda is not available, running on cpu')
params['use_cuda'] = False
device = 'cuda' if params['use_cuda'] else 'cpu'
# Ensure deterministic computation.
utils.seed_all(0)
### Ensure that runs are reproducible even on GPU. Note, this slows down training!
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Load dataset.
train_images, train_labels, test_images, test_labels = load_preprocessed_dataset(
params['dataset'], flatten_images=True, use_torch=True)
train_dataset = torch.utils.data.TensorDataset(train_images, train_labels)
test_dataset = torch.utils.data.TensorDataset(test_images, test_labels)
# Create initial population.
# TODO: Make train_only_outputs a learning_rule.
train_only_outputs = (params['train_only_outputs'] or params['learning_rule'] == 'hebbian')
use_random_feedback = (params['learning_rule'] == 'feedback_alignment')
population = [
WeightLearningNetwork(params['num_inputs'], params['num_outputs'],
params['p_initial_connection_enabled'],
p_add_connection=params['p_add_connection'],
p_add_node=params['p_add_node'],
inherit_weights=params['inherit_weights'],
train_only_outputs=train_only_outputs,
use_random_feedback=use_random_feedback,
add_only_hidden_connections=True)
for _ in range(params['population_size'])]
# Add some nodes manually at the beginning.
for net in population:
for _ in range(net.get_num_connections()):
if np.random.rand() < 0.5:
net.add_node()
# Evaluate the networks before doing any evolution or learning.
for net in population:
net.create_torch_layers(device=device)
with joblib.Parallel(n_jobs=params['num_workers']) as parallel:
# Select champion based on training set for consistency with evolution loop.
objectives = parallel(joblib.delayed(test)(net, \
train_dataset, params, device=device) for net in population)
objectives = np.array(objectives)
rewards = -objectives[:, 0]
accs = objectives[:, 1]
best_index = rewards.argmax()
champion = {'net': copy.deepcopy(population[best_index]),
'reward': rewards[best_index],
'acc': accs[best_index],
'connections': population[best_index].get_num_connections()}
logging.info(f'Pre-evolution and training champion net on test set: '
f'reward: {champion["reward"]:.3f} '
f'(acc: {champion["acc"]:.3f})')
for net in population:
net.delete_torch_layers()
# Store the current champion network.
champion['net'].delete_torch_layers()
champion['net'].save(os.path.join(out_dir, 'champion_network.json'))
# Evolution loop.
generation = -1 # necessary for logging info when there are 0 generations
with joblib.Parallel(n_jobs=params['num_workers']) as parallel:
for generation in range(params['num_generations']):
start_time_generation = time.time()
# Evaluate fitness of all networks.
start_time_evaluation = time.time()
objectives = parallel(joblib.delayed(train_and_evaluate)(
net, train_dataset, test_dataset, params, verbose=0, save_net=(generation % 100 == 0),
filename=os.path.join(out_dir, 'networks', f'generation{generation}-net{i}.json'))
for i, net in enumerate(population))
objectives = np.array(objectives) # shape: population_size, 2
rewards = objectives[:, 0]
accs = objectives[:, 1]
complexities = np.array([net.get_num_connections() for net in population])
complexities = np.maximum(complexities, 1) # prevent 0 division
time_evaluation = time.time() - start_time_evaluation
# Pick best net from this generation (based on reward) and check
# if it's better than the previously observed best net (= champion).
start_time_champion_evaluation = time.time()
best_index = rewards.argmax()
if rewards[best_index] > champion['reward']:
# In contrast to run-wann-mnist.py, we don't have to check on the
# entire training set because the network was already evaluated on
# the complete set.
# TODO: Maybe train champion net on more epochs already here (it's
# done below right now) and compare against results of previous
# champion net. This would take quite a bit of time though because
# I probably need to do it at almost every generation.
champion = {'net': copy.deepcopy(population[best_index]),
'reward': rewards[best_index],
'acc': accs[best_index],
'connections': population[best_index].get_num_connections()}
# Save new champion net to file. Note that this net doesn't have weight_matrices when
# using multiple workers (weight_matrices is only created within the worker process).
champion['net'].delete_torch_layers()
champion['net'].save(os.path.join(out_dir, 'champion_network.json'))
time_champion_evaluation = time.time() - start_time_champion_evaluation
# Write metrics to log and tensorboard.
logging.info(f'{generation} - Best net: reward: {rewards[best_index]:.3f} '
f'(acc: {accs[best_index]:.3f}) - evaluation: {time_evaluation:.1f} s, '
f'champion evaluation: {time_champion_evaluation:.1f} s')
writer.add_scalar('best/reward', rewards[best_index], generation)
writer.add_scalar('best/acc', accs[best_index], generation)
if generation % 20 == 0:
if 'long_training_reward' not in champion:
# Train champion net for more epochs.
# TODO: Do this more elegantly. Maybe make an additional
# parameter num_epochs_long.
long_params = params.copy()
long_params['num_epochs'] = 10
champion['net'].create_torch_layers(device)
loss, acc = train(champion['net'], train_dataset, long_params, device=device)
champion['long_training_reward'] = - get_performance_value(loss, period='last_epoch')
champion['long_training_acc'] = get_performance_value(acc, period='last_epoch')
# Evaluate this long trained net on test set.
loss, acc = test(champion['net'], test_dataset, params, device=device)
champion['test_reward'] = -loss
champion['test_acc'] = acc
# Manually delete weight matrices, so they don't block memory
# (important on cuda).
champion['net'].delete_torch_layers()
utils.log_champion_info(champion)
utils.write_champion_info(writer, champion, generation)
utils.write_networks_stats(writer, population, generation)
utils.log_network_stats(population, writer, generation)
logging.info('')
# TODO: Is this necessary?
#writer.add_histogram('final_acc', accs, generation)
writer.add_histogram('population/acc', accs, generation)
writer.add_histogram('population/connections', [net.get_num_connections() for net
in population], generation)
# Store all accuracies and connections (for learning rate plots).
for i, (net, acc) in enumerate(zip(population, accs)):
writer.add_scalar(f'population/net{i}_acc', acc, generation)
writer.add_scalar(f'population/net{i}_connections', net.get_num_connections(), generation)
# Rank networks based on the evaluation metrics.
start_time_ranking = time.time()
# TODO: This is a dirty hack, I am using rewards for both mean_rewards
# and max_rewards for now. Think about how to make this better. Also,
# should maybe adapt parameters of how often complexity is used vs.
# reward.
ranks = rank_by_dominance(rewards, rewards, complexities,
p_complexity_objective=params['p_complexity_objective'])
time_ranking = time.time() - start_time_ranking
# Make new population by picking parent networks via tournament
# selection and mutating them.
start_time_reproduction = time.time()
new_population = reproduce_tournament(population, ranks, params['tournament_size'],
cull_ratio=params['cull_ratio'],
elite_ratio=params['elite_ratio'],
num_mutations=params['num_mutations_per_generation'])
population = new_population
time_reproduction = time.time() - start_time_reproduction
time_generation = time.time() - start_time_generation
writer.add_scalar('times/complete_generation', time_generation, generation)
writer.add_scalar('times/evaluation', time_evaluation, generation)
writer.add_scalar('times/champion_evaluation', time_champion_evaluation, generation)
writer.add_scalar('times/ranking', time_ranking, generation)
writer.add_scalar('times/reproduction', time_reproduction, generation)
# Log final results and close writer.
logging.info('\nResults at the end of evolution:')
utils.log_champion_info(champion)
utils.write_networks_stats(writer, population, generation)
utils.log_network_stats(population, writer, generation)
writer.close()
# Store performance summary.
utils.store_performance(objectives, out_dir=params['out_dir'])
|
[
"numpy.random.rand",
"numpy.array",
"copy.deepcopy",
"utils.init_output",
"utils.store_performance",
"logging.info",
"utils.write_champion_info",
"utils.log_champion_info",
"utils.load_params",
"numpy.maximum",
"learning.get_performance_value",
"learning.train",
"networks.WeightLearningNetwork",
"evolution.reproduce_tournament",
"utils.log_network_stats",
"time.time",
"utils.seed_all",
"os.path.join",
"evolution.rank_by_dominance",
"joblib.Parallel",
"learning.test",
"utils.write_networks_stats",
"joblib.delayed",
"datasets.load_preprocessed_dataset"
] |
[((582, 612), 'utils.load_params', 'utils.load_params', ([], {'mode': '"""wlnn"""'}), "(mode='wlnn')\n", (599, 612), False, 'import utils\n'), ((697, 760), 'utils.init_output', 'utils.init_output', (['params'], {'overwrite': "params['overwrite_output']"}), "(params, overwrite=params['overwrite_output'])\n", (714, 760), False, 'import utils\n'), ((1092, 1109), 'utils.seed_all', 'utils.seed_all', (['(0)'], {}), '(0)\n', (1106, 1109), False, 'import utils\n'), ((1348, 1434), 'datasets.load_preprocessed_dataset', 'load_preprocessed_dataset', (["params['dataset']"], {'flatten_images': '(True)', 'use_torch': '(True)'}), "(params['dataset'], flatten_images=True, use_torch\n =True)\n", (1373, 1434), False, 'from datasets import load_preprocessed_dataset\n'), ((3350, 3491), 'logging.info', 'logging.info', (['f"""Pre-evolution and training champion net on test set: reward: {champion[\'reward\']:.3f} (acc: {champion[\'acc\']:.3f})"""'], {}), '(\n f"Pre-evolution and training champion net on test set: reward: {champion[\'reward\']:.3f} (acc: {champion[\'acc\']:.3f})"\n )\n', (3362, 3491), False, 'import logging\n'), ((10369, 10422), 'logging.info', 'logging.info', (['"""\nResults at the end of evolution:"""'], {}), '("""\nResults at the end of evolution:""")\n', (10381, 10422), False, 'import logging\n'), ((10420, 10453), 'utils.log_champion_info', 'utils.log_champion_info', (['champion'], {}), '(champion)\n', (10443, 10453), False, 'import utils\n'), ((10454, 10512), 'utils.write_networks_stats', 'utils.write_networks_stats', (['writer', 'population', 'generation'], {}), '(writer, population, generation)\n', (10480, 10512), False, 'import utils\n'), ((10513, 10568), 'utils.log_network_stats', 'utils.log_network_stats', (['population', 'writer', 'generation'], {}), '(population, writer, generation)\n', (10536, 10568), False, 'import utils\n'), ((10614, 10676), 'utils.store_performance', 'utils.store_performance', (['objectives'], {'out_dir': "params['out_dir']"}), "(objectives, out_dir=params['out_dir'])\n", (10637, 10676), False, 'import utils\n'), ((773, 806), 'os.path.join', 'os.path.join', (['out_dir', '"""networks"""'], {}), "(out_dir, 'networks')\n", (785, 806), False, 'import os\n'), ((899, 973), 'logging.info', 'logging.info', (['"""use_cuda was set but cuda is not available, running on cpu"""'], {}), "('use_cuda was set but cuda is not available, running on cpu')\n", (911, 973), False, 'import logging\n'), ((1844, 2204), 'networks.WeightLearningNetwork', 'WeightLearningNetwork', (["params['num_inputs']", "params['num_outputs']", "params['p_initial_connection_enabled']"], {'p_add_connection': "params['p_add_connection']", 'p_add_node': "params['p_add_node']", 'inherit_weights': "params['inherit_weights']", 'train_only_outputs': 'train_only_outputs', 'use_random_feedback': 'use_random_feedback', 'add_only_hidden_connections': '(True)'}), "(params['num_inputs'], params['num_outputs'], params[\n 'p_initial_connection_enabled'], p_add_connection=params[\n 'p_add_connection'], p_add_node=params['p_add_node'], inherit_weights=\n params['inherit_weights'], train_only_outputs=train_only_outputs,\n use_random_feedback=use_random_feedback, add_only_hidden_connections=True)\n", (1865, 2204), False, 'from networks import WeightLearningNetwork\n'), ((2728, 2773), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': "params['num_workers']"}), "(n_jobs=params['num_workers'])\n", (2743, 2773), False, 'import joblib\n'), ((3008, 3028), 'numpy.array', 'np.array', (['objectives'], {}), '(objectives)\n', (3016, 
3028), True, 'import numpy as np\n'), ((3667, 3713), 'os.path.join', 'os.path.join', (['out_dir', '"""champion_network.json"""'], {}), "(out_dir, 'champion_network.json')\n", (3679, 3713), False, 'import os\n'), ((3813, 3858), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': "params['num_workers']"}), "(n_jobs=params['num_workers'])\n", (3828, 3858), False, 'import joblib\n'), ((3146, 3183), 'copy.deepcopy', 'copy.deepcopy', (['population[best_index]'], {}), '(population[best_index])\n', (3159, 3183), False, 'import copy\n'), ((3960, 3971), 'time.time', 'time.time', ([], {}), '()\n', (3969, 3971), False, 'import time\n'), ((4049, 4060), 'time.time', 'time.time', ([], {}), '()\n', (4058, 4060), False, 'import time\n'), ((4409, 4429), 'numpy.array', 'np.array', (['objectives'], {}), '(objectives)\n', (4417, 4429), True, 'import numpy as np\n'), ((4632, 4659), 'numpy.maximum', 'np.maximum', (['complexities', '(1)'], {}), '(complexities, 1)\n', (4642, 4659), True, 'import numpy as np\n'), ((4936, 4947), 'time.time', 'time.time', ([], {}), '()\n', (4945, 4947), False, 'import time\n'), ((6257, 6465), 'logging.info', 'logging.info', (['f"""{generation} - Best net: reward: {rewards[best_index]:.3f} (acc: {accs[best_index]:.3f}) - evaluation: {time_evaluation:.1f} s, champion evaluation: {time_champion_evaluation:.1f} s"""'], {}), "(\n f'{generation} - Best net: reward: {rewards[best_index]:.3f} (acc: {accs[best_index]:.3f}) - evaluation: {time_evaluation:.1f} s, champion evaluation: {time_champion_evaluation:.1f} s'\n )\n", (6269, 6465), False, 'import logging\n'), ((8776, 8787), 'time.time', 'time.time', ([], {}), '()\n', (8785, 8787), False, 'import time\n'), ((9058, 9169), 'evolution.rank_by_dominance', 'rank_by_dominance', (['rewards', 'rewards', 'complexities'], {'p_complexity_objective': "params['p_complexity_objective']"}), "(rewards, rewards, complexities, p_complexity_objective=\n params['p_complexity_objective'])\n", (9075, 9169), False, 'from evolution import rank_by_dominance, reproduce_tournament\n'), ((9401, 9412), 'time.time', 'time.time', ([], {}), '()\n', (9410, 9412), False, 'import time\n'), ((9438, 9634), 'evolution.reproduce_tournament', 'reproduce_tournament', (['population', 'ranks', "params['tournament_size']"], {'cull_ratio': "params['cull_ratio']", 'elite_ratio': "params['elite_ratio']", 'num_mutations': "params['num_mutations_per_generation']"}), "(population, ranks, params['tournament_size'],\n cull_ratio=params['cull_ratio'], elite_ratio=params['elite_ratio'],\n num_mutations=params['num_mutations_per_generation'])\n", (9458, 9634), False, 'from evolution import rank_by_dominance, reproduce_tournament\n'), ((2541, 2557), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2555, 2557), True, 'import numpy as np\n'), ((4708, 4719), 'time.time', 'time.time', ([], {}), '()\n', (4717, 4719), False, 'import time\n'), ((6155, 6166), 'time.time', 'time.time', ([], {}), '()\n', (6164, 6166), False, 'import time\n'), ((7766, 7799), 'utils.log_champion_info', 'utils.log_champion_info', (['champion'], {}), '(champion)\n', (7789, 7799), False, 'import utils\n'), ((7812, 7867), 'utils.write_champion_info', 'utils.write_champion_info', (['writer', 'champion', 'generation'], {}), '(writer, champion, generation)\n', (7837, 7867), False, 'import utils\n'), ((7880, 7938), 'utils.write_networks_stats', 'utils.write_networks_stats', (['writer', 'population', 'generation'], {}), '(writer, population, generation)\n', (7906, 7938), False, 'import utils\n'), ((7954, 8009), 
'utils.log_network_stats', 'utils.log_network_stats', (['population', 'writer', 'generation'], {}), '(population, writer, generation)\n', (7977, 8009), False, 'import utils\n'), ((8022, 8038), 'logging.info', 'logging.info', (['""""""'], {}), "('')\n", (8034, 8038), False, 'import logging\n'), ((9222, 9233), 'time.time', 'time.time', ([], {}), '()\n', (9231, 9233), False, 'import time\n'), ((9829, 9840), 'time.time', 'time.time', ([], {}), '()\n', (9838, 9840), False, 'import time\n'), ((9894, 9905), 'time.time', 'time.time', ([], {}), '()\n', (9903, 9905), False, 'import time\n'), ((2894, 2914), 'joblib.delayed', 'joblib.delayed', (['test'], {}), '(test)\n', (2908, 2914), False, 'import joblib\n'), ((5565, 5602), 'copy.deepcopy', 'copy.deepcopy', (['population[best_index]'], {}), '(population[best_index])\n', (5578, 5602), False, 'import copy\n'), ((6072, 6118), 'os.path.join', 'os.path.join', (['out_dir', '"""champion_network.json"""'], {}), "(out_dir, 'champion_network.json')\n", (6084, 6118), False, 'import os\n'), ((7074, 7139), 'learning.train', 'train', (["champion['net']", 'train_dataset', 'long_params'], {'device': 'device'}), "(champion['net'], train_dataset, long_params, device=device)\n", (7079, 7139), False, 'from learning import train, test, train_and_evaluate, get_performance_value\n'), ((7290, 7337), 'learning.get_performance_value', 'get_performance_value', (['acc'], {'period': '"""last_epoch"""'}), "(acc, period='last_epoch')\n", (7311, 7337), False, 'from learning import train, test, train_and_evaluate, get_performance_value\n'), ((7430, 7488), 'learning.test', 'test', (["champion['net']", 'test_dataset', 'params'], {'device': 'device'}), "(champion['net'], test_dataset, params, device=device)\n", (7434, 7488), False, 'from learning import train, test, train_and_evaluate, get_performance_value\n'), ((4091, 4125), 'joblib.delayed', 'joblib.delayed', (['train_and_evaluate'], {}), '(train_and_evaluate)\n', (4105, 4125), False, 'import joblib\n'), ((7193, 7241), 'learning.get_performance_value', 'get_performance_value', (['loss'], {'period': '"""last_epoch"""'}), "(loss, period='last_epoch')\n", (7214, 7241), False, 'from learning import train, test, train_and_evaluate, get_performance_value\n'), ((4247, 4319), 'os.path.join', 'os.path.join', (['out_dir', '"""networks"""', 'f"""generation{generation}-net{i}.json"""'], {}), "(out_dir, 'networks', f'generation{generation}-net{i}.json')\n", (4259, 4319), False, 'import os\n')]
|
import os
import sys
import numpy as np
import time
from utils_io import get_job_config
from utils.model_utils import read_data
from utils.args import parse_job_args
from fedsem import Fedsem_Trainer
from fedavg import Fedavg_Trainer
from fedprox import Fedprox_Trainer
from fedsgd import Fedsgd_Trainer
from fedbayes import Fedbayes_Sing_Trainer
from modelsaver import Model_Saver
def read_yamlconfig(args):
yaml_file = os.path.join("..", "configs", args.experiment, args.configuration)
job = get_job_config(yaml_file)
params = job['PARAMS']
rounds = params['num-rounds']
print("config rounds: ", rounds)
lr = params['lr']
print("config lr: ", lr )
epochs = params['epochs']
print("config epochs: ", epochs)
clients_per_round = params['clients-per-round']
print("config clients per round: ", clients_per_round)
return params
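# Illustrative shape of the yaml file at ../configs/<experiment>/<configuration>
# (values are placeholders; only the keys read above plus the "exp-seeds" list
# consumed in main() are grounded in this script):
#
#   PARAMS:
#     num-rounds: 100
#     lr: 0.01
#     epochs: 5
#     clients-per-round: 10
#     exp-seeds: [0, 1, 2]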
def main():
args = parse_job_args()
config = read_yamlconfig(args)
    # changed 29/08/2021: the following lines are for the google cloud dir
base_dir = os.path.join(os.path.expanduser('~'), 'leaf')
train_data_dir = os.path.join(base_dir, 'data', args.dataset, 'data', 'train')
test_data_dir = os.path.join(base_dir, 'data', args.dataset, 'data', 'test')
users, groups, train_data, test_data = read_data(train_data_dir, test_data_dir)
exp_seeds, book_keep = config["exp-seeds"], [0.] * len(config["exp-seeds"])
for j, rnd_sed in enumerate(exp_seeds):
config["seed"] = rnd_sed
if args.experiment == 'fedavg':
trainer = Fedavg_Trainer(users, groups, train_data, test_data)
metric = trainer.begins(config, args)
trainer.ends()
elif args.experiment == 'fedprox':
trainer = Fedprox_Trainer(users, groups, train_data, test_data)
metric = trainer.begins(config, args)
trainer.ends()
elif args.experiment == 'fedcluster':
pass
elif args.experiment == 'fedsgd':
trainer = Fedsgd_Trainer(users, groups, train_data, test_data)
            metric = trainer.begins(config, args)
trainer.ends()
elif args.experiment == 'fedbayes':
trainer = Fedbayes_Sing_Trainer(users, groups, train_data, test_data)
            metric = trainer.begins(config, args)
trainer.ends()
elif args.experiment == 'modelsaver':
trainer = Model_Saver(users, groups, train_data, test_data)
metric = trainer.begins(config, args)
trainer.ends()
elif args.experiment == 'fedsem':
trainer = Fedsem_Trainer(users, groups, train_data, test_data)
metric = trainer.begins(config, args)
trainer.ends()
else:
print("Applications not defined. Please check configs directory if the name is right.")
break
book_keep[j] = metric
finals = np.array(book_keep) * 100
print(finals)
# print("{} runs - std: {}, med: {}".format(len(exp_seeds),
# np.var(finals),
# np.median(finals)))
main()
|
[
"fedbayes.Fedbayes_Sing_Trainer",
"fedprox.Fedprox_Trainer",
"os.path.join",
"utils.args.parse_job_args",
"fedsgd.Fedsgd_Trainer",
"modelsaver.Model_Saver",
"utils.model_utils.read_data",
"numpy.array",
"fedsem.Fedsem_Trainer",
"utils_io.get_job_config",
"fedavg.Fedavg_Trainer",
"os.path.expanduser"
] |
[((430, 496), 'os.path.join', 'os.path.join', (['""".."""', '"""configs"""', 'args.experiment', 'args.configuration'], {}), "('..', 'configs', args.experiment, args.configuration)\n", (442, 496), False, 'import os\n'), ((507, 532), 'utils_io.get_job_config', 'get_job_config', (['yaml_file'], {}), '(yaml_file)\n', (521, 532), False, 'from utils_io import get_job_config\n'), ((924, 940), 'utils.args.parse_job_args', 'parse_job_args', ([], {}), '()\n', (938, 940), False, 'from utils.args import parse_job_args\n'), ((1131, 1192), 'os.path.join', 'os.path.join', (['base_dir', '"""data"""', 'args.dataset', '"""data"""', '"""train"""'], {}), "(base_dir, 'data', args.dataset, 'data', 'train')\n", (1143, 1192), False, 'import os\n'), ((1213, 1273), 'os.path.join', 'os.path.join', (['base_dir', '"""data"""', 'args.dataset', '"""data"""', '"""test"""'], {}), "(base_dir, 'data', args.dataset, 'data', 'test')\n", (1225, 1273), False, 'import os\n'), ((1320, 1360), 'utils.model_utils.read_data', 'read_data', (['train_data_dir', 'test_data_dir'], {}), '(train_data_dir, test_data_dir)\n', (1329, 1360), False, 'from utils.model_utils import read_data\n'), ((1077, 1100), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1095, 1100), False, 'import os\n'), ((2969, 2988), 'numpy.array', 'np.array', (['book_keep'], {}), '(book_keep)\n', (2977, 2988), True, 'import numpy as np\n'), ((1590, 1642), 'fedavg.Fedavg_Trainer', 'Fedavg_Trainer', (['users', 'groups', 'train_data', 'test_data'], {}), '(users, groups, train_data, test_data)\n', (1604, 1642), False, 'from fedavg import Fedavg_Trainer\n'), ((1785, 1838), 'fedprox.Fedprox_Trainer', 'Fedprox_Trainer', (['users', 'groups', 'train_data', 'test_data'], {}), '(users, groups, train_data, test_data)\n', (1800, 1838), False, 'from fedprox import Fedprox_Trainer\n'), ((2043, 2095), 'fedsgd.Fedsgd_Trainer', 'Fedsgd_Trainer', (['users', 'groups', 'train_data', 'test_data'], {}), '(users, groups, train_data, test_data)\n', (2057, 2095), False, 'from fedsgd import Fedsgd_Trainer\n'), ((2250, 2309), 'fedbayes.Fedbayes_Sing_Trainer', 'Fedbayes_Sing_Trainer', (['users', 'groups', 'train_data', 'test_data'], {}), '(users, groups, train_data, test_data)\n', (2271, 2309), False, 'from fedbayes import Fedbayes_Sing_Trainer\n'), ((2454, 2503), 'modelsaver.Model_Saver', 'Model_Saver', (['users', 'groups', 'train_data', 'test_data'], {}), '(users, groups, train_data, test_data)\n', (2465, 2503), False, 'from modelsaver import Model_Saver\n'), ((2653, 2705), 'fedsem.Fedsem_Trainer', 'Fedsem_Trainer', (['users', 'groups', 'train_data', 'test_data'], {}), '(users, groups, train_data, test_data)\n', (2667, 2705), False, 'from fedsem import Fedsem_Trainer\n')]
|
import numpy as np
from touchstone.environments.make import make_vec_envs
NUM_ENVS = 2
if __name__ == '__main__':
env = make_vec_envs("Pendulum-v0", 42, NUM_ENVS)
np.random.seed(42)
state = env.reset()
for i in range(1000):
actions = env.action_space.sample()
out = env.step([actions for j in range(NUM_ENVS)])
# env.render()
assert abs(env.mean_reward_per_step) < 0.007
assert len(env.returns) == 1000
|
[
"touchstone.environments.make.make_vec_envs",
"numpy.random.seed"
] |
[((126, 168), 'touchstone.environments.make.make_vec_envs', 'make_vec_envs', (['"""Pendulum-v0"""', '(42)', 'NUM_ENVS'], {}), "('Pendulum-v0', 42, NUM_ENVS)\n", (139, 168), False, 'from touchstone.environments.make import make_vec_envs\n'), ((173, 191), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (187, 191), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: generate inputs and targets for the DLRM benchmark
#
# Utility function(s) to download and pre-process public data sets
# - Criteo Kaggle Display Advertising Challenge Dataset
# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset
import os
import sys
from multiprocessing import Manager, Process
import numpy as np
def processCriteoAdData(d_path, d_file, npzfile, i, convertDicts, pre_comp_counts):
# Process Kaggle Display Advertising Challenge or Terabyte Dataset
# by converting unicode strings in X_cat to integers and
# converting negative integer values in X_int.
#
# Loads data in the form "{kaggle|terabyte}_day_i.npz" where i is the day.
#
# Inputs:
# d_path (str): path for {kaggle|terabyte}_day_i.npz files
# i (int): splits in the dataset (typically 0 to 7 or 0 to 24)
# process data if not all files exist
filename_i = npzfile + "_{0}_processed.npz".format(i)
if os.path.exists(filename_i):
print("Using existing " + filename_i, end="\n")
else:
print("Not existing " + filename_i)
with np.load(npzfile + "_{0}.npz".format(i)) as data:
# Approach 2a: using pre-computed dictionaries
X_cat_t = np.zeros(data["X_cat_t"].shape)
for j in range(26):
for k, x in enumerate(data["X_cat_t"][j, :]):
X_cat_t[j, k] = convertDicts[j][x]
# continuous features
X_int = data["X_int"]
X_int[X_int < 0] = 0
# targets
y = data["y"]
np.savez_compressed(
filename_i,
# X_cat = X_cat,
X_cat=np.transpose(X_cat_t), # transpose of the data
X_int=X_int,
y=y,
)
print("Processed " + filename_i, end="\n")
# sanity check (applicable only if counts have been pre-computed & are re-computed)
# for j in range(26):
# if pre_comp_counts[j] != counts[j]:
# sys.exit("ERROR: Sanity check on counts has failed")
# print("\nSanity check on counts passed")
return
def concatCriteoAdData(
d_path,
d_file,
npzfile,
trafile,
days,
data_split,
randomize,
total_per_file,
total_count,
o_filename
):
# Concatenates different days and saves the result.
#
# Inputs:
# days (int): total number of days in the dataset (typically 7 or 24)
# d_path (str): path for {kaggle|terabyte}_day_i.npz files
# o_filename (str): output file name
#
# Output:
# o_file (str): output file path
print("Concatenating multiple days into %s.npz file" % str(d_path + o_filename))
# load and concatenate data
for i in range(days):
filename_i = npzfile + "_{0}_processed.npz".format(i)
with np.load(filename_i) as data:
if i == 0:
X_cat = data["X_cat"]
X_int = data["X_int"]
y = data["y"]
else:
X_cat = np.concatenate((X_cat, data["X_cat"]))
X_int = np.concatenate((X_int, data["X_int"]))
y = np.concatenate((y, data["y"]))
print("Loaded day:", i, "y = 1:", len(y[y == 1]), "y = 0:", len(y[y == 0]))
with np.load(d_path + d_file + "_fea_count.npz") as data:
counts = data["counts"]
print("Loaded counts!")
np.savez_compressed(
d_path + o_filename + ".npz",
X_cat=X_cat,
X_int=X_int,
y=y,
counts=counts,
)
return d_path + o_filename + ".npz"
def getCriteoAdData(
datafile,
o_filename,
max_ind_range=-1,
sub_sample_rate=0.0,
days=7,
data_split='train',
randomize='total',
dataset_multiprocessing=False,
):
# Passes through entire dataset and defines dictionaries for categorical
# features and determines the number of total categories.
#
# Inputs:
# datafile : path to downloaded raw data file
# o_filename (str): saves results under o_filename if filename is not ""
#
# Output:
# o_file (str): output file path
#split the datafile into path and filename
lstr = datafile.split("/")
d_path = "/".join(lstr[0:-1]) + "/"
d_file = lstr[-1].split(".")[0]
npzfile = d_path + ((d_file + "_day"))
trafile = d_path + ((d_file + "_fea"))
# count number of datapoints in training set
total_file = d_path + d_file + "_day_count.npz"
if os.path.exists(total_file):
with np.load(total_file) as data:
total_per_file = list(data["total_per_file"])
total_count = np.sum(total_per_file)
print("Skipping counts per file (already exist)")
else:
total_count = 0
total_per_file = []
# WARNING: The raw data consists of a single train.txt file
# Each line in the file is a sample, consisting of 13 continuous and
# 26 categorical features (an extra space indicates that feature is
# missing and will be interpreted as 0).
if os.path.exists(datafile):
print("Reading data from path=%s" % (datafile))
with open(str(datafile)) as f:
for _ in f:
total_count += 1
total_per_file.append(total_count)
# reset total per file due to split
num_data_per_split, extras = divmod(total_count, days)
total_per_file = [num_data_per_split] * days
for j in range(extras):
total_per_file[j] += 1
# split into days (simplifies code later on)
file_id = 0
boundary = total_per_file[file_id]
nf = open(npzfile + "_" + str(file_id), "w")
with open(str(datafile)) as f:
for j, line in enumerate(f):
if j == boundary:
nf.close()
file_id += 1
nf = open(npzfile + "_" + str(file_id), "w")
boundary += total_per_file[file_id]
nf.write(line)
nf.close()
else:
sys.exit("ERROR: Criteo Kaggle Display Ad Challenge Dataset path is invalid; please download from https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset")
# process a file worth of data and reinitialize data
    # note that a file may contain a single or multiple splits
def process_one_file(
datfile,
npzfile,
split,
num_data_in_split,
dataset_multiprocessing,
convertDictsDay=None,
resultDay=None
):
if dataset_multiprocessing:
convertDicts_day = [{} for _ in range(26)]
with open(str(datfile)) as f:
y = np.zeros(num_data_in_split, dtype="i4") # 4 byte int
X_int = np.zeros((num_data_in_split, 13), dtype="i4") # 4 byte int
X_cat = np.zeros((num_data_in_split, 26), dtype="i4") # 4 byte int
if sub_sample_rate == 0.0:
rand_u = 1.0
else:
rand_u = np.random.uniform(low=0.0, high=1.0, size=num_data_in_split)
i = 0
percent = 0
for k, line in enumerate(f):
# process a line (data point)
line = line.split('\t')
# set missing values to zero
for j in range(len(line)):
if (line[j] == '') or (line[j] == '\n'):
line[j] = '0'
# sub-sample data by dropping zero targets, if needed
target = np.int32(line[0])
if target == 0 and \
(rand_u if sub_sample_rate == 0.0 else rand_u[k]) < sub_sample_rate:
continue
y[i] = target
X_int[i] = np.array(line[1:14], dtype=np.int32)
if max_ind_range > 0:
X_cat[i] = np.array(
list(map(lambda x: int(x, 16) % max_ind_range, line[14:])),
dtype=np.int32
)
else:
X_cat[i] = np.array(
list(map(lambda x: int(x, 16), line[14:])),
dtype=np.int32
)
# count uniques
if dataset_multiprocessing:
for j in range(26):
convertDicts_day[j][X_cat[i][j]] = 1
# debug prints
if float(i)/num_data_in_split*100 > percent+1:
percent = int(float(i)/num_data_in_split*100)
print(
"Load %d/%d (%d%%) Split: %d Label True: %d Stored: %d"
% (
i,
num_data_in_split,
percent,
split,
target,
y[i],
),
end="\n",
)
else:
for j in range(26):
convertDicts[j][X_cat[i][j]] = 1
# debug prints
print(
"Load %d/%d Split: %d Label True: %d Stored: %d"
% (
i,
num_data_in_split,
split,
target,
y[i],
),
end="\r",
)
i += 1
# store num_data_in_split samples or extras at the end of file
# count uniques
# X_cat_t = np.transpose(X_cat)
# for j in range(26):
# for x in X_cat_t[j,:]:
# convertDicts[j][x] = 1
# store parsed
filename_s = npzfile + "_{0}.npz".format(split)
if os.path.exists(filename_s):
print("\nSkip existing " + filename_s)
else:
np.savez_compressed(
filename_s,
X_int=X_int[0:i, :],
# X_cat=X_cat[0:i, :],
X_cat_t=np.transpose(X_cat[0:i, :]), # transpose of the data
y=y[0:i],
)
print("\nSaved " + npzfile + "_{0}.npz!".format(split))
if dataset_multiprocessing:
resultDay[split] = i
convertDictsDay[split] = convertDicts_day
return
else:
return i
# create all splits (reuse existing files if possible)
recreate_flag = False
convertDicts = [{} for _ in range(26)]
    # WARNING: to get reproducible sub-sampling results you must reset the seed below
# np.random.seed(123)
# in this case there is a single split in each day
for i in range(days):
npzfile_i = npzfile + "_{0}.npz".format(i)
npzfile_p = npzfile + "_{0}_processed.npz".format(i)
if os.path.exists(npzfile_i):
print("Skip existing " + npzfile_i)
elif os.path.exists(npzfile_p):
print("Skip existing " + npzfile_p)
else:
recreate_flag = True
if recreate_flag:
if dataset_multiprocessing:
resultDay = Manager().dict()
convertDictsDay = Manager().dict()
processes = [Process(target=process_one_file,
name="process_one_file:%i" % i,
args=(npzfile + "_{0}".format(i),
npzfile,
i,
total_per_file[i],
dataset_multiprocessing,
convertDictsDay,
resultDay,
)
) for i in range(0, days)]
for process in processes:
process.start()
for process in processes:
process.join()
for day in range(days):
total_per_file[day] = resultDay[day]
print("Constructing convertDicts Split: {}".format(day))
convertDicts_tmp = convertDictsDay[day]
for i in range(26):
for j in convertDicts_tmp[i]:
convertDicts[i][j] = 1
else:
for i in range(days):
total_per_file[i] = process_one_file(
npzfile + "_{0}".format(i),
npzfile,
i,
total_per_file[i],
dataset_multiprocessing,
)
# report and save total into a file
total_count = np.sum(total_per_file)
if not os.path.exists(total_file):
np.savez_compressed(total_file, total_per_file=total_per_file)
print("Total number of samples:", total_count)
print("Divided into days/splits:\n", total_per_file)
# dictionary files
counts = np.zeros(26, dtype=np.int32)
if recreate_flag:
# create dictionaries
for j in range(26):
for i, x in enumerate(convertDicts[j]):
convertDicts[j][x] = i
dict_file_j = d_path + d_file + "_fea_dict_{0}.npz".format(j)
if not os.path.exists(dict_file_j):
np.savez_compressed(
dict_file_j,
unique=np.array(list(convertDicts[j]), dtype=np.int32)
)
counts[j] = len(convertDicts[j])
# store (uniques and) counts
count_file = d_path + d_file + "_fea_count.npz"
if not os.path.exists(count_file):
np.savez_compressed(count_file, counts=counts)
else:
# create dictionaries (from existing files)
for j in range(26):
with np.load(d_path + d_file + "_fea_dict_{0}.npz".format(j)) as data:
unique = data["unique"]
for i, x in enumerate(unique):
convertDicts[j][x] = i
# load (uniques and) counts
with np.load(d_path + d_file + "_fea_count.npz") as data:
counts = data["counts"]
# process all splits
if dataset_multiprocessing:
processes = [Process(target=processCriteoAdData,
name="processCriteoAdData:%i" % i,
args=(d_path,
d_file,
npzfile,
i,
convertDicts,
counts,
)
) for i in range(0, days)]
for process in processes:
process.start()
for process in processes:
process.join()
else:
for i in range(days):
processCriteoAdData(d_path, d_file, npzfile, i, convertDicts, counts)
o_file = concatCriteoAdData(
d_path,
d_file,
npzfile,
trafile,
days,
data_split,
randomize,
total_per_file,
total_count,
o_filename
)
return o_file
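# Illustrative invocation (paths and output name are placeholders): the Kaggle
# pre-processing could be launched roughly as
#   getCriteoAdData("/path/to/criteo/train.txt", "kaggle_processed",
#                   days=7, data_split='train', dataset_multiprocessing=True)
# which writes per-day npz splits next to the raw file and returns the path of
# the concatenated, processed npz produced by concatCriteoAdData.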
|
[
"os.path.exists",
"numpy.transpose",
"multiprocessing.Process",
"numpy.int32",
"numpy.sum",
"numpy.zeros",
"numpy.random.uniform",
"numpy.array",
"numpy.concatenate",
"sys.exit",
"multiprocessing.Manager",
"numpy.savez_compressed",
"numpy.load"
] |
[((1153, 1179), 'os.path.exists', 'os.path.exists', (['filename_i'], {}), '(filename_i)\n', (1167, 1179), False, 'import os\n'), ((3617, 3716), 'numpy.savez_compressed', 'np.savez_compressed', (["(d_path + o_filename + '.npz')"], {'X_cat': 'X_cat', 'X_int': 'X_int', 'y': 'y', 'counts': 'counts'}), "(d_path + o_filename + '.npz', X_cat=X_cat, X_int=X_int,\n y=y, counts=counts)\n", (3636, 3716), True, 'import numpy as np\n'), ((4732, 4758), 'os.path.exists', 'os.path.exists', (['total_file'], {}), '(total_file)\n', (4746, 4758), False, 'import os\n'), ((13260, 13282), 'numpy.sum', 'np.sum', (['total_per_file'], {}), '(total_per_file)\n', (13266, 13282), True, 'import numpy as np\n'), ((13538, 13566), 'numpy.zeros', 'np.zeros', (['(26)'], {'dtype': 'np.int32'}), '(26, dtype=np.int32)\n', (13546, 13566), True, 'import numpy as np\n'), ((3499, 3542), 'numpy.load', 'np.load', (["(d_path + d_file + '_fea_count.npz')"], {}), "(d_path + d_file + '_fea_count.npz')\n", (3506, 3542), True, 'import numpy as np\n'), ((4882, 4904), 'numpy.sum', 'np.sum', (['total_per_file'], {}), '(total_per_file)\n', (4888, 4904), True, 'import numpy as np\n'), ((5306, 5330), 'os.path.exists', 'os.path.exists', (['datafile'], {}), '(datafile)\n', (5320, 5330), False, 'import os\n'), ((11441, 11466), 'os.path.exists', 'os.path.exists', (['npzfile_i'], {}), '(npzfile_i)\n', (11455, 11466), False, 'import os\n'), ((13294, 13320), 'os.path.exists', 'os.path.exists', (['total_file'], {}), '(total_file)\n', (13308, 13320), False, 'import os\n'), ((13330, 13392), 'numpy.savez_compressed', 'np.savez_compressed', (['total_file'], {'total_per_file': 'total_per_file'}), '(total_file, total_per_file=total_per_file)\n', (13349, 13392), True, 'import numpy as np\n'), ((1435, 1466), 'numpy.zeros', 'np.zeros', (["data['X_cat_t'].shape"], {}), "(data['X_cat_t'].shape)\n", (1443, 1466), True, 'import numpy as np\n'), ((3052, 3071), 'numpy.load', 'np.load', (['filename_i'], {}), '(filename_i)\n', (3059, 3071), True, 'import numpy as np\n'), ((4773, 4792), 'numpy.load', 'np.load', (['total_file'], {}), '(total_file)\n', (4780, 4792), True, 'import numpy as np\n'), ((6390, 6576), 'sys.exit', 'sys.exit', (['"""ERROR: Criteo Kaggle Display Ad Challenge Dataset path is invalid; please download from https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset"""'], {}), "(\n 'ERROR: Criteo Kaggle Display Ad Challenge Dataset path is invalid; please download from https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset'\n )\n", (6398, 6576), False, 'import sys\n'), ((7058, 7097), 'numpy.zeros', 'np.zeros', (['num_data_in_split'], {'dtype': '"""i4"""'}), "(num_data_in_split, dtype='i4')\n", (7066, 7097), True, 'import numpy as np\n'), ((7132, 7177), 'numpy.zeros', 'np.zeros', (['(num_data_in_split, 13)'], {'dtype': '"""i4"""'}), "((num_data_in_split, 13), dtype='i4')\n", (7140, 7177), True, 'import numpy as np\n'), ((7212, 7257), 'numpy.zeros', 'np.zeros', (['(num_data_in_split, 26)'], {'dtype': '"""i4"""'}), "((num_data_in_split, 26), dtype='i4')\n", (7220, 7257), True, 'import numpy as np\n'), ((10362, 10388), 'os.path.exists', 'os.path.exists', (['filename_s'], {}), '(filename_s)\n', (10376, 10388), False, 'import os\n'), ((11529, 11554), 'os.path.exists', 'os.path.exists', (['npzfile_p'], {}), '(npzfile_p)\n', (11543, 11554), False, 'import os\n'), ((14176, 14202), 'os.path.exists', 'os.path.exists', (['count_file'], {}), '(count_file)\n', (14190, 14202), False, 'import os\n'), ((14216, 14262), 
'numpy.savez_compressed', 'np.savez_compressed', (['count_file'], {'counts': 'counts'}), '(count_file, counts=counts)\n', (14235, 14262), True, 'import numpy as np\n'), ((14607, 14650), 'numpy.load', 'np.load', (["(d_path + d_file + '_fea_count.npz')"], {}), "(d_path + d_file + '_fea_count.npz')\n", (14614, 14650), True, 'import numpy as np\n'), ((14775, 14907), 'multiprocessing.Process', 'Process', ([], {'target': 'processCriteoAdData', 'name': "('processCriteoAdData:%i' % i)", 'args': '(d_path, d_file, npzfile, i, convertDicts, counts)'}), "(target=processCriteoAdData, name='processCriteoAdData:%i' % i, args\n =(d_path, d_file, npzfile, i, convertDicts, counts))\n", (14782, 14907), False, 'from multiprocessing import Manager, Process\n'), ((1866, 1887), 'numpy.transpose', 'np.transpose', (['X_cat_t'], {}), '(X_cat_t)\n', (1878, 1887), True, 'import numpy as np\n'), ((3252, 3290), 'numpy.concatenate', 'np.concatenate', (["(X_cat, data['X_cat'])"], {}), "((X_cat, data['X_cat']))\n", (3266, 3290), True, 'import numpy as np\n'), ((3315, 3353), 'numpy.concatenate', 'np.concatenate', (["(X_int, data['X_int'])"], {}), "((X_int, data['X_int']))\n", (3329, 3353), True, 'import numpy as np\n'), ((3374, 3404), 'numpy.concatenate', 'np.concatenate', (["(y, data['y'])"], {}), "((y, data['y']))\n", (3388, 3404), True, 'import numpy as np\n'), ((7383, 7443), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)', 'size': 'num_data_in_split'}), '(low=0.0, high=1.0, size=num_data_in_split)\n', (7400, 7443), True, 'import numpy as np\n'), ((7896, 7913), 'numpy.int32', 'np.int32', (['line[0]'], {}), '(line[0])\n', (7904, 7913), True, 'import numpy as np\n'), ((8126, 8162), 'numpy.array', 'np.array', (['line[1:14]'], {'dtype': 'np.int32'}), '(line[1:14], dtype=np.int32)\n', (8134, 8162), True, 'import numpy as np\n'), ((13831, 13858), 'os.path.exists', 'os.path.exists', (['dict_file_j'], {}), '(dict_file_j)\n', (13845, 13858), False, 'import os\n'), ((11734, 11743), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (11741, 11743), False, 'from multiprocessing import Manager, Process\n'), ((11781, 11790), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (11788, 11790), False, 'from multiprocessing import Manager, Process\n'), ((10644, 10671), 'numpy.transpose', 'np.transpose', (['X_cat[0:i, :]'], {}), '(X_cat[0:i, :])\n', (10656, 10671), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 10:06:05 2021
@author: ngbla
"""
import os
import cv2
import numpy as np
# Command: pip install pillow
from PIL import Image
#Initialize names and path to empty list
names = []
path = []
# Get the names of all the users
for users in os.listdir("img_training"):
names.append(users)
# Get the path to all the images
for name in names:
for image in os.listdir("img_training/{}".format(name)):
path_string = os.path.join("img_training/{}".format(name), image)
path.append(path_string)
faces = []
ids = []
# For each image create a numpy array and add it to faces list
for img_path in path:
image = Image.open(img_path).convert("L")
imgNp = np.array(image, "uint8")
id = int(img_path.split("/")[2].split("_")[0])
faces.append(imgNp)
ids.append(id)
# Convert the ids to numpy array and add it to ids list
ids = np.array(ids)
print("[INFO] Created faces and names Numpy Arrays")
print("[INFO] Initializing the Classifier")
# Make sure contrib is installed
# The command is pip install opencv-contrib-python
# Call the recognizer
trainer = cv2.face.LBPHFaceRecognizer_create()
#or use EigenFaceRecognizer by replacing above line with
#trainer = cv2.face.EigenFaceRecognizer_create()
#or use FisherFaceRecognizer by replacing above line with
#trainer = cv2.face.FisherFaceRecognizer_create()
# Give the faces and ids numpy arrays
trainer.train(faces, ids)
# Write the generated model to a yml file
trainer.write("training.yml")
#trainer.write("trainingEigen.yml")
print("[INFO] Training Done")
|
[
"numpy.array",
"os.listdir",
"cv2.face.LBPHFaceRecognizer_create",
"PIL.Image.open"
] |
[((313, 339), 'os.listdir', 'os.listdir', (['"""img_training"""'], {}), "('img_training')\n", (323, 339), False, 'import os\n'), ((940, 953), 'numpy.array', 'np.array', (['ids'], {}), '(ids)\n', (948, 953), True, 'import numpy as np\n'), ((1170, 1206), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (1204, 1206), False, 'import cv2\n'), ((756, 780), 'numpy.array', 'np.array', (['image', '"""uint8"""'], {}), "(image, 'uint8')\n", (764, 780), True, 'import numpy as np\n'), ((709, 729), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (719, 729), False, 'from PIL import Image\n')]
|
# -*- coding: utf-8 -*-
"""
Helper functions to organize CHDI imaging data
Created on Fri Jan 15 11:07:53 2016
@author: <NAME>
Python Version: Python 3.5.1 |Anaconda 2.4.1 (64-bit)
"""
import glob as gl
import pandas as pd
import numpy as np
import os
from functools import partial
def linear_pred(m,b,x):
y = m * x + b
return y
def scan_year(visit, studyid='TON'):
"""
Retrieve the year in which a scan was collected.
Parameters
----------
visit : str or int
Visit number
studyid: str, optional
Specifies the study from which files will be retrieved. Valid
values are 'THD' and 'TON'.
Returns
-------
sc_year : int
Actual scan year
"""
if type(visit) is str:
visit = int(visit[-1:])
if studyid == 'TON':
years = [2012, 2013, 2014]
else:
years = [2008, 2009, 2010, 2011]
sc_year = years[visit-1]
return sc_year
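# Illustrative mapping implied by the lists above: scan_year(2, 'TON') -> 2013
# and scan_year('Visit 3', 'THD') -> 2010, since only the trailing digit of a
# string visit label is used as the visit number.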
# Define root directories of every type of scan for each study (TON or THD)
# For TON: mritype = (0: unknown; 1: sMRI; 2:fMRI; 3: DTI)
# For THD: mritype = (0,4: DTI; 1,2,3: sMRI)
rootdir_per_scan_type = dict(TON={0: '',
3: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
'TRACK/Imaging_data/TrackOn_DTI/DTI'),
1: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
'TRACK/Imaging_data/TrackOn_sMRI'),
2: ('/data1/chdi_disks/Disk2/'
'IBM SOW3-part2/TrackOn/fMRI')},
THD={0: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
'TRACK/Imaging_data/TrackHD/DTI'),
4: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
'TRACK/Imaging_data/TrackHD/DTI'),
1: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
'TRACK/Imaging_data/TrackHD/sMRI'),
2: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
'TRACK/Imaging_data/TrackHD/sMRI'),
3: ('/data1/chdi_disks/Disk1/IBM_SOW3/'
'TRACK/Imaging_data/TrackHD/sMRI')})
#rootdir_per_scan_type = dict(TON={2: ('/data1/chdi_disks/Disk4/TRACKON')})
class Subject:
"""
Subject class that integrates all information about a subject (name,
visits, elegibility data, imaging folders, analyses that have been
performed) into a single object
Parameters
----------
subjid : str
Subject ID
general_df : `pandas.core.frame.DataFrame`
Dataframe loaded from general_ton.csv
mri_df : `pandas.core.frame.DataFrame`
Dataframe loaded from mri.csv
"""
def get_general_info(self, general_df, studyid='TON'):
"""
Retrieve general information about the subjects from general_ton.csv
Parameters
----------
general_df: `pandas.core.frame.DataFrame` or `dict`
Single dataframe with general csv file info or dictionary of
dataframes from general.csv and general_ton.csv
studyid: str, optional
Specifies the study from which files will be retrieved. Valid
values are 'THD' and 'TON'
"""
if isinstance(general_df, dict):
gen_df = general_df[studyid]
else:
gen_df = general_df
if gen_df[gen_df.subjid == self.subjid].shape[0] != 1:
            raise ValueError(('The subject ID you requested ({}) is not '
'unique in the general database').
format(self.subjid))
sg = gen_df[gen_df.subjid == self.subjid].iloc[0]
self.studyid = studyid
# Generalized assignment of Subject group attribute (absent on THD)
if studyid == 'TON':
self.group = ['control', 'preHD'][sg.group-1]
self.inclusion_criteria = {'CAG_repeats': sg.ic4,
'disease_burden': sg.ic5,
'motorscores': sg.ic6,
'good_age': sg.ic7}
self.eligibility_criteria = sg[['ec1', 'ec2', 'ec3', 'ec4', 'ec5',
'ec6', 'ec7', 'ec8', 'ec9', 'ec10',
'ec11', 'ec12']].to_dict()
self.eligible = sg.eligible
hand_attr = 'handed'
else:
self.group = ['control', 'preHD', 'earlyHD'][sg.incl02]
self.inclusion_criteria = {'CAG_repeats': sg.incl02c,
'disease_burden': sg.incl02e,
'motorscores': sg.incl02g,
'good_age': not(sg.excl02 | sg.excl03)}
self.exclusion_criteria = sg[['excl01', 'excl04', 'excl05',
'excl06', 'excl07', 'excl08',
'excl09', 'excl10', 'excl11',
'excl12', 'excl13']].to_dict()
hand_attr = 'handness'
sg.fillna(value={hand_attr: 4}, inplace=True)
sg.fillna(value={'ethnic': 7}, inplace=True)
sg.fillna(value={'sex': np.nan}, inplace=True)
sg.fillna(value={'age': np.nan}, inplace=True)
ethnicity_dict = {1: 'caucassian', 11: 'african_black',
12: 'african_north', 13: 'asian_west',
14: 'asian_east', 15: 'mixed',
2: 'american_black', 3: 'american_latin',
6: 'other', 7: 'unknown'}
self.demographics = {'age': sg.age,
'sex': sg.sex,
'ethnicity': ethnicity_dict[sg.ethnic],
'handness': ['right', 'left', 'mixed', 'unknown']\
[int(getattr(sg, hand_attr)) - 1]}
def get_mri_info(self, mri_df):
"""
Retrieve scan-related information from mri.csv
Parameters
----------
mri_df : `pandas.core.frame.DataFrame`
Dataframe loaded from mri.csv
"""
temp = mri_df[mri_df.subjid == self.subjid].copy(deep=True)
if self.studyid == 'TON':
# For TON the dictionary is defined by `subtype` as reported in the
# document (augmented to include extra DTI scans -- blank on csv)
mri_type_dict = {1: 'T1W', 2: 'T1W Repeat', 3: 'T2W',
4: 'Resting State', 5: 'WM Task', 6: 'Motor Task',
7: 'Practice', 8: 'Field Map', 9: 'Generic',
10: 'NODDI', 11: 'CEST/MTR', 12: 'DTI'}
temp.fillna(value={'subytpe': 12}, inplace=True)
temp['subytpe'] = temp['subytpe'].astype(int)
else:
# For THD the dictionary is defined by inspection of `mritype` on
# the mri.csv spreadsheet. As consistent as possible with TON
mri_type_dict = {0: 'DTI', 1: 'T1W', 2: 'T1W Repeat', 3: 'T2W',
4: 'DTI'}
temp['subytpe'] = temp['mritype'].astype(int)
temp.replace({'subytpe': mri_type_dict}, inplace=True)
temp.set_index(['subytpe', 'visit'], inplace=True)
temp.index.set_names('subtype', level=0, inplace=True)
if not temp.index.is_lexsorted():
temp = temp.sort_index()
self.mri = temp
return
def get_subject_info(self, subj_df):
"""
        Retrieve general information about a participant that was not compiled
        at a specific visit, but rather once or in an annually updated manner
        (from subject.csv)
Parameters
----------
subj_df : `pandas.core.frame.DataFrame`
Dataframe loaded from subject.csv
"""
ss = subj_df[subj_df.subjid == self.subjid]
siteid = np.unique(ss.siteid.tolist())
if len(siteid) != 1:
            raise ValueError(('Subject ID {} has different `site ids` for '
                              'TRACK and TrackOn').format(self.subjid))
self.siteid = siteid[0]
def __make_CAP_score_function(self, vd):
"""
Estimates the visit_day to CAP score visit transformation, given the
track_on_visit estimates of dbscore
Parameters
----------
vd : dict of `pandas.core.frame.DataFrame`
Dictionary of visit_ton dataframes
Returns
-------
CAP_dy_func: function
Function that takes the day of a TRACK/TON visit and returns a CAP
"""
tk = [vk for vk in sorted(vd.keys())
if (('ton' in vk) and (self.subjid in vd[vk].subjid.values))]
        # TODO: revisit the selection of visit keys above
        if len(tk) >= 2:
dy_in = np.array([vd[vk][vd[vk]['subjid'] == self.subjid].visdy.iloc[0]
for vk in tk])
db_in = np.array([vd[vk][vd[vk]['subjid'] == self.subjid].dbscore.iloc[0]
for vk in tk])
try:
ok_idx = ~np.isnan(db_in)
x = dy_in[ok_idx]
m, b = np.linalg.lstsq(np.stack((x, np.ones(x.shape)),
axis=1), db_in[ok_idx])[0]
except:
m = b = np.nan
CAP_dy_func = partial(linear_pred, m, b)
return CAP_dy_func
def get_pheno_vars(self, pheno_df):
"""
        Produce a dataframe with phenotypic variables per visit and store it
        as self.pheno_df
        Parameters
        ----------
        pheno_df : `pandas.core.frame.DataFrame`
            Dataframe with phenotypic variables (e.g., one provided by SOW4)
"""
df = pheno_df[pheno_df['subjid'] == self.subjid].copy(deep=True)
vd_to_vis = {dv: dk for dk, dv in self.visdy.items()}
df['visit'] = df['visdy'].replace(vd_to_vis)
df.set_index('visit', inplace=True)
self.pheno_df = df
def get_cog_motor_performance(self, visit_dict):
        '''
        Collect the cognitive/motor task scores of this subject across all
        visits into a dataframe indexed by visit order (stored as
        self.cog_motor_performance)
        '''
cog_motor_tasks = ['sdmt', 'stroop', 'paced_tap', 'indirect_circle_trace',
'map_search', 'cancelation', 'spot_change',
'mental_rotation', 'count_backwards', 'grip_var']
field_list = cog_motor_tasks + ['visdy', 'visit']
        # sort visit keys; dict views cannot be sorted in place
        visits_used = sorted(self.visdy.keys(), key=lambda x: x[::-1])
all_vis_dfs = []
for v_idx, visit in enumerate(visits_used):
visit_df = visit_dict[visit]
fields_in_dict = [fn for fn in field_list if fn in visit_df.columns]
nan_fields = [fn for fn in field_list if fn not in visit_df.columns]
vis_dict = visit_df[visit_df['subjid'] == self.subjid].iloc[0][
fields_in_dict].to_dict()
for field in nan_fields:
vis_dict[field] = np.nan
vis_dict['visit'] = visit
vis_dict['v_idx'] = v_idx
all_vis_dfs.append(vis_dict)
out_df = pd.DataFrame(all_vis_dfs).set_index('v_idx')
self.cog_motor_performance = out_df
def __init__(self, subjid=None, general_df=None, mri_df=None,
subj_df=None, visit_df_dict=None, pheno_df=None,
studyid='TON'):
# Subject.all_subjects.append(self)
if subjid is not None:
self.subjid = subjid
if general_df is not None:
self.get_general_info(general_df, studyid)
if mri_df is not None:
self.get_mri_info(mri_df)
if subj_df is not None:
self.get_subject_info(subj_df)
if visit_df_dict is not None:
self.CAP_from_visdy = self.__make_CAP_score_function(visit_df_dict)
self.visdy = dict()
self.CAP = dict()
            for vk, df in visit_df_dict.items():
if self.subjid in df['subjid'].values:
vd = df[df['subjid'] == self.subjid]['visdy'].iloc[0]
self.visdy[vk] = vd
self.CAP[vk] = self.CAP_from_visdy(vd)
self.get_cog_motor_performance(visit_df_dict)
if pheno_df is not None:
self.get_pheno_vars(pheno_df)
#Continue here: make get_pheno_vars function, duplicate visdy col,
#rename it and apply inverse dictionary
def get_scan_dicom(self, mri_df=None, visit=None, subtype=None):
"""
Retrieve list of dicom filenames (single dicom filename for each
directory) where valid scans of the evaluated subject are located
Parameters
----------
mri_df : `pandas.core.frame.DataFrame`, optional
Dataframe loaded from mri.csv
visit : int, optional
Integer value that specifies the visit number
subtype : str, optional
String that defines the type of image being queried (e.g., "T1W").
For more infoirmation, please refer to
"TRACK-IDS-2015-10-R1-DataDictionary(1).pdf", section 4.15 (MRI)
Returns
-------
dcm_list : list
list of single dicom filenames from directories where valid
scans are located
"""
if 'DTI' in subtype:
if hasattr(subtype, 'extend'):
subtype.extend('Generic')
else:
subtype = [subtype, 'Generic']
if mri_df is None:
mri_df = self.mri
if visit is not None:
visit_str = 'Visit ' + str(visit)
else:
visit_str = None
idx = pd.IndexSlice
if not mri_df.index.is_lexsorted():
mri_df = mri_df.sort_index()
used_df = mri_df.loc[idx[subtype, visit_str], :]
dcm_list = []
#from IPython.terminal.debugger import TerminalPdb; TerminalPdb().set_trace()
for (scandy, mritype, subjid, scandesc,
scanid, scanstatus, this_vst) in zip(
used_df['scandy'], used_df['mritype'],
used_df['subjid'], used_df['scandesc'], used_df['scanid'],
used_df['scanstatus'],
used_df.index.get_level_values('visit')):
try:
scandesc = scandesc.replace(' ', '_')
except:
scandesc = 'NO_SCAN_DESCRIPTION'
dirlist = gl.glob('/'.join([rootdir_per_scan_type[self.studyid][mritype],
subjid, scandesc,
str(scan_year(this_vst,
self.studyid)) + '*',
'S' + str(scanid)]))
cond = dirlist and scanstatus == 1
if cond:
dcm_long_list = gl.glob('/'.join([dirlist[0], '*.dcm']))
cond = cond and dcm_long_list
if cond:
dcm_list.append(dcm_long_list[0])
else:
dcm_list.append('')
return dcm_list
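    # Usage sketch (subject id below is a placeholder); subjects_dict is built by
    # make_Track_ON_subjects() further down:
    #   subj = subjects_dict['R000000000']
    #   t1_dicoms = subj.get_scan_dicom(visit=1, subtype='T1W')
    # returns one dicom filename per directory holding a valid scan, '' where none was found.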
def get_valid_visits(self, mri_df=None, subtype='T1W'):
"""
Retrieve list of visits associated to a given subject where valid scans
of a specific imaging subtype have been acquired. The output is an
array of valid subject/visit pairs.
Parameters
----------
mri_df : `pandas.core.frame.DataFrame`, optional
Dataframe loaded from mri.csv
subtype : str, optional
String that defines the type of image being queried. Default value
            is 'T1W'. For more information, please refer to
"TRACK-IDS-2015-10-R1-DataDictionary(1).pdf", section 4.15 (MRI)
Returns
-------
subj_vst_array : ndarray
Aggregate array of valid subject/visit pairs
"""
subjects_list = []
visits_list = []
if mri_df is None:
mri_df = self.mri
if subtype in mri_df.index.get_level_values('subtype'):
used_df = mri_df.xs((subtype,), level=[0])
for (scandy, mritype, subjid, scandesc,
scanid, scanstatus, this_vst) in zip(
used_df['scandy'], used_df['mritype'], used_df['subjid'],
used_df['scandesc'], used_df['scanid'], used_df['scanstatus'],
used_df.index.get_level_values('visit')):
scandesc = scandesc.replace(' ', '_')
dirlist = gl.glob('/'.join([rootdir_per_scan_type[self.studyid][mritype],
subjid, scandesc,
str(scan_year(this_vst,
self.studyid)) + '*',
'S' + str(scanid)]))
cond = dirlist and scanstatus == 1
if cond:
dcm_long_list = gl.glob('/'.join([dirlist[0], '*.dcm']))
cond = cond and len(dcm_long_list) > 0
if cond:
subjects_list.append(subjid)
vst_nmb = int(this_vst.split(' ')[-1])
visits_list.append(vst_nmb)
# correct for redundant list of visits
inds = np.unique(visits_list, return_index=True)[1]
visits_list = np.array(visits_list)[inds].tolist()
subjects_list = np.array(subjects_list)[inds].tolist()
subj_vst_array = np.array([subjects_list, visits_list])
return subj_vst_array
def make_Track_ON_subjects(datadir, load_full_visit_forms=False):
"""
Create list and dict of subjects from TRACK-ON study,
together with relevant data frames.
Parameters
----------
datadir: str
Specifies directory with csv files.
load_full_visit_forms: bool, optional
Specifies if Visit_X.csv files should be loaded. Generally,
        NOT recommended since it makes things SLOW. Defaults to False.
Returns
-------
subject_list: list
Contains subject id strings.
subjects_dict: dict
Keys: subject id, values: Subject object.
gton: `pandas.core.frame.DataFrame`
Includes contents from general_ton.csv.
mri_TON: `pandas.core.frame.DataFrame`
        Contains rows of mri.csv belonging to the TRACK-ON study only.
"""
cvs_names = [file for file in os.listdir(datadir) if file.endswith('.csv')]
visit_ton_forms = ['visit1_ton', 'visit2_ton', 'visit3_ton']
visit_track_forms = ['visit1', 'visit2', 'visit3', 'visit4']
used_csv_list = ['general_ton', 'mri', 'subject']
THD_cog_motor_dict = {'sdmt_correct': 'sdmt',
'swr_correct': 'stroop',
'ptap_3hz_alltrials_self_intertapinterval_stddev': 'paced_tap',
'circle_indirect_alltrials_annulus_length': 'indirect_circle_trace',
'msearch_totalcorrect_1minute': 'map_search',
# 'cancelation': ''],
'spot_setsize5_k': 'spot_change',
'mrot_all_percentcor': 'mental_rotation',
'gripvarright': 'grip_var'}
#'count_backwards': ''}
TON_cog_motor_dict = {'sdmt_correct': 'sdmt',
'stroop_correct': 'stroop',
'ptap_3hz_all_self_iti_sd': 'paced_tap',
'circle_ind_all_annulus_l': 'indirect_circle_trace',
'msearch_totcorr_1min': 'map_search',
'cancel_digit_totalcorrect_90s': 'cancelation',
'spot_setsize5_k': 'spot_change',
'mrot_all_percentcor':'mental_rotation',
'circle_cnt_direct_totalnumber': 'count_backwards',
'lhx_gf_rx_cvf': 'grip_var'}
# if load_full_visit_forms:
# used_csv_list.extend(visit_ton_forms)
# used_csv_list.extend(visit_track_forms)
# Make a dictionary of dataframes, one for each csv file:
df_dict = {cvs_n.split('.')[0]: pd.read_csv(os.path.join(datadir, cvs_n),
sep='\t') for cvs_n in cvs_names
if cvs_n.split('.')[0] in used_csv_list}
pheno_fn = os.path.join(datadir, 'track_pheno_data.csv')
if os.path.isfile(pheno_fn):
df_dict['track_pheno_data'] = pd.read_csv(pheno_fn, sep=',')
if not load_full_visit_forms:
visit_df_dict = {}
for v_t in visit_track_forms:
csv_used = os.path.join(datadir, v_t + '.csv')
if v_t == 'visit1':
used_cols = ['subjid', 'studyid', 'visit',
'visdy','caglarger__value']
else:
used_cols = ['subjid', 'studyid', 'visit', 'visdy']
used_cols.extend(THD_cog_motor_dict.keys())
with open(csv_used,'r') as f:
head=f.readline()
cols_in_file = head.split('\t')
ok_cols = [col for col in used_cols if col in cols_in_file]
visit_df_dict[v_t] = pd.read_csv(csv_used, sep='\t',
usecols=ok_cols)
visit_df_dict[v_t].rename(columns=THD_cog_motor_dict,
inplace=True)
for visit_ton in visit_ton_forms:
csv_ton_used = os.path.join(datadir, visit_ton + '.csv')
used_cols = ['subjid', 'studyid', 'visdy', 'dbscore']
used_cols.extend(TON_cog_motor_dict.keys())
with open(csv_ton_used,'r') as f:
head=f.readline()
cols_in_file = head.split('\t')
ok_cols = [col for col in used_cols if col in cols_in_file]
visit_df_dict[visit_ton] = pd.read_csv(csv_ton_used,
sep='\t',
usecols=ok_cols)
visit_df_dict[visit_ton].rename(columns=TON_cog_motor_dict,
inplace=True)
else:
        long_visit_list = visit_track_forms + visit_ton_forms
visit_df_dict = {cvs_n.split('.')[0]: pd.read_csv(os.path.join(datadir, cvs_n),
sep='\t') for cvs_n in cvs_names if cvs_n.split('.')[0]
in long_visit_list}
gton = df_dict['general_ton']
mri = df_dict['mri']
subj_df = df_dict['subject']
if 'track_pheno_data' in df_dict.keys():
pheno_df = df_dict['track_pheno_data']
else:
pheno_df = None
# visits_ton = {key: ton_df for key, ton_df in df_dict.iteritems()
# if key in visit_ton_forms}
mri_TON = mri[mri.studyid == 'TON'] # dframe with TRACK-ON scans only
subjects_dict = dict()
subject_list = list()
for subj_ix, subj_name in enumerate(gton['subjid']):
subjects_dict[subj_name] = Subject(subj_name, gton, mri_TON, subj_df,
visit_df_dict, pheno_df)
subject_list.append(subj_name)
return subject_list, subjects_dict, gton, mri_TON, visit_df_dict
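# Minimal usage sketch for the loader above (the csv directory is a placeholder,
# not a path shipped with this code):
#   subject_list, subjects_dict, gton, mri_TON, visit_df_dict = \
#       make_Track_ON_subjects('/path/to/track_csvs')
#   one_subject = subjects_dict[subject_list[0]]
#   print(one_subject.CAP)   # CAP score per visit, keyed by visit name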
def make_Track_subjects_subset(datadir, mristr=3., studyid='TON'):
"""
Create list and dict of subjects from a given TRACK study (Track HD or
Track On) and a specific field strength. It also outputs relevant data
frames.
Parameters
----------
datadir: str
Specifies directory with csv files.
mristr: float, optional
Specifies the field strength of the files to be retrieved.
studyid: str, optional
Specifies the study from which files will be retrieved. Valid values
are 'THD' and 'TON'.
Returns
-------
subject_list: list
Contains subject id strings.
subjects_dict: dict
Keys: subject id, values: Subject object.
gen_tk: `pandas.core.frame.DataFrame`
Includes contents from appropriate Track study general csv file.
mri_tk: `pandas.core.frame.DataFrame`
Contains rows of mri.csv associated to the specified Track study only.
"""
csv_names = [file for file in os.listdir(datadir) if file.endswith('.csv')]
used_csv_list = ['general_ton', 'general', 'mri', 'subject']
# Make a dictionary of dataframes, one for each csv file:
df_dict = {cvs_n.split('.')[0]: pd.read_csv(os.path.join(datadir, cvs_n),
sep='\t') for cvs_n in csv_names if cvs_n.split('.')[0]
in used_csv_list}
gen_tk = {key: df_dict[key] for key in used_csv_list[:2]}
gen_tk['TON'] = gen_tk.pop('general_ton')
gen_tk['THD'] = gen_tk.pop('general')
mri_tk = df_dict['mri']
# Retrieve info only from defined study of interest and field of strength
mri_tk = mri_tk[mri_tk.studyid == studyid]
mri_tk = mri_tk[mri_tk.mristr == mristr]
subj_df = df_dict['subject']
subjects_dict = dict()
subject_list = list()
subjects_ids = np.unique(mri_tk['subjid'])
for subj_name in subjects_ids:
subjects_dict[subj_name] = Subject(subj_name, gen_tk, mri_tk,
subj_df, studyid=studyid)
subject_list.append(subj_name)
return subject_list, subjects_dict, gen_tk, mri_tk
def make_Track_ON_subjs_n_visits(datadir, subtype='T1W'):
"""
Retrieve list of visits for which valid scans of an imaging subtype exist.
This search is performed for all subjects listed in `mri.csv`. The output
is an array of valid subject/visit pairs.
Parameters
----------
datadir: str
Specifies directory with csv files.
subtype : str, optional
String that defines the type of image being queried. Default value
is 'T1W'. For more information, please refer to
"TRACK-IDS-2015-10-R1-DataDictionary(1).pdf", section 4.15 (MRI)
Returns
-------
subj_vst_array : ndarray
Aggregate array of valid subject/visit pairs
"""
csv_names = [file for file in os.listdir(datadir) if file.endswith('.csv')]
used_csv_list = ['general_ton', 'mri', 'subject']
# Make a dictionary of dataframes, one for each csv file:
df_dict = {csv_n.split('.')[0]: pd.read_csv(os.path.join(datadir, csv_n),
sep='\t') for csv_n in csv_names if csv_n.split('.')[0]
in used_csv_list}
gton = df_dict['general_ton']
mri = df_dict['mri']
subj_df = df_dict['subject']
mri_TON = mri[mri.studyid == 'TON'] # dframe with TRACK-ON scans only
subj_visit_array = np.array([])
subjects_ids = np.unique(mri_TON['subjid'])
for subj_name in subjects_ids:
subj_obj = Subject(subj_name, gton, mri_TON, subj_df)
if subj_visit_array.size == 0:
subj_visit_array = subj_obj.get_valid_visits(subtype=subtype)
else:
new_subj_visit = subj_obj.get_valid_visits(subtype=subtype)
if new_subj_visit.size > 0:
subj_visit_array = np.concatenate((subj_visit_array,
new_subj_visit), axis=1)
return subj_visit_array
|
[
"os.listdir",
"numpy.unique",
"pandas.read_csv",
"numpy.ones",
"os.path.join",
"os.path.isfile",
"numpy.array",
"functools.partial",
"numpy.isnan",
"numpy.concatenate",
"pandas.DataFrame"
] |
[((20722, 20767), 'os.path.join', 'os.path.join', (['datadir', '"""track_pheno_data.csv"""'], {}), "(datadir, 'track_pheno_data.csv')\n", (20734, 20767), False, 'import os\n'), ((20775, 20799), 'os.path.isfile', 'os.path.isfile', (['pheno_fn'], {}), '(pheno_fn)\n', (20789, 20799), False, 'import os\n'), ((25366, 25393), 'numpy.unique', 'np.unique', (["mri_tk['subjid']"], {}), "(mri_tk['subjid'])\n", (25375, 25393), True, 'import numpy as np\n'), ((26941, 26953), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (26949, 26953), True, 'import numpy as np\n'), ((26973, 27001), 'numpy.unique', 'np.unique', (["mri_TON['subjid']"], {}), "(mri_TON['subjid'])\n", (26982, 27001), True, 'import numpy as np\n'), ((9556, 9582), 'functools.partial', 'partial', (['linear_pred', 'm', 'b'], {}), '(linear_pred, m, b)\n', (9563, 9582), False, 'from functools import partial\n'), ((17877, 17915), 'numpy.array', 'np.array', (['[subjects_list, visits_list]'], {}), '([subjects_list, visits_list])\n', (17885, 17915), True, 'import numpy as np\n'), ((20839, 20869), 'pandas.read_csv', 'pd.read_csv', (['pheno_fn'], {'sep': '""","""'}), "(pheno_fn, sep=',')\n", (20850, 20869), True, 'import pandas as pd\n'), ((9027, 9105), 'numpy.array', 'np.array', (["[vd[vk][vd[vk]['subjid'] == self.subjid].visdy.iloc[0] for vk in tk]"], {}), "([vd[vk][vd[vk]['subjid'] == self.subjid].visdy.iloc[0] for vk in tk])\n", (9035, 9105), True, 'import numpy as np\n'), ((9156, 9241), 'numpy.array', 'np.array', (["[vd[vk][vd[vk]['subjid'] == self.subjid].dbscore.iloc[0] for vk in tk]"], {}), "([vd[vk][vd[vk]['subjid'] == self.subjid].dbscore.iloc[0] for vk in tk]\n )\n", (9164, 9241), True, 'import numpy as np\n'), ((18795, 18814), 'os.listdir', 'os.listdir', (['datadir'], {}), '(datadir)\n', (18805, 18814), False, 'import os\n'), ((20573, 20601), 'os.path.join', 'os.path.join', (['datadir', 'cvs_n'], {}), '(datadir, cvs_n)\n', (20585, 20601), False, 'import os\n'), ((20992, 21027), 'os.path.join', 'os.path.join', (['datadir', "(v_t + '.csv')"], {}), "(datadir, v_t + '.csv')\n", (21004, 21027), False, 'import os\n'), ((21544, 21592), 'pandas.read_csv', 'pd.read_csv', (['csv_used'], {'sep': '"""\t"""', 'usecols': 'ok_cols'}), "(csv_used, sep='\\t', usecols=ok_cols)\n", (21555, 21592), True, 'import pandas as pd\n'), ((21831, 21872), 'os.path.join', 'os.path.join', (['datadir', "(visit_ton + '.csv')"], {}), "(datadir, visit_ton + '.csv')\n", (21843, 21872), False, 'import os\n'), ((22231, 22283), 'pandas.read_csv', 'pd.read_csv', (['csv_ton_used'], {'sep': '"""\t"""', 'usecols': 'ok_cols'}), "(csv_ton_used, sep='\\t', usecols=ok_cols)\n", (22242, 22283), True, 'import pandas as pd\n'), ((24558, 24577), 'os.listdir', 'os.listdir', (['datadir'], {}), '(datadir)\n', (24568, 24577), False, 'import os\n'), ((24779, 24807), 'os.path.join', 'os.path.join', (['datadir', 'cvs_n'], {}), '(datadir, cvs_n)\n', (24791, 24807), False, 'import os\n'), ((26407, 26426), 'os.listdir', 'os.listdir', (['datadir'], {}), '(datadir)\n', (26417, 26426), False, 'import os\n'), ((26617, 26645), 'os.path.join', 'os.path.join', (['datadir', 'csv_n'], {}), '(datadir, csv_n)\n', (26629, 26645), False, 'import os\n'), ((9303, 9318), 'numpy.isnan', 'np.isnan', (['db_in'], {}), '(db_in)\n', (9311, 9318), True, 'import numpy as np\n'), ((11356, 11381), 'pandas.DataFrame', 'pd.DataFrame', (['all_vis_dfs'], {}), '(all_vis_dfs)\n', (11368, 11381), True, 'import pandas as pd\n'), ((17676, 17717), 'numpy.unique', 'np.unique', (['visits_list'], {'return_index': '(True)'}), 
'(visits_list, return_index=True)\n', (17685, 17717), True, 'import numpy as np\n'), ((22653, 22681), 'os.path.join', 'os.path.join', (['datadir', 'cvs_n'], {}), '(datadir, cvs_n)\n', (22665, 22681), False, 'import os\n'), ((27373, 27431), 'numpy.concatenate', 'np.concatenate', (['(subj_visit_array, new_subj_visit)'], {'axis': '(1)'}), '((subj_visit_array, new_subj_visit), axis=1)\n', (27387, 27431), True, 'import numpy as np\n'), ((17747, 17768), 'numpy.array', 'np.array', (['visits_list'], {}), '(visits_list)\n', (17755, 17768), True, 'import numpy as np\n'), ((17812, 17835), 'numpy.array', 'np.array', (['subjects_list'], {}), '(subjects_list)\n', (17820, 17835), True, 'import numpy as np\n'), ((9397, 9413), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (9404, 9413), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
import yt
from hyperion.model import Model
import matplotlib as mpl
mpl.use('Agg')
import powderday.config as cfg
from powderday.grid_construction import yt_octree_generate
from powderday.find_order import find_order
import powderday.powderday_test_octree as pto
import powderday.hyperion_octree_stats as hos
from hyperion.dust import SphericalDust
from powderday.helpers import energy_density_absorbed_by_CMB
from powderday.analytics import dump_cell_info
def sph_m_gen(fname,field_add):
refined,dustdens,fc1,fw1,reg,ds = yt_octree_generate(fname,field_add)
if yt.__version__ == '4.0.dev0':
xmin = (fc1[:,0]-fw1[:,0]/2.).to('cm') #in proper cm
xmax = (fc1[:,0]+fw1[:,0]/2.).to('cm')
ymin = (fc1[:,1]-fw1[:,1]/2.).to('cm')
ymax = (fc1[:,1]+fw1[:,1]/2.).to('cm')
zmin = (fc1[:,2]-fw1[:,2]/2.).to('cm')
zmax = (fc1[:,2]+fw1[:,2]/2.).to('cm')
else:
xmin = (fc1[:,0]-fw1[:,0]/2.).convert_to_units('cm') #in proper cm
xmax = (fc1[:,0]+fw1[:,0]/2.).convert_to_units('cm')
ymin = (fc1[:,1]-fw1[:,1]/2.).convert_to_units('cm')
ymax = (fc1[:,1]+fw1[:,1]/2.).convert_to_units('cm')
zmin = (fc1[:,2]-fw1[:,2]/2.).convert_to_units('cm')
zmax = (fc1[:,2]+fw1[:,2]/2.).convert_to_units('cm')
#dx,dy,dz are the edges of the parent grid
dx = (np.max(xmax)-np.min(xmin)).value
dy = (np.max(ymax)-np.min(ymin)).value
dz = (np.max(zmax)-np.min(zmin)).value
xcent = float(ds.quan(cfg.model.x_cent,"code_length").to('cm').value)
ycent = float(ds.quan(cfg.model.y_cent,"code_length").to('cm').value)
zcent = float(ds.quan(cfg.model.z_cent,"code_length").to('cm').value)
boost = np.array([xcent,ycent,zcent])
print ('[sph_tributary] boost = ',boost)
print ('[sph_tributary] xmin (pc)= ',np.min(xmin.to('pc')))
print ('[sph_tributary] xmax (pc)= ',np.max(xmax.to('pc')))
print ('[sph_tributary] ymin (pc)= ',np.min(ymin.to('pc')))
print ('[sph_tributary] ymax (pc)= ',np.max(ymax.to('pc')))
print ('[sph_tributary] zmin (pc)= ',np.min(zmin.to('pc')))
print ('[sph_tributary] zmax (pc)= ',np.max(zmax.to('pc')))
#<NAME>'s conversion from z-first ordering (yt's default) to
#x-first ordering (the script should work both ways)
refined_array = np.array(refined)
refined_array = np.squeeze(refined_array)
order = find_order(refined_array)
refined_reordered = []
dustdens_reordered = np.zeros(len(order))
for i in range(len(order)):
refined_reordered.append(refined[order[i]])
dustdens_reordered[i] = dustdens[order[i]]
refined = refined_reordered
dustdens=dustdens_reordered
#hyperion octree stats
max_level = hos.hyperion_octree_stats(refined)
pto.test_octree(refined,max_level)
dump_cell_info(refined,fc1,fw1,xmin,xmax,ymin,ymax,zmin,zmax)
np.save('refined.npy',refined)
np.save('density.npy',dustdens)
#========================================================================
#Initialize Hyperion Model
#========================================================================
m = Model()
print ('Setting Octree Grid with Parameters: ')
#m.set_octree_grid(xcent,ycent,zcent,
# dx,dy,dz,refined)
m.set_octree_grid(0,0,0,dx/2,dy/2,dz/2,refined)
#get CMB:
energy_density_absorbed=energy_density_absorbed_by_CMB()
specific_energy = np.repeat(energy_density_absorbed.value,dustdens.shape)
if cfg.par.PAH == True:
# load PAH fractions for usg, vsg, and big (grain sizes)
frac = cfg.par.PAH_frac
# Normalize to 1
total = np.sum(list(frac.values()))
frac = {k: v / total for k, v in frac.items()}
for size in frac.keys():
d = SphericalDust(cfg.par.dustdir+'%s.hdf5'%size)
if cfg.par.SUBLIMATION == True:
d.set_sublimation_temperature('fast',temperature=cfg.par.SUBLIMATION_TEMPERATURE)
#m.add_density_grid(dustdens * frac[size], cfg.par.dustdir+'%s.hdf5' % size)
m.add_density_grid(dustdens*frac[size],d,specific_energy=specific_energy)
m.set_enforce_energy_range(cfg.par.enforce_energy_range)
else:
d = SphericalDust(cfg.par.dustdir+cfg.par.dustfile)
if cfg.par.SUBLIMATION == True:
d.set_sublimation_temperature('fast',temperature=cfg.par.SUBLIMATION_TEMPERATURE)
m.add_density_grid(dustdens,d,specific_energy=specific_energy)
#m.add_density_grid(dustdens,cfg.par.dustdir+cfg.par.dustfile)
m.set_specific_energy_type('additional')
return m,xcent,ycent,zcent,dx,dy,dz,reg,ds,boost
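# Usage sketch (snapshot path is a placeholder; field_add is the powderday
# field-adding callable handed in by the front end):
#   m, xcent, ycent, zcent, dx, dy, dz, reg, ds, boost = \
#       sph_m_gen('/path/to/snapshot', field_add)
#   # m is a populated hyperion Model, ready for sources to be added and m.write()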
|
[
"powderday.helpers.energy_density_absorbed_by_CMB",
"numpy.repeat",
"powderday.find_order.find_order",
"matplotlib.use",
"hyperion.model.Model",
"hyperion.dust.SphericalDust",
"numpy.squeeze",
"numpy.max",
"numpy.array",
"powderday.powderday_test_octree.test_octree",
"numpy.min",
"powderday.hyperion_octree_stats.hyperion_octree_stats",
"powderday.analytics.dump_cell_info",
"numpy.save",
"powderday.grid_construction.yt_octree_generate"
] |
[((126, 140), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (133, 140), True, 'import matplotlib as mpl\n'), ((596, 632), 'powderday.grid_construction.yt_octree_generate', 'yt_octree_generate', (['fname', 'field_add'], {}), '(fname, field_add)\n', (614, 632), False, 'from powderday.grid_construction import yt_octree_generate\n'), ((1775, 1806), 'numpy.array', 'np.array', (['[xcent, ycent, zcent]'], {}), '([xcent, ycent, zcent])\n', (1783, 1806), True, 'import numpy as np\n'), ((2377, 2394), 'numpy.array', 'np.array', (['refined'], {}), '(refined)\n', (2385, 2394), True, 'import numpy as np\n'), ((2415, 2440), 'numpy.squeeze', 'np.squeeze', (['refined_array'], {}), '(refined_array)\n', (2425, 2440), True, 'import numpy as np\n'), ((2458, 2483), 'powderday.find_order.find_order', 'find_order', (['refined_array'], {}), '(refined_array)\n', (2468, 2483), False, 'from powderday.find_order import find_order\n'), ((2815, 2849), 'powderday.hyperion_octree_stats.hyperion_octree_stats', 'hos.hyperion_octree_stats', (['refined'], {}), '(refined)\n', (2840, 2849), True, 'import powderday.hyperion_octree_stats as hos\n'), ((2856, 2891), 'powderday.powderday_test_octree.test_octree', 'pto.test_octree', (['refined', 'max_level'], {}), '(refined, max_level)\n', (2871, 2891), True, 'import powderday.powderday_test_octree as pto\n'), ((2896, 2965), 'powderday.analytics.dump_cell_info', 'dump_cell_info', (['refined', 'fc1', 'fw1', 'xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax'], {}), '(refined, fc1, fw1, xmin, xmax, ymin, ymax, zmin, zmax)\n', (2910, 2965), False, 'from powderday.analytics import dump_cell_info\n'), ((2962, 2993), 'numpy.save', 'np.save', (['"""refined.npy"""', 'refined'], {}), "('refined.npy', refined)\n", (2969, 2993), True, 'import numpy as np\n'), ((2997, 3029), 'numpy.save', 'np.save', (['"""density.npy"""', 'dustdens'], {}), "('density.npy', dustdens)\n", (3004, 3029), True, 'import numpy as np\n'), ((3231, 3238), 'hyperion.model.Model', 'Model', ([], {}), '()\n', (3236, 3238), False, 'from hyperion.model import Model\n'), ((3488, 3520), 'powderday.helpers.energy_density_absorbed_by_CMB', 'energy_density_absorbed_by_CMB', ([], {}), '()\n', (3518, 3520), False, 'from powderday.helpers import energy_density_absorbed_by_CMB\n'), ((3543, 3599), 'numpy.repeat', 'np.repeat', (['energy_density_absorbed.value', 'dustdens.shape'], {}), '(energy_density_absorbed.value, dustdens.shape)\n', (3552, 3599), True, 'import numpy as np\n'), ((4359, 4408), 'hyperion.dust.SphericalDust', 'SphericalDust', (['(cfg.par.dustdir + cfg.par.dustfile)'], {}), '(cfg.par.dustdir + cfg.par.dustfile)\n', (4372, 4408), False, 'from hyperion.dust import SphericalDust\n'), ((1419, 1431), 'numpy.max', 'np.max', (['xmax'], {}), '(xmax)\n', (1425, 1431), True, 'import numpy as np\n'), ((1432, 1444), 'numpy.min', 'np.min', (['xmin'], {}), '(xmin)\n', (1438, 1444), True, 'import numpy as np\n'), ((1462, 1474), 'numpy.max', 'np.max', (['ymax'], {}), '(ymax)\n', (1468, 1474), True, 'import numpy as np\n'), ((1475, 1487), 'numpy.min', 'np.min', (['ymin'], {}), '(ymin)\n', (1481, 1487), True, 'import numpy as np\n'), ((1505, 1517), 'numpy.max', 'np.max', (['zmax'], {}), '(zmax)\n', (1511, 1517), True, 'import numpy as np\n'), ((1518, 1530), 'numpy.min', 'np.min', (['zmin'], {}), '(zmin)\n', (1524, 1530), True, 'import numpy as np\n'), ((3909, 3958), 'hyperion.dust.SphericalDust', 'SphericalDust', (["(cfg.par.dustdir + '%s.hdf5' % size)"], {}), "(cfg.par.dustdir + '%s.hdf5' % size)\n", (3922, 3958), False, 'from 
hyperion.dust import SphericalDust\n')]
|
import numpy as np
from lib.lif import LIF, ParamsLIF
from lib.causal import causaleffect
#Set x = 0, sigma = 10
#wvals = 2..20
sigma = 10
mu = 1
tau = 1
t = 500
params = ParamsLIF(sigma = sigma, mu = mu, tau = tau)
lif = LIF(params, t = t)
lif.x = 0
#Simulate for a range of $W$ values.
N = 19
nsims = 1
wmax = 20
n = params.n
deltaT = 50
#Play with different c values
cvals = [0.01, 0.25, 0.5, 0.75, 0.99]
for c in cvals:
print("Running simulations for c = %f"%c)
outfile = './sweeps/param_w_N_%d_nsims_%d_c_%f_deltaT_counterfactual_simulations.npz'%(N, nsims, c)
params.c = c
lif.setup(params)
lif.x = 0
wvals = np.linspace(2, wmax, N)
vs = np.zeros((N, N, nsims, n, lif.T), dtype=np.float16)
hs = np.zeros((N, N, nsims, n, lif.T), dtype=np.bool)
us = np.zeros((N, N, nsims, n, lif.T), dtype=np.float16)
for i,w0 in enumerate(wvals):
for j,w1 in enumerate(wvals):
print("Running %d simulations with w0=%f, w1=%f"%(nsims, w0, w1))
lif.W = np.array([w0, w1])
for k in range(nsims):
(v, h, Cost, betas, u) = lif.simulate(deltaT)
vs[i,j,k,:] = v
hs[i,j,k,:] = h
us[i,j,k,:] = u
#Save output
np.savez(outfile, vs = vs, hs = hs, params = params, wvals = wvals\
, nsims = nsims, us = us)
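# The arrays saved above can be reloaded later for analysis, e.g.:
#   data = np.load(outfile)
#   vs, hs, us = data['vs'], data['hs'], data['us']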
|
[
"numpy.savez",
"lib.lif.ParamsLIF",
"lib.lif.LIF",
"numpy.array",
"numpy.linspace",
"numpy.zeros"
] |
[((172, 210), 'lib.lif.ParamsLIF', 'ParamsLIF', ([], {'sigma': 'sigma', 'mu': 'mu', 'tau': 'tau'}), '(sigma=sigma, mu=mu, tau=tau)\n', (181, 210), False, 'from lib.lif import LIF, ParamsLIF\n'), ((223, 239), 'lib.lif.LIF', 'LIF', (['params'], {'t': 't'}), '(params, t=t)\n', (226, 239), False, 'from lib.lif import LIF, ParamsLIF\n'), ((625, 648), 'numpy.linspace', 'np.linspace', (['(2)', 'wmax', 'N'], {}), '(2, wmax, N)\n', (636, 648), True, 'import numpy as np\n'), ((655, 706), 'numpy.zeros', 'np.zeros', (['(N, N, nsims, n, lif.T)'], {'dtype': 'np.float16'}), '((N, N, nsims, n, lif.T), dtype=np.float16)\n', (663, 706), True, 'import numpy as np\n'), ((713, 761), 'numpy.zeros', 'np.zeros', (['(N, N, nsims, n, lif.T)'], {'dtype': 'np.bool'}), '((N, N, nsims, n, lif.T), dtype=np.bool)\n', (721, 761), True, 'import numpy as np\n'), ((768, 819), 'numpy.zeros', 'np.zeros', (['(N, N, nsims, n, lif.T)'], {'dtype': 'np.float16'}), '((N, N, nsims, n, lif.T), dtype=np.float16)\n', (776, 819), True, 'import numpy as np\n'), ((1190, 1269), 'numpy.savez', 'np.savez', (['outfile'], {'vs': 'vs', 'hs': 'hs', 'params': 'params', 'wvals': 'wvals', 'nsims': 'nsims', 'us': 'us'}), '(outfile, vs=vs, hs=hs, params=params, wvals=wvals, nsims=nsims, us=us)\n', (1198, 1269), True, 'import numpy as np\n'), ((978, 996), 'numpy.array', 'np.array', (['[w0, w1]'], {}), '([w0, w1])\n', (986, 996), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import sys
import numpy as np
import cv2
import time
def get_time(start_time):
return int((time.time() - start_time) * 1000)
def is_inside(inside, outside, limit_val=-1):
point_limit = limit_val * len(inside)
if limit_val < 0:
point_limit = 1
    in_point = 0
for i in inside:
is_in = cv2.pointPolygonTest(outside, tuple(i[0]), False)
if is_in >= 0:
in_point += 1
if in_point >= point_limit:
return True
return False
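# is_inside counts how many points of `inside` fall within the contour `outside`
# (cv2.pointPolygonTest >= 0) and returns True once limit_val * len(inside) points
# are inside; with the default limit_val=-1 a single matching point is enough.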
start_time = time.time()
# checking arguments
arg = {}
for a in sys.argv[1:]:
if (a[0] == "-"):
a = a[1:]
a = a.split("=")
if len(a) == 2:
arg[a[0]] = a[1]
elif len(a) == 1:
arg[a[0]] = ""
else:
sys.exit(3)
else:
sys.exit(2)
if "input" not in arg:
sys.exit(1)
input_name, input_ext = arg["input"].split(".")
img = cv2.imread(input_name + "." + input_ext)
if img is None:
sys.exit(1)
# resizing image if bigger than max values
h, w, c = img.shape
max_width = 1920
max_height = 1080
if (w > max_width) or (h > max_height):
ratio = min(max_width / w, max_height / h)
new_size = (round(w * ratio), round(h * ratio))
if "silent" not in arg:
print("%-6s ms| Resizing image, new size: %dx%d, %.2f%%"%(get_time(start_time), new_size[0], new_size[1], ratio))
img = cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)
# denoising the image
img_blur = cv2.fastNlMeansDenoisingColored(img, None, 15, 10, 7, 21)
# applying blur until desired values
blur_limit = float(15)
if "blur-limit" in arg:
blur_limit = float(arg["blur-limit"])
ok = False
while ok == False:
img_blur = cv2.GaussianBlur(img_blur, (3, 3), 0)
detected_blur = cv2.Laplacian(img_blur, cv2.CV_64F).var() * 100000 / (img.shape[0] * img.shape[1])
if "silent" not in arg:
print("%-6s ms| Blur value: %.2f"%(get_time(start_time), detected_blur))
if detected_blur <= blur_limit:
ok = True
# grayscaling and thresholding the image
img_gray = cv2.cvtColor(img_blur, cv2.COLOR_BGR2GRAY)
thr = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[0]
# detecting edges and finding contours
img_edges = cv2.Canny(img_gray, thr, 0.5 * thr)
#kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,1))
#img_edges = cv2.morphologyEx(img_edges, cv2.MORPH_CLOSE, kernel)
cnt, hier = cv2.findContours(img_edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
ch = []
if True:
img_area = img_edges.shape[0] * img_edges.shape[1]
cnt = [cv2.convexHull(cnt[i], False) for i in range(len(cnt))]
good = []
for c in cnt:
if (img_area / 100) >= cv2.contourArea(c) >= (img_area / 10000):
good.append(c)
cnt = good
ch = [[cnt[i], i] for i in range(len(cnt))]
ch_top = sorted(ch, key=lambda x : cv2.contourArea(x[0]), reverse=True)[:50]
if "silent" not in arg:
print("%-6s ms| Found %d contours."%(get_time(start_time), len(ch)))
img_filtered = img.copy()
possible = []
for t in ch_top:
inner = 0
for b in ch:
if is_inside(b[0], t[0]):
inner += 1
if inner >= 3:
possible.append(t[1])
break
if inner < 3:
# orange
img_filtered = cv2.drawContours(img_filtered, [t[0]], -1, (0, 126, 255), 1)
ch = [ch[p] for p in possible]
#ch = [[cv2.convexHull(c[0]), c[1]] for c in ch]
plates = []
for c, idx in ch:
og = c
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
c = np.int0(box)
if ((cv2.contourArea(c) / cv2.contourArea(og)) - 1) <= 0.2:
desired = 520 / 110;
current = max(rect[1]) / min(rect[1])
margin = 0.3
if desired * (1 - margin) <= current <= desired * (1 + margin):
plates.append([c, og])
else:
# red
img_filtered = cv2.drawContours(img_filtered, [c], -1, (0, 0, 255), 1)
else:
# red
img_filtered = cv2.drawContours(img_filtered, [c], -1, (0, 0, 255), 1)
good = []
for i in range(len(plates)):
ok = True
for j in range(len(plates)):
if (i != j) and is_inside(plates[j][1], plates[i][1], 1):
ok = False
break
if ok:
good.append(plates[i])
else:
# turquoise
img_filtered = cv2.drawContours(img_filtered, [plates[i][1]], -1, (255, 255, 0), 1)
plates = good
img_detected = img.copy()
candidates = []
index = 0
for p, og in plates:
mask = np.zeros(img_gray.shape, np.uint8)
img_masked = cv2.drawContours(mask, [p], 0, 255, -1,)
img_masked = cv2.bitwise_and(img_edges, img_edges, mask=mask)
cv2.drawContours(img_masked, [og], 0, 0, 2)
#kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2,2))
#print(kernel)
#img_masked = cv2.dilate(img_masked, kernel)
x, y, w, h = cv2.boundingRect(p)
crop_masked = img_masked[y:y+h, x:x+w]
crop_detected = img_detected[y:y+h, x:x+w]
cnt, hier = cv2.findContours(crop_masked, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)
if hier is None:
# purple
img_filtered = cv2.drawContours(img_filtered, [p], -1, (255, 0, 255), 1)
continue
hier = hier[0]
ch = [[cnt[i], hier[i]] for i in range(len(cnt)) if (hier[i][0] != -1) or (hier[i][1] != -1)]
for i in range(len(ch)):
ch[i][0] = cv2.convexHull(ch[i][0], False)
good = []
for i in range(len(ch)):
ok = True
for j in range(len(ch)):
if (i != j) and is_inside(ch[i][0], ch[j][0], 0.8):
ok = False
break
if ok:
good.append(ch[i])
ch = sorted(good, key=lambda x : cv2.contourArea(x[0]) * cv2.boundingRect(x[0])[3], reverse=True)[:6]
if (len(ch) >= 6):
chars = []
img_detected = cv2.drawContours(img_detected, [og], -1, (0, 255, 0), 2)
cnt = [c[0] for c in ch]
#crop_detected = cv2.drawContours(crop_detected, cnt, -1, (255, 0, 0), 1)
num = -1
for c in cnt:
num += 1
#box = cv2.boxPoints(cv2.minAreaRect(c))
#box = np.int0(box)
#crop_detected = cv2.drawContours(crop_detected, [box], -1, (255, 0, 0), 1)
x, y, w, h = cv2.boundingRect(c)
chars.append([crop_detected.copy()[y:y+h, x:x+w], x])
crop_detected = cv2.rectangle(crop_detected, (x,y), (x+w,y+h), (255, 0, 0), 1)
chars = sorted(chars, key=lambda x : x[1])
candidates.append([c[0] for c in chars])
index += 1
#cv2.imshow("Last plate", crop_masked.astype(np.uint8))
else:
# yellow
img_filtered = cv2.drawContours(img_filtered, [p], -1, (0, 255, 255), 1)
if "silent" not in arg:
print("%-6s ms| %d plates found."%(get_time(start_time), index))
idx = 0
t_num = "0123456789"
t_char = "abcdefghijklmnoprstuvwxyz"
for cnd in candidates:
idx += 1
plate = ""
pos = 0
for c in cnd:
if pos > 2:
templates = t_num
else:
templates = t_char
pos += 1
vals = []
for t in templates:
template = cv2.imread("templates/" + t + ".jpg")
h, w, col = c.shape
template = cv2.resize(template, (w, h), interpolation=cv2.INTER_AREA)
t_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
c_gray = cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)
t_gray = cv2.adaptiveThreshold(t_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 15, 0)
c_gray = cv2.adaptiveThreshold(c_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 15, 0)
#template = cv2.threshold(template, 126, 255, cv2.THRESH_BINARY)[1]
#cv2.imshow("org", c_gray.astype(np.uint8))
#cv2.imshow("tmp", t_gray.astype(np.uint8))
vals.append([t, cv2.matchTemplate(t_gray, c_gray, cv2.TM_SQDIFF)[0][0]])
plate += sorted(vals, key=lambda x : x[1])[0][0]
plate = plate.upper()
plate = plate[:3] + "-" + plate[3:]
if "silent" not in arg:
print("Plate " + str(idx) + " number:", plate)
else:
print(plate)
if "silent" not in arg:
print("Executed in %d ms" % get_time(start_time))
if "no-image" not in arg:
concat = np.concatenate((cv2.cvtColor(img_edges, cv2.COLOR_GRAY2BGR), img_filtered), axis = 1)
if (index > 0):
concat2 = np.concatenate((img_detected, np.zeros(img_detected.shape, dtype = np.uint8)), axis = 1)
concat = np.concatenate((concat, concat2), axis = 0)
#cv2.imshow("First detected plate", crop_masked.astype(np.uint8))
cv2.namedWindow("images", cv2.WINDOW_NORMAL)
cv2.resizeWindow("images", (1280, 720))
cv2.imshow("images", concat)
while cv2.getWindowProperty("images", cv2.WND_PROP_VISIBLE) >= 1:
if cv2.waitKey(1000) == 32: # 27
break
cv2.destroyAllWindows()
|
[
"cv2.rectangle",
"cv2.imshow",
"cv2.destroyAllWindows",
"sys.exit",
"cv2.Laplacian",
"cv2.resizeWindow",
"cv2.threshold",
"cv2.contourArea",
"cv2.minAreaRect",
"numpy.concatenate",
"cv2.matchTemplate",
"cv2.waitKey",
"cv2.drawContours",
"cv2.boxPoints",
"numpy.int0",
"cv2.cvtColor",
"cv2.getWindowProperty",
"cv2.resize",
"cv2.Canny",
"time.time",
"cv2.namedWindow",
"cv2.GaussianBlur",
"cv2.imread",
"cv2.convexHull",
"cv2.fastNlMeansDenoisingColored",
"cv2.bitwise_and",
"numpy.zeros",
"cv2.adaptiveThreshold",
"cv2.findContours",
"cv2.boundingRect"
] |
[((542, 553), 'time.time', 'time.time', ([], {}), '()\n', (551, 553), False, 'import time\n'), ((943, 983), 'cv2.imread', 'cv2.imread', (["(input_name + '.' + input_ext)"], {}), "(input_name + '.' + input_ext)\n", (953, 983), False, 'import cv2\n'), ((1506, 1563), 'cv2.fastNlMeansDenoisingColored', 'cv2.fastNlMeansDenoisingColored', (['img', 'None', '(15)', '(10)', '(7)', '(21)'], {}), '(img, None, 15, 10, 7, 21)\n', (1537, 1563), False, 'import cv2\n'), ((2094, 2136), 'cv2.cvtColor', 'cv2.cvtColor', (['img_blur', 'cv2.COLOR_BGR2GRAY'], {}), '(img_blur, cv2.COLOR_BGR2GRAY)\n', (2106, 2136), False, 'import cv2\n'), ((2268, 2303), 'cv2.Canny', 'cv2.Canny', (['img_gray', 'thr', '(0.5 * thr)'], {}), '(img_gray, thr, 0.5 * thr)\n', (2277, 2303), False, 'import cv2\n'), ((2443, 2510), 'cv2.findContours', 'cv2.findContours', (['img_edges', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(img_edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (2459, 2510), False, 'import cv2\n'), ((875, 886), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (883, 886), False, 'import sys\n'), ((1005, 1016), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1013, 1016), False, 'import sys\n'), ((1416, 1471), 'cv2.resize', 'cv2.resize', (['img', 'new_size'], {'interpolation': 'cv2.INTER_AREA'}), '(img, new_size, interpolation=cv2.INTER_AREA)\n', (1426, 1471), False, 'import cv2\n'), ((1737, 1774), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img_blur', '(3, 3)', '(0)'], {}), '(img_blur, (3, 3), 0)\n', (1753, 1774), False, 'import cv2\n'), ((2144, 2212), 'cv2.threshold', 'cv2.threshold', (['img_gray', '(0)', '(255)', '(cv2.THRESH_BINARY | cv2.THRESH_OTSU)'], {}), '(img_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n', (2157, 2212), False, 'import cv2\n'), ((3500, 3518), 'cv2.minAreaRect', 'cv2.minAreaRect', (['c'], {}), '(c)\n', (3515, 3518), False, 'import cv2\n'), ((3530, 3549), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (3543, 3549), False, 'import cv2\n'), ((3558, 3570), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (3565, 3570), True, 'import numpy as np\n'), ((4517, 4551), 'numpy.zeros', 'np.zeros', (['img_gray.shape', 'np.uint8'], {}), '(img_gray.shape, np.uint8)\n', (4525, 4551), True, 'import numpy as np\n'), ((4569, 4608), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[p]', '(0)', '(255)', '(-1)'], {}), '(mask, [p], 0, 255, -1)\n', (4585, 4608), False, 'import cv2\n'), ((4627, 4675), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img_edges', 'img_edges'], {'mask': 'mask'}), '(img_edges, img_edges, mask=mask)\n', (4642, 4675), False, 'import cv2\n'), ((4681, 4724), 'cv2.drawContours', 'cv2.drawContours', (['img_masked', '[og]', '(0)', '(0)', '(2)'], {}), '(img_masked, [og], 0, 0, 2)\n', (4697, 4724), False, 'import cv2\n'), ((4874, 4893), 'cv2.boundingRect', 'cv2.boundingRect', (['p'], {}), '(p)\n', (4890, 4893), False, 'import cv2\n'), ((5001, 5073), 'cv2.findContours', 'cv2.findContours', (['crop_masked', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_TC89_KCOS'], {}), '(crop_masked, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)\n', (5017, 5073), False, 'import cv2\n'), ((8659, 8703), 'cv2.namedWindow', 'cv2.namedWindow', (['"""images"""', 'cv2.WINDOW_NORMAL'], {}), "('images', cv2.WINDOW_NORMAL)\n", (8674, 8703), False, 'import cv2\n'), ((8708, 8747), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""images"""', '(1280, 720)'], {}), "('images', (1280, 720))\n", (8724, 8747), False, 'import cv2\n'), ((8752, 8780), 'cv2.imshow', 'cv2.imshow', (['"""images"""', 'concat'], {}), "('images', concat)\n", 
(8762, 8780), False, 'import cv2\n'), ((8916, 8939), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8937, 8939), False, 'import cv2\n'), ((835, 846), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (843, 846), False, 'import sys\n'), ((2596, 2625), 'cv2.convexHull', 'cv2.convexHull', (['cnt[i]', '(False)'], {}), '(cnt[i], False)\n', (2610, 2625), False, 'import cv2\n'), ((3304, 3364), 'cv2.drawContours', 'cv2.drawContours', (['img_filtered', '[t[0]]', '(-1)', '(0, 126, 255)', '(1)'], {}), '(img_filtered, [t[0]], -1, (0, 126, 255), 1)\n', (3320, 3364), False, 'import cv2\n'), ((4002, 4057), 'cv2.drawContours', 'cv2.drawContours', (['img_filtered', '[c]', '(-1)', '(0, 0, 255)', '(1)'], {}), '(img_filtered, [c], -1, (0, 0, 255), 1)\n', (4018, 4057), False, 'import cv2\n'), ((4347, 4415), 'cv2.drawContours', 'cv2.drawContours', (['img_filtered', '[plates[i][1]]', '(-1)', '(255, 255, 0)', '(1)'], {}), '(img_filtered, [plates[i][1]], -1, (255, 255, 0), 1)\n', (4363, 4415), False, 'import cv2\n'), ((5136, 5193), 'cv2.drawContours', 'cv2.drawContours', (['img_filtered', '[p]', '(-1)', '(255, 0, 255)', '(1)'], {}), '(img_filtered, [p], -1, (255, 0, 255), 1)\n', (5152, 5193), False, 'import cv2\n'), ((5378, 5409), 'cv2.convexHull', 'cv2.convexHull', (['ch[i][0]', '(False)'], {}), '(ch[i][0], False)\n', (5392, 5409), False, 'import cv2\n'), ((5837, 5893), 'cv2.drawContours', 'cv2.drawContours', (['img_detected', '[og]', '(-1)', '(0, 255, 0)', '(2)'], {}), '(img_detected, [og], -1, (0, 255, 0), 2)\n', (5853, 5893), False, 'import cv2\n'), ((6677, 6734), 'cv2.drawContours', 'cv2.drawContours', (['img_filtered', '[p]', '(-1)', '(0, 255, 255)', '(1)'], {}), '(img_filtered, [p], -1, (0, 255, 255), 1)\n', (6693, 6734), False, 'import cv2\n'), ((8536, 8577), 'numpy.concatenate', 'np.concatenate', (['(concat, concat2)'], {'axis': '(0)'}), '((concat, concat2), axis=0)\n', (8550, 8577), True, 'import numpy as np\n'), ((8793, 8846), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (['"""images"""', 'cv2.WND_PROP_VISIBLE'], {}), "('images', cv2.WND_PROP_VISIBLE)\n", (8814, 8846), False, 'import cv2\n'), ((2716, 2734), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (2731, 2734), False, 'import cv2\n'), ((3899, 3954), 'cv2.drawContours', 'cv2.drawContours', (['img_filtered', '[c]', '(-1)', '(0, 0, 255)', '(1)'], {}), '(img_filtered, [c], -1, (0, 0, 255), 1)\n', (3915, 3954), False, 'import cv2\n'), ((6267, 6286), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (6283, 6286), False, 'import cv2\n'), ((6381, 6449), 'cv2.rectangle', 'cv2.rectangle', (['crop_detected', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(1)'], {}), '(crop_detected, (x, y), (x + w, y + h), (255, 0, 0), 1)\n', (6394, 6449), False, 'import cv2\n'), ((7159, 7196), 'cv2.imread', 'cv2.imread', (["('templates/' + t + '.jpg')"], {}), "('templates/' + t + '.jpg')\n", (7169, 7196), False, 'import cv2\n'), ((7252, 7310), 'cv2.resize', 'cv2.resize', (['template', '(w, h)'], {'interpolation': 'cv2.INTER_AREA'}), '(template, (w, h), interpolation=cv2.INTER_AREA)\n', (7262, 7310), False, 'import cv2\n'), ((7333, 7375), 'cv2.cvtColor', 'cv2.cvtColor', (['template', 'cv2.COLOR_BGR2GRAY'], {}), '(template, cv2.COLOR_BGR2GRAY)\n', (7345, 7375), False, 'import cv2\n'), ((7397, 7432), 'cv2.cvtColor', 'cv2.cvtColor', (['c', 'cv2.COLOR_BGR2GRAY'], {}), '(c, cv2.COLOR_BGR2GRAY)\n', (7409, 7432), False, 'import cv2\n'), ((7455, 7556), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['t_gray', '(255)', 
'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY_INV', '(15)', '(0)'], {}), '(t_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY_INV, 15, 0)\n', (7476, 7556), False, 'import cv2\n'), ((7573, 7674), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['c_gray', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY_INV', '(15)', '(0)'], {}), '(c_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY_INV, 15, 0)\n', (7594, 7674), False, 'import cv2\n'), ((8321, 8364), 'cv2.cvtColor', 'cv2.cvtColor', (['img_edges', 'cv2.COLOR_GRAY2BGR'], {}), '(img_edges, cv2.COLOR_GRAY2BGR)\n', (8333, 8364), False, 'import cv2\n'), ((8864, 8881), 'cv2.waitKey', 'cv2.waitKey', (['(1000)'], {}), '(1000)\n', (8875, 8881), False, 'import cv2\n'), ((120, 131), 'time.time', 'time.time', ([], {}), '()\n', (129, 131), False, 'import time\n'), ((805, 816), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (813, 816), False, 'import sys\n'), ((2885, 2906), 'cv2.contourArea', 'cv2.contourArea', (['x[0]'], {}), '(x[0])\n', (2900, 2906), False, 'import cv2\n'), ((3581, 3599), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (3596, 3599), False, 'import cv2\n'), ((3602, 3621), 'cv2.contourArea', 'cv2.contourArea', (['og'], {}), '(og)\n', (3617, 3621), False, 'import cv2\n'), ((8460, 8504), 'numpy.zeros', 'np.zeros', (['img_detected.shape'], {'dtype': 'np.uint8'}), '(img_detected.shape, dtype=np.uint8)\n', (8468, 8504), True, 'import numpy as np\n'), ((1795, 1830), 'cv2.Laplacian', 'cv2.Laplacian', (['img_blur', 'cv2.CV_64F'], {}), '(img_blur, cv2.CV_64F)\n', (1808, 1830), False, 'import cv2\n'), ((5702, 5723), 'cv2.contourArea', 'cv2.contourArea', (['x[0]'], {}), '(x[0])\n', (5717, 5723), False, 'import cv2\n'), ((5726, 5748), 'cv2.boundingRect', 'cv2.boundingRect', (['x[0]'], {}), '(x[0])\n', (5742, 5748), False, 'import cv2\n'), ((7892, 7940), 'cv2.matchTemplate', 'cv2.matchTemplate', (['t_gray', 'c_gray', 'cv2.TM_SQDIFF'], {}), '(t_gray, c_gray, cv2.TM_SQDIFF)\n', (7909, 7940), False, 'import cv2\n')]
|
# Part of code was adpated from https://github.com/r9y9/deepvoice3_pytorch/tree/master/compute_timestamp_ratio.py
# Copyright (c) 2017: <NAME>.
import argparse
import sys
import numpy as np
from hparams import hparams, hparams_debug_string
from deepvoice3_paddle.data import TextDataSource, MelSpecDataSource
from nnmnkwii.datasets import FileSourceDataset
from tqdm import trange
from deepvoice3_paddle import frontend
def build_parser():
parser = argparse.ArgumentParser(
description="Compute output/input timestamp ratio.")
parser.add_argument(
"--hparams", type=str, default="", help="Hyper parameters.")
parser.add_argument(
"--preset",
type=str,
required=True,
help="Path of preset parameters (json).")
parser.add_argument("data_root", type=str, help="path of the dataset.")
return parser
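# Example invocation (preset path and data_root below are placeholders):
#   python compute_timestamp_ratio.py --preset=presets/deepvoice3.json ./data/ljspeech
# prints: input_timestamps output_timestamps output/input ratio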
if __name__ == "__main__":
parser = build_parser()
args, _ = parser.parse_known_args()
data_root = args.data_root
preset = args.preset
# Load preset if specified
if preset is not None:
with open(preset) as f:
hparams.parse_json(f.read())
# Override hyper parameters
hparams.parse(args.hparams)
assert hparams.name == "deepvoice3"
# Code below
X = FileSourceDataset(TextDataSource(data_root))
Mel = FileSourceDataset(MelSpecDataSource(data_root))
in_sizes = []
out_sizes = []
for i in trange(len(X)):
x, m = X[i], Mel[i]
if X.file_data_source.multi_speaker:
x = x[0]
in_sizes.append(x.shape[0])
out_sizes.append(m.shape[0])
in_sizes = np.array(in_sizes)
out_sizes = np.array(out_sizes)
input_timestamps = np.sum(in_sizes)
output_timestamps = np.sum(
out_sizes) / hparams.outputs_per_step / hparams.downsample_step
print(input_timestamps, output_timestamps,
output_timestamps / input_timestamps)
sys.exit(0)
|
[
"argparse.ArgumentParser",
"deepvoice3_paddle.data.TextDataSource",
"deepvoice3_paddle.data.MelSpecDataSource",
"hparams.hparams.parse",
"numpy.array",
"numpy.sum",
"sys.exit"
] |
[((456, 532), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute output/input timestamp ratio."""'}), "(description='Compute output/input timestamp ratio.')\n", (479, 532), False, 'import argparse\n'), ((1188, 1215), 'hparams.hparams.parse', 'hparams.parse', (['args.hparams'], {}), '(args.hparams)\n', (1201, 1215), False, 'from hparams import hparams, hparams_debug_string\n'), ((1635, 1653), 'numpy.array', 'np.array', (['in_sizes'], {}), '(in_sizes)\n', (1643, 1653), True, 'import numpy as np\n'), ((1670, 1689), 'numpy.array', 'np.array', (['out_sizes'], {}), '(out_sizes)\n', (1678, 1689), True, 'import numpy as np\n'), ((1714, 1730), 'numpy.sum', 'np.sum', (['in_sizes'], {}), '(in_sizes)\n', (1720, 1730), True, 'import numpy as np\n'), ((1935, 1946), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1943, 1946), False, 'import sys\n'), ((1300, 1325), 'deepvoice3_paddle.data.TextDataSource', 'TextDataSource', (['data_root'], {}), '(data_root)\n', (1314, 1325), False, 'from deepvoice3_paddle.data import TextDataSource, MelSpecDataSource\n'), ((1355, 1383), 'deepvoice3_paddle.data.MelSpecDataSource', 'MelSpecDataSource', (['data_root'], {}), '(data_root)\n', (1372, 1383), False, 'from deepvoice3_paddle.data import TextDataSource, MelSpecDataSource\n'), ((1755, 1772), 'numpy.sum', 'np.sum', (['out_sizes'], {}), '(out_sizes)\n', (1761, 1772), True, 'import numpy as np\n')]
|
import os
import sys
import cv2
import numpy as np
class Equirectangular:
def __init__(self, img):
self._img = img
#self._img = cv2.imread(img_name, cv2.IMREAD_COLOR)
[self._height, self._width, _] = self._img.shape
print(self._img.shape)
def GetPerspective(self, FOV, THETA, PHI, height, width):
#
# THETA is left/right angle, PHI is up/down angle, both in degree
#
equ_h = self._height
equ_w = self._width
equ_cx = (equ_w - 1) / 2.0
equ_cy = (equ_h - 1) / 2.0
wFOV = FOV
hFOV = float(height) / width * wFOV
w_len = np.tan(np.radians(wFOV / 2.0))
h_len = np.tan(np.radians(hFOV / 2.0))
x_map = np.ones([height, width], np.float32)
y_map = np.tile(np.linspace(-w_len, w_len,width), [height,1])
z_map = -np.tile(np.linspace(-h_len, h_len,height), [width,1]).T
D = np.sqrt(x_map**2 + y_map**2 + z_map**2)
xyz = np.stack((x_map,y_map,z_map),axis=2)/np.repeat(D[:, :, np.newaxis], 3, axis=2)
y_axis = np.array([0.0, 1.0, 0.0], np.float32)
z_axis = np.array([0.0, 0.0, 1.0], np.float32)
[R1, _] = cv2.Rodrigues(z_axis * np.radians(THETA))
[R2, _] = cv2.Rodrigues(np.dot(R1, y_axis) * np.radians(-PHI))
xyz = xyz.reshape([height * width, 3]).T
xyz = np.dot(R1, xyz)
xyz = np.dot(R2, xyz).T
lat = np.arcsin(xyz[:, 2])
lon = np.arctan2(xyz[:, 1] , xyz[:, 0])
lon = lon.reshape([height, width]) / np.pi * 180
lat = -lat.reshape([height, width]) / np.pi * 180
lon = lon / 180 * equ_cx + equ_cx
lat = lat / 90 * equ_cy + equ_cy
persp = cv2.remap(self._img, lon.astype(np.float32), lat.astype(np.float32), cv2.INTER_CUBIC, borderMode=cv2.BORDER_WRAP)
return persp
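# Usage sketch (assumes "pano.jpg" is an equirectangular panorama on disk):
#   img = cv2.imread("pano.jpg", cv2.IMREAD_COLOR)
#   equ = Equirectangular(img)
#   # 90 degree horizontal FOV, yaw 0, pitch 0, 720x1280 output
#   persp = equ.GetPerspective(90, 0, 0, 720, 1280)
#   cv2.imwrite("perspective.jpg", persp)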
|
[
"numpy.radians",
"numpy.sqrt",
"numpy.ones",
"numpy.repeat",
"numpy.arcsin",
"numpy.array",
"numpy.dot",
"numpy.linspace",
"numpy.arctan2",
"numpy.stack"
] |
[((743, 779), 'numpy.ones', 'np.ones', (['[height, width]', 'np.float32'], {}), '([height, width], np.float32)\n', (750, 779), True, 'import numpy as np\n'), ((936, 981), 'numpy.sqrt', 'np.sqrt', (['(x_map ** 2 + y_map ** 2 + z_map ** 2)'], {}), '(x_map ** 2 + y_map ** 2 + z_map ** 2)\n', (943, 981), True, 'import numpy as np\n'), ((1095, 1132), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]', 'np.float32'], {}), '([0.0, 1.0, 0.0], np.float32)\n', (1103, 1132), True, 'import numpy as np\n'), ((1150, 1187), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]', 'np.float32'], {}), '([0.0, 0.0, 1.0], np.float32)\n', (1158, 1187), True, 'import numpy as np\n'), ((1383, 1398), 'numpy.dot', 'np.dot', (['R1', 'xyz'], {}), '(R1, xyz)\n', (1389, 1398), True, 'import numpy as np\n'), ((1445, 1465), 'numpy.arcsin', 'np.arcsin', (['xyz[:, 2]'], {}), '(xyz[:, 2])\n', (1454, 1465), True, 'import numpy as np\n'), ((1480, 1512), 'numpy.arctan2', 'np.arctan2', (['xyz[:, 1]', 'xyz[:, 0]'], {}), '(xyz[:, 1], xyz[:, 0])\n', (1490, 1512), True, 'import numpy as np\n'), ((654, 676), 'numpy.radians', 'np.radians', (['(wFOV / 2.0)'], {}), '(wFOV / 2.0)\n', (664, 676), True, 'import numpy as np\n'), ((701, 723), 'numpy.radians', 'np.radians', (['(hFOV / 2.0)'], {}), '(hFOV / 2.0)\n', (711, 723), True, 'import numpy as np\n'), ((804, 837), 'numpy.linspace', 'np.linspace', (['(-w_len)', 'w_len', 'width'], {}), '(-w_len, w_len, width)\n', (815, 837), True, 'import numpy as np\n'), ((990, 1029), 'numpy.stack', 'np.stack', (['(x_map, y_map, z_map)'], {'axis': '(2)'}), '((x_map, y_map, z_map), axis=2)\n', (998, 1029), True, 'import numpy as np\n'), ((1027, 1068), 'numpy.repeat', 'np.repeat', (['D[:, :, np.newaxis]', '(3)'], {'axis': '(2)'}), '(D[:, :, np.newaxis], 3, axis=2)\n', (1036, 1068), True, 'import numpy as np\n'), ((1413, 1428), 'numpy.dot', 'np.dot', (['R2', 'xyz'], {}), '(R2, xyz)\n', (1419, 1428), True, 'import numpy as np\n'), ((1229, 1246), 'numpy.radians', 'np.radians', (['THETA'], {}), '(THETA)\n', (1239, 1246), True, 'import numpy as np\n'), ((1280, 1298), 'numpy.dot', 'np.dot', (['R1', 'y_axis'], {}), '(R1, y_axis)\n', (1286, 1298), True, 'import numpy as np\n'), ((1301, 1317), 'numpy.radians', 'np.radians', (['(-PHI)'], {}), '(-PHI)\n', (1311, 1317), True, 'import numpy as np\n'), ((875, 909), 'numpy.linspace', 'np.linspace', (['(-h_len)', 'h_len', 'height'], {}), '(-h_len, h_len, height)\n', (886, 909), True, 'import numpy as np\n')]
|
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError: # mpl is optional
pass
def compareplot(comp_df, insample_dev=True, se=True, dse=True, ax=None,
plot_kwargs=None):
"""
Model comparison summary plot in the style of the one used in the book
Statistical Rethinking by <NAME>.
Parameters
----------
comp_df: DataFrame
the result of the `pm.compare()` function
insample_dev : bool
plot the in-sample deviance, that is the value of the IC without the
penalization given by the effective number of parameters (pIC).
Defaults to True
se : bool
plot the standard error of the IC estimate. Defaults to True
dse : bool
plot standard error of the difference in IC between each model and the
top-ranked model. Defaults to True
plot_kwargs : dict
Optional arguments for plot elements. Currently accepts 'color_ic',
'marker_ic', 'color_insample_dev', 'marker_insample_dev', 'color_dse',
'marker_dse', 'ls_min_ic' 'color_ls_min_ic', 'fontsize'
ax : axes
Matplotlib axes. Defaults to None
Returns
-------
ax : matplotlib axes
"""
if ax is None:
_, ax = plt.subplots()
if plot_kwargs is None:
plot_kwargs = {}
yticks_pos, step = np.linspace(0, -1, (comp_df.shape[0] * 2) - 1,
retstep=True)
yticks_pos[1::2] = yticks_pos[1::2] + step / 2
yticks_labels = [''] * len(yticks_pos)
ic = 'WAIC'
if ic not in comp_df.columns:
ic = 'LOO'
if dse:
yticks_labels[0] = comp_df.index[0]
yticks_labels[2::2] = comp_df.index[1:]
ax.set_yticks(yticks_pos)
ax.errorbar(x=comp_df[ic].iloc[1:],
y=yticks_pos[1::2],
xerr=comp_df.dSE[1:],
color=plot_kwargs.get('color_dse', 'grey'),
fmt=plot_kwargs.get('marker_dse', '^'))
else:
yticks_labels = comp_df.index
ax.set_yticks(yticks_pos[::2])
if se:
ax.errorbar(x=comp_df[ic],
y=yticks_pos[::2],
xerr=comp_df.SE,
color=plot_kwargs.get('color_ic', 'k'),
fmt=plot_kwargs.get('marker_ic', 'o'),
mfc='None',
mew=1)
else:
ax.plot(comp_df[ic],
yticks_pos[::2],
color=plot_kwargs.get('color_ic', 'k'),
marker=plot_kwargs.get('marker_ic', 'o'),
mfc='None',
mew=1,
lw=0)
if insample_dev:
ax.plot(comp_df[ic] - (2 * comp_df['p'+ic]),
yticks_pos[::2],
color=plot_kwargs.get('color_insample_dev', 'k'),
marker=plot_kwargs.get('marker_insample_dev', 'o'),
lw=0)
ax.axvline(comp_df[ic].iloc[0],
ls=plot_kwargs.get('ls_min_ic', '--'),
color=plot_kwargs.get('color_ls_min_ic', 'grey'))
ax.set_xlabel('Deviance', fontsize=plot_kwargs.get('fontsize', 14))
ax.set_yticklabels(yticks_labels)
ax.set_ylim(-1 + step, 0 - step)
return ax
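# Usage sketch (model fitting omitted; comp_df is whatever pm.compare() returns):
#   comp_df = pm.compare(...)      # DataFrame ranking the candidate models by WAIC/LOO
#   ax = compareplot(comp_df)
#   ax.figure.savefig('model_comparison.png')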
|
[
"numpy.linspace",
"matplotlib.pyplot.subplots"
] |
[((1348, 1406), 'numpy.linspace', 'np.linspace', (['(0)', '(-1)', '(comp_df.shape[0] * 2 - 1)'], {'retstep': '(True)'}), '(0, -1, comp_df.shape[0] * 2 - 1, retstep=True)\n', (1359, 1406), True, 'import numpy as np\n'), ((1255, 1269), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1267, 1269), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import re
from nltk import Tree
from nltk import induce_pcfg
from nltk import Nonterminal
from nltk.parse.generate import generate
epsilon = 1e-20
class corpus:
# stores all sentence forms in data
def __init__(self):
self.sentence_forms = {}
for i in range(6): # init six levels
self.sentence_forms[i + 1] = {}
self.corp = []
    def sort_sentence_types(self, types):
        # frequency cutoffs for levels 1-6; level 6 keeps every sentence form
        cutoffs = [500, 300, 100, 50, 10, 0]
        for t in types:
            freq = types[t]
            # strip newline characters because they should not count as terminals
            key = t.rstrip("\n")
            for level, cutoff in enumerate(cutoffs, start=1):
                if freq >= cutoff:
                    self.sentence_forms[level][key] = freq
FREE = "Free"
PRG = "Regular"
PCFG = "Context Free"
def geometric(n, p):
return p * np.power(1.0 - p, n - 1, dtype=np.float64)
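# e.g. geometric(3, 0.5) = 0.5 * (1 - 0.5)**2 = 0.125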
def compute_prior(G, corpus, n, level, flag=False): # flag for NLTK
# P : number of productions for grammar G
# n: number of non terminals for grammar G
# V: Vocabulary size = # num non terminals + # num terminals = len(corpus[level])
productions = None
if flag:
productions = corpus
else:
productions = G
P = len(productions)
V = None
if flag:
V = len(corpus)
else:
V = len(corpus.sentence_forms[level])
prob_P = np.log(geometric(P, 0.5)+epsilon)
prob_n = np.log(geometric(n, 0.5)+epsilon)
log_prior = prob_P + prob_n
for i in range(P):
if flag:
N_i = len(productions[i])
else:
N_i = len(list(productions.keys())[i])# num symbols for production i
prob_N_i = geometric(N_i, 0.5)
log_prior -= (N_i * np.log(V))
        # keep the production-length term in log space, matching prob_P and prob_n above
        log_prior += np.log(prob_N_i + epsilon)
return log_prior
def compute_log_likelihood(corpus, G, T, level, flag=False):
# k: number of unique sentence types in corpus
log_likelihood = 0
D = None
k = None
if flag:
k = len(corpus)
D = corpus
else:
D = corpus.corp # sentence forms at specified level in corpus
k = len(D) # get num diff sentence forms at given level
productions = G
for i in range(k):
sl = None
if flag:
sl = compute_sentence_likelihood_nltk(productions, D[:50])
else:
sentence_i = D[i].split(" ")
sl = compute_sentence_likelihood(sentence_i, productions)
if sl != 0:
log_likelihood += np.log(sl)
return log_likelihood
def compute_sentence_likelihood(S_i, productions):
# sum of probability of generating S_i under all possible parses
# productions = "S -> U" # example
prob = 0
prods = list(productions.keys())
for p in prods:
        p_split = p.split("->") # change based on how the prod symbols are separated
s1 = p_split[0]
s2 = p_split[1] # should be only two prod symbols per production
for i, token in enumerate(S_i[:-1]):
if s1 == token and s2 == S_i[i + 1]:
prob += productions[p]
return prob
def compute_sentence_likelihood_nltk(G, productions):
prob = 0
prods = list(G.keys())
S_i = productions
for p in prods:
p_split = p.split(" -> ")
s1 = p_split[0]
s2 = p_split[1]
for i, token in enumerate(S_i[:-1]):
if s1 == token and s2 == S_i[i + 1]:
prob += np.log(G[p])
return prob
def compute_log_posterior(log_prior, log_likelihood):
return log_prior + log_likelihood + np.log((1.0 / 3.0))
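# The log(1.0 / 3.0) term above is a uniform prior over the three grammar
# classes defined earlier (FREE, PRG, PCFG).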
def test_functions(adam_levelk, k):
terminal_pattern = "[.?!]"
levelk_terminal = 0
for j in adam_levelk.keys():
terminal = re.search(terminal_pattern, j)
if terminal:
levelk_terminal += 1
# #turn grammar into probabilities
total = sum(adam_levelk.values())
adam_levelk_probabilities = {}
for j in adam_levelk.keys():
adam_levelk_probabilities[j] = adam_levelk[j]/total
levelk_nonterminal = (len(adam_levelk) - levelk_terminal)
prior = compute_prior(adam_levelk_probabilities, data, levelk_nonterminal, k)
likelihood = compute_log_likelihood(data, adam_levelk_probabilities, PCFG, k)
logpost = compute_log_posterior(prior, likelihood)
return prior, likelihood, logpost
import os
directory = "Adam/"
people = ["*MOT", "*URS", "*RIC", "*COL", "*CHI"]
def read_and_return(directory):
speakers = {}
struct = {}
append_next = False
for file_path in os.listdir(directory):
with open("Adam/" + file_path, "r") as f:
speakers[file_path] = []
struct[file_path] = []
for line in f:
split = line.split(" ")
if append_next and split[0][:4] == "%mor":
content = split[0].split("\t")[-1]
struct[file_path].append(content.split(" "))
elif split[0][:4] in people[:-1]:
speakers[file_path].append(split)
append_next = True
else:
append_next = False
return speakers, struct
def loadTrees(path):
    with open(path, 'r') as f:
data = f.read().split("\n\n")
flattened_data = []
for i in range(len(data)):
#flatten it and strip extra whitespace
flattened_data.append(" ".join(data[i].replace("\n", "").split()))
tree = []
for i, s in enumerate(flattened_data[:-2]):
if "R" in s:
tree.append(Tree.fromstring(s))
return tree
def productionsFromTrees(trees):
productions = []
for tree in trees:
productions += tree.productions()
return productions
def inducePCFGFromProductions(productions):
S = Nonterminal('S')
grammar = induce_pcfg(S, productions)
return grammar
if __name__ == "__main__":
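    # Pipeline: read the Adam transcripts, build bigram transition counts from the %mor tiers,
    # score the frequency-weighted grammar at every level, then induce an NLTK PCFG from the
    # parsed Brown/Adam trees and score it in the same way.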
speakers, struct = read_and_return(directory) # this function was used before perfors sent his data
corp = []
types = {}
for fp in struct:
for segments in struct[fp]:
t = ""
for s in segments[:-1]:
token = s.split("|")[0].split(":")[0]
if ("#" in token):
token = token.split("#")[1]
t += token + " "
corp.append(t[:-1])
splitter = t.split(" ")[:-1]
for i in range(len(splitter)):
if (i < (len(splitter) - 1)):
tok = splitter[i] + "->" + splitter[i+1]
if tok in types:
types[tok] += 1
else:
types[tok] = 1
data = corpus()
data.sort_sentence_types(types)
data.corp = corp
adam_level1 = data.sentence_forms[1]
adam_level2 = data.sentence_forms[2]
adam_level3 = data.sentence_forms[3]
adam_level4 = data.sentence_forms[4]
adam_level5 = data.sentence_forms[5]
adam_level6 = data.sentence_forms[6]
print("FREQUENCY WEIGHTED CFG")
for i in range(6):
print("----------------")
print("LEVEL " + str(i+1))
prior, likelihood, logpost = test_functions(data.sentence_forms[i+1], i+1)
print("Log Prior: " + str(prior))
print("Log Likelihood: " + str(likelihood))
print("Log Posterior: " + str(logpost))
trees = loadTrees("Parsetree/brown-adam.parsed")
productions = productionsFromTrees(trees)
nltkgrammar = inducePCFGFromProductions(productions)
grammarToParse = str(nltkgrammar).split("\n")
finalGrammar = []
grammarDict = {}
for g in grammarToParse:
finalGrammar.append(g[4:])
for fg in finalGrammar[1:]:
gg = fg.split("[")
rule = gg[0][:-1]
value = gg[1][:-1]
grammarDict[rule] = float(value)
terminal_pattern = "[.?!]"
terminal_sum = 0
for j in grammarDict.keys():
terminal = re.search(terminal_pattern, j)
if terminal:
terminal_sum += 1
print("PROBABALISTIC PCFG")
prior = compute_prior(grammarDict, productions, terminal_sum, 0, True)
print("Log Prior: " + str(prior))
likelihood = compute_log_likelihood(productions, grammarDict, PCFG, 0, True)
print("Log Likelihood: " + str(likelihood))
logpost = compute_log_posterior(prior, likelihood)
print("Log Posterior: " + str(logpost))
|
[
"os.listdir",
"numpy.power",
"nltk.Nonterminal",
"numpy.log",
"nltk.Tree.fromstring",
"nltk.induce_pcfg",
"re.search"
] |
[((5787, 5808), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (5797, 5808), False, 'import os\n'), ((7044, 7060), 'nltk.Nonterminal', 'Nonterminal', (['"""S"""'], {}), "('S')\n", (7055, 7060), False, 'from nltk import Nonterminal\n'), ((7075, 7102), 'nltk.induce_pcfg', 'induce_pcfg', (['S', 'productions'], {}), '(S, productions)\n', (7086, 7102), False, 'from nltk import induce_pcfg\n'), ((2104, 2146), 'numpy.power', 'np.power', (['(1.0 - p)', '(n - 1)'], {'dtype': 'np.float64'}), '(1.0 - p, n - 1, dtype=np.float64)\n', (2112, 2146), True, 'import numpy as np\n'), ((4807, 4824), 'numpy.log', 'np.log', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4813, 4824), True, 'import numpy as np\n'), ((4972, 5002), 're.search', 're.search', (['terminal_pattern', 'j'], {}), '(terminal_pattern, j)\n', (4981, 5002), False, 'import re\n'), ((9249, 9279), 're.search', 're.search', (['terminal_pattern', 'j'], {}), '(terminal_pattern, j)\n', (9258, 9279), False, 'import re\n'), ((2992, 3001), 'numpy.log', 'np.log', (['V'], {}), '(V)\n', (2998, 3001), True, 'import numpy as np\n'), ((3747, 3757), 'numpy.log', 'np.log', (['sl'], {}), '(sl)\n', (3753, 3757), True, 'import numpy as np\n'), ((4682, 4694), 'numpy.log', 'np.log', (['G[p]'], {}), '(G[p])\n', (4688, 4694), True, 'import numpy as np\n'), ((6811, 6829), 'nltk.Tree.fromstring', 'Tree.fromstring', (['s'], {}), '(s)\n', (6826, 6829), False, 'from nltk import Tree\n')]
|
import bpy
import numpy as np
from PIL import Image
class CarModelViewToImage():
# def __init__:
# self.camera_ = None
# self.image_folder_ = None
# self.car_width_ = 0
# self.car_length_ = 0
# self.viewport_width_ = 0
# self.viewport_height_ = 0
# self.stride_ = 0
# self.stride_radians_ = 0
# self.car_ = None
# self.scene_length_ = 0
# self.scene_height_ = 0
# self.light_ctr_ = None
def init(self, info):
"""
info: {
"car_width" : float,
"car_length": float,
"viewport_width" : float,
"viewport_height" : float,
"image_folder" : string
}
"""
# get base information
self.car_width_ = info["car_width"]
self.car_length_ = info["car_length"]
self.viewport_width_ = info["viewport_width"]
self.viewport_height_ = info["viewport_height"]
self.image_folder_ = info["image_folder"]
self.scene_length_ = self.car_length_ * 2
self.scene_height_ = self.car_length_
bpy.context.scene.render.resolution_x = self.viewport_width_
bpy.context.scene.render.resolution_y = self.viewport_height_
bpy.context.scene.render.filepath = self.image_folder_
# resize model and light
# save model dimensions and location
self.car_ = bpy.data.objects["car"]
# save light location
self.light_ctr_ = [bpy.data.objects["left_light"],
bpy.data.objects["right_light"], bpy.data.objects["top_light"]]
# move model and light
offset = self.car_.location.copy()
self.car_.location -= offset
for l in self.light_ctr_:
l.location -= offset
# calculate prop from length and resize
car_length_now = max(self.car_.dimensions)
scale_size = self.car_length_ / car_length_now
self.car_.scale *= scale_size
for l in self.light_ctr_:
l.location *= scale_size
l.scale *= scale_size
# set camera
bpy.ops.object.camera_add()
self.camera_ = bpy.data.objects["Camera"]
# set camera base info
self.camera_.data.lens_unit = "FOV"
self.camera_.data.angle = np.radians(90)
self.camera_.data.clip_start = 0.1
self.camera_.data.clip_end = self.scene_length_ * 2
# set camera constraint
bpy.ops.object.constraint_add(type="TRACK_TO")
bpy.context.object.constraints["Track To"].up_axis = 'UP_Y'
bpy.context.object.constraints["Track To"].track_axis = 'TRACK_NEGATIVE_Z'
bpy.context.object.constraints["Track To"].target = self.car_
bpy.context.object.constraints["Track To"].use_target_z = True
# set render Node
self.scene_ = bpy.context.scene
self.scene_.use_nodes = True
self.tree_ = self.scene_.node_tree
self.links_ = self.tree_.links
# clear default nodes
for n in self.tree_.nodes:
self.tree_.nodes.remove(n)
self.render_layer_ = self.tree_.nodes.new('CompositorNodeRLayers')
self.viewer_image_ = self.tree_.nodes.new('CompositorNodeViewer')
self.viewer_image_.use_alpha = False
def set_camera_pos(self, x, y, z=None):
        # convert the normalized position into real scene coordinates
real_x = np.clip(x, -1, 1) * self.scene_length_
real_y = np.clip(y, -1, 1) * self.scene_length_
self.camera_.location[0] = real_x
self.camera_.location[1] = real_y
if(z != None):
real_z = np.clip(z, 0, 1) * self.scene_height_
self.camera_.location[2] = real_z
def render_image(self, img_name, folder_path):
"""
        Render the scene and save both a color image and a depth image.
"""
filepath = folder_path + img_name
filepath_depth = folder_path + "z" + img_name
# color
self.links_.clear()
self.links_.new(self.render_layer_.outputs[0], self.viewer_image_.inputs[0])
bpy.ops.render.render()
bpy.data.images[0].save_render(filepath)
# depth
self.links_.clear()
# self.links_.new(self.render_layer_.outputs["Depth"], self.viewer_depth_.inputs[0])
self.links_.new(self.render_layer_.outputs["Depth"], self.viewer_image_.inputs[0])
bpy.ops.render.render()
pixels = bpy.data.images['Viewer Node'].pixels
pixels = np.array(pixels)[::4][::-1] # get the pixels
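        # Blender reports a very large depth value for empty background pixels;
        # map geometry to white (255) and background to black (0).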
pixels[pixels < 10000000000.0] = 255
pixels[pixels >= 10000000000.0] = 0
pix = pixels.astype(dtype=np.uint8).reshape((self.viewport_height_, self.viewport_width_))
img = Image.fromarray(pix)
img = img.transpose(Image.FLIP_LEFT_RIGHT)
img.save(filepath_depth)
def get_single_image(self, x, y, z, img_name, folder_path=""):
"""
        x, y, z: camera position as a fraction of the scene,
        with x and y in -1..1 and z in 0..1
        img_name: output file name
        folder_path: output folder path
"""
        # position the camera
self.set_camera_pos(x,y,z)
        # render and save the image
bpy.context.scene.camera = self.camera_
bpy.ops.render.render()
if(folder_path != ""):
self.render_image(img_name, folder_path)
else:
self.render_image(img_name, self.image_folder_)
def get_surround_image(self, xy, z, rotate_stride, folder_path = ""):
"""
        xy, z: camera position as a fraction of the scene,
        with xy in -1..1 and z in 0..1
        rotate_stride: rotation step in degrees
        folder_path: output folder path
"""
def set_camera_pos(angle, camera_to_origin_length):
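            # place the camera on a circle of radius camera_to_origin_length in the XY plane,
            # at the given angle (in degrees) around the Z axis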
self.camera_.location[0] = camera_to_origin_length * np.cos(np.radians(angle))
self.camera_.location[1] = camera_to_origin_length * np.sin(np.radians(angle))
        # compute the quantities needed for the rotation
bpy.context.scene.camera = self.camera_
self.stride_ = rotate_stride
self.stride_radians_ = np.radians(rotate_stride)
# set camera parameters
self.set_camera_pos(xy, 0, z)
real_xy = self.scene_length_ * np.clip(xy, -1, 1)
real_z = self.scene_height_ * np.clip(z, 0, 1)
camera_length = np.sqrt(real_xy**2 + real_z**2)
for i in range(0, 360, rotate_stride):
img_name = str(i) + ".jpg"
set_camera_pos(i, camera_length)
bpy.context.scene.camera = self.camera_
bpy.ops.render.render()
if(folder_path != ""):
self.render_image(img_name, folder_path)
else:
self.render_image(img_name, self.image_folder_)
if __name__ == '__main__':
info = {
"car_width" : 30,
"car_length": 50,
"viewport_width" : 1280,
"viewport_height" : 720,
"image_folder" : "E:/company/MyWork/Workspace/CPU_3D/resources/Huake8296/car_image/single/"
}
car_view = CarModelViewToImage()
car_view.init(info)
#car_view.get_single_image(0, 0, 1, "top_view.jpg")# have a bug
#car_view.get_surround_image(-0.6, 0.4, 90)
car_view.get_single_image(0, -0.6, 0.6, "view_front.jpg")
car_view.get_single_image(0, 0.6, 0.6, "view_back.jpg")
car_view.get_single_image(0.6, 0, 0.6, "view_left.jpg")
car_view.get_single_image(-0.6, 0, 0.6, "view_right.jpg")
car_view.get_single_image(0.6, -0.6, 0.6, "view_left_front.jpg")
car_view.get_single_image(0.6, 0.6, 0.6, "view_left_back.jpg")
car_view.get_single_image(-0.6, -0.6, 0.6, "view_right_front.jpg")
car_view.get_single_image(0.6, -0.6, 0.6, "view_right_back.jpg")
|
[
"numpy.radians",
"numpy.clip",
"bpy.ops.object.camera_add",
"PIL.Image.fromarray",
"numpy.sqrt",
"bpy.ops.object.constraint_add",
"numpy.array",
"bpy.ops.render.render"
] |
[((2180, 2207), 'bpy.ops.object.camera_add', 'bpy.ops.object.camera_add', ([], {}), '()\n', (2205, 2207), False, 'import bpy\n'), ((2367, 2381), 'numpy.radians', 'np.radians', (['(90)'], {}), '(90)\n', (2377, 2381), True, 'import numpy as np\n'), ((2526, 2572), 'bpy.ops.object.constraint_add', 'bpy.ops.object.constraint_add', ([], {'type': '"""TRACK_TO"""'}), "(type='TRACK_TO')\n", (2555, 2572), False, 'import bpy\n'), ((4068, 4091), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {}), '()\n', (4089, 4091), False, 'import bpy\n'), ((4379, 4402), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {}), '()\n', (4400, 4402), False, 'import bpy\n'), ((4732, 4752), 'PIL.Image.fromarray', 'Image.fromarray', (['pix'], {}), '(pix)\n', (4747, 4752), False, 'from PIL import Image\n'), ((5163, 5186), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {}), '()\n', (5184, 5186), False, 'import bpy\n'), ((5990, 6015), 'numpy.radians', 'np.radians', (['rotate_stride'], {}), '(rotate_stride)\n', (6000, 6015), True, 'import numpy as np\n'), ((6232, 6267), 'numpy.sqrt', 'np.sqrt', (['(real_xy ** 2 + real_z ** 2)'], {}), '(real_xy ** 2 + real_z ** 2)\n', (6239, 6267), True, 'import numpy as np\n'), ((3432, 3449), 'numpy.clip', 'np.clip', (['x', '(-1)', '(1)'], {}), '(x, -1, 1)\n', (3439, 3449), True, 'import numpy as np\n'), ((3488, 3505), 'numpy.clip', 'np.clip', (['y', '(-1)', '(1)'], {}), '(y, -1, 1)\n', (3495, 3505), True, 'import numpy as np\n'), ((6134, 6152), 'numpy.clip', 'np.clip', (['xy', '(-1)', '(1)'], {}), '(xy, -1, 1)\n', (6141, 6152), True, 'import numpy as np\n'), ((6191, 6207), 'numpy.clip', 'np.clip', (['z', '(0)', '(1)'], {}), '(z, 0, 1)\n', (6198, 6207), True, 'import numpy as np\n'), ((6470, 6493), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {}), '()\n', (6491, 6493), False, 'import bpy\n'), ((3656, 3672), 'numpy.clip', 'np.clip', (['z', '(0)', '(1)'], {}), '(z, 0, 1)\n', (3663, 3672), True, 'import numpy as np\n'), ((4484, 4500), 'numpy.array', 'np.array', (['pixels'], {}), '(pixels)\n', (4492, 4500), True, 'import numpy as np\n'), ((5741, 5758), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (5751, 5758), True, 'import numpy as np\n'), ((5832, 5849), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (5842, 5849), True, 'import numpy as np\n')]
|
import torch
import arcsim
import gc
import time
import json
import sys
import gc
import os
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
now = datetime.now()
timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
#steps = 30
#epochs= 10
steps = 40
epochs= 20
#handles = [25, 60, 30, 54] # corners
handles = [6,16,25,30,54,60,69,70] # side verts + 2 corners
#handles = [6,16,25,30,54,60,69,70,14,23,48] # side verts + inner side verts + 2 corners
#handles = [24,25,52,53,54,71] # corners but more
losses = []
param_g = torch.zeros([steps, len(handles)*3],dtype=torch.float64, requires_grad=True)
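# param_g[step, 3*i:3*i+3] is the velocity increment applied to handle i at that simulation step;
# these increments are the parameters being optimised.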
out_path = 'default_out'
os.mkdir(out_path)
with open('conf/rigidcloth/fold_starts/fold_start.json','r') as f:
config = json.load(f)
def save_config(config, file):
with open(file,'w') as f:
json.dump(config, f)
save_config(config, out_path+'/conf.json')
torch.set_num_threads(16)
scalev=1
def reset_sim(sim, epoch):
if epoch < 20:
arcsim.init_physics(out_path+'/conf.json', out_path+'/out%d'%epoch,False)
else:
arcsim.init_physics(out_path+'/conf.json',out_path+'/out',False)
def get_target_mesh():
sim = arcsim.get_sim()
arcsim.init_physics('conf/rigidcloth/fold_targets/half_fold.json',out_path+'/target',False)
#arcsim.init_physics('conf/rigidcloth/fold_targets/sides_in.json',out_path+'/target',False)
#arcsim.init_physics('conf/rigidcloth/fold_targets/diag_quarters.json',out_path+'/target',False)
global node_number
node_number = len(sim.cloths[0].mesh.nodes)
ref = [sim.cloths[0].mesh.nodes[i].x.numpy() for i in range(node_number)]
ref = torch.from_numpy(np.vstack(ref))
return ref
def get_loss(sim,ref):
reg = torch.norm(param_g, p=2)*0.001
loss = 0
print("VERTS", ref.shape[0], len(sim.cloths[0].mesh.nodes))
for i in range(ref.shape[0]):
loss += torch.norm(ref[i]-sim.cloths[0].mesh.nodes[i].x)**2
loss /= node_number
loss += reg
return loss
def run_sim(steps,sim,ref):
# sim.obstacles[2].curr_state_mesh.dummy_node.x = param_g[1]
print("step")
for step in range(steps):
print(step)
for i in range(len(handles)):
inc_v = param_g[step,3*i:3*i+3]
sim.cloths[0].mesh.nodes[handles[i]].v += inc_v
del inc_v
arcsim.sim_step()
loss = get_loss(sim,ref)
return loss
#@profile
def do_train(cur_step,optimizer,scheduler,sim):
epoch = 0
ref = get_target_mesh()
while True:
reset_sim(sim, epoch)
st = time.time()
loss = run_sim(steps, sim,ref)
en0 = time.time()
optimizer.zero_grad()
loss.backward()
en1 = time.time()
print("=======================================")
f.write('epoch {}: loss={} \n'.format(epoch, loss.data))
print('epoch {}: loss={} \n'.format(epoch, loss.data))
print('forward time={}'.format(en0-st))
print('backward time={}'.format(en1-en0))
optimizer.step()
print("Num cloth meshes", len(sim.cloths))
#arcsim.delete_mesh(sim.cloths[0].mesh)
#scheduler.step(epoch)
        losses.append(loss.item())  # store a plain float so matplotlib can plot it later
if epoch>=epochs:
break
epoch = epoch + 1
# break
def visualize_loss(losses,dir_name):
plt.plot(losses)
plt.title('losses')
plt.xlabel('epochs')
plt.ylabel('losses')
plt.savefig(dir_name+'/'+'loss.jpg')
with open(out_path+('/log%s.txt'%timestamp),'w',buffering=1) as f:
tot_step = 1
sim=arcsim.get_sim()
# reset_sim(sim)
lr = 10
momentum = 0.4
f.write('lr={} momentum={}\n'.format(lr,momentum))
optimizer = torch.optim.SGD([{'params':param_g,'lr':lr}],momentum=momentum)
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer,10,2,eta_min=0.0001)
for cur_step in range(tot_step):
do_train(cur_step,optimizer,scheduler,sim)
#visualize_loss(losses,default_dir)
visualize_loss(losses,out_path)
print("done")
|
[
"torch.optim.SGD",
"matplotlib.pyplot.savefig",
"torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
"arcsim.get_sim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"torch.set_num_threads",
"datetime.datetime.now",
"torch.norm",
"os.mkdir",
"numpy.vstack",
"time.time",
"json.load",
"matplotlib.pyplot.title",
"arcsim.init_physics",
"arcsim.sim_step",
"json.dump"
] |
[((180, 194), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (192, 194), False, 'from datetime import datetime\n'), ((659, 677), 'os.mkdir', 'os.mkdir', (['out_path'], {}), '(out_path)\n', (667, 677), False, 'import os\n'), ((909, 934), 'torch.set_num_threads', 'torch.set_num_threads', (['(16)'], {}), '(16)\n', (930, 934), False, 'import torch\n'), ((758, 770), 'json.load', 'json.load', (['f'], {}), '(f)\n', (767, 770), False, 'import json\n'), ((1192, 1208), 'arcsim.get_sim', 'arcsim.get_sim', ([], {}), '()\n', (1206, 1208), False, 'import arcsim\n'), ((1213, 1312), 'arcsim.init_physics', 'arcsim.init_physics', (['"""conf/rigidcloth/fold_targets/half_fold.json"""', "(out_path + '/target')", '(False)'], {}), "('conf/rigidcloth/fold_targets/half_fold.json', out_path +\n '/target', False)\n", (1232, 1312), False, 'import arcsim\n'), ((3328, 3344), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {}), '(losses)\n', (3336, 3344), True, 'import matplotlib.pyplot as plt\n'), ((3349, 3368), 'matplotlib.pyplot.title', 'plt.title', (['"""losses"""'], {}), "('losses')\n", (3358, 3368), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3393), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (3383, 3393), True, 'import matplotlib.pyplot as plt\n'), ((3398, 3418), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""losses"""'], {}), "('losses')\n", (3408, 3418), True, 'import matplotlib.pyplot as plt\n'), ((3423, 3463), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(dir_name + '/' + 'loss.jpg')"], {}), "(dir_name + '/' + 'loss.jpg')\n", (3434, 3463), True, 'import matplotlib.pyplot as plt\n'), ((3553, 3569), 'arcsim.get_sim', 'arcsim.get_sim', ([], {}), '()\n', (3567, 3569), False, 'import arcsim\n'), ((3693, 3760), 'torch.optim.SGD', 'torch.optim.SGD', (["[{'params': param_g, 'lr': lr}]"], {'momentum': 'momentum'}), "([{'params': param_g, 'lr': lr}], momentum=momentum)\n", (3708, 3760), False, 'import torch\n'), ((3773, 3863), 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts', 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts', (['optimizer', '(10)', '(2)'], {'eta_min': '(0.0001)'}), '(optimizer, 10, 2,\n eta_min=0.0001)\n', (3825, 3863), False, 'import torch\n'), ((207, 221), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (219, 221), False, 'from datetime import datetime\n'), ((842, 862), 'json.dump', 'json.dump', (['config', 'f'], {}), '(config, f)\n', (851, 862), False, 'import json\n'), ((1001, 1086), 'arcsim.init_physics', 'arcsim.init_physics', (["(out_path + '/conf.json')", "(out_path + '/out%d' % epoch)", '(False)'], {}), "(out_path + '/conf.json', out_path + '/out%d' % epoch, False\n )\n", (1020, 1086), False, 'import arcsim\n'), ((1093, 1163), 'arcsim.init_physics', 'arcsim.init_physics', (["(out_path + '/conf.json')", "(out_path + '/out')", '(False)'], {}), "(out_path + '/conf.json', out_path + '/out', False)\n", (1112, 1163), False, 'import arcsim\n'), ((1678, 1692), 'numpy.vstack', 'np.vstack', (['ref'], {}), '(ref)\n', (1687, 1692), True, 'import numpy as np\n'), ((1744, 1768), 'torch.norm', 'torch.norm', (['param_g'], {'p': '(2)'}), '(param_g, p=2)\n', (1754, 1768), False, 'import torch\n'), ((2346, 2363), 'arcsim.sim_step', 'arcsim.sim_step', ([], {}), '()\n', (2361, 2363), False, 'import arcsim\n'), ((2569, 2580), 'time.time', 'time.time', ([], {}), '()\n', (2578, 2580), False, 'import time\n'), ((2634, 2645), 'time.time', 'time.time', ([], {}), '()\n', (2643, 2645), False, 'import time\n'), ((2716, 2727), 'time.time', 
'time.time', ([], {}), '()\n', (2725, 2727), False, 'import time\n'), ((1903, 1953), 'torch.norm', 'torch.norm', (['(ref[i] - sim.cloths[0].mesh.nodes[i].x)'], {}), '(ref[i] - sim.cloths[0].mesh.nodes[i].x)\n', (1913, 1953), False, 'import torch\n')]
|
import salome
import SMESH
from salome.geom import geomBuilder
from salome.smesh import smeshBuilder
import sys
import math
import numpy as np
from numpy.linalg import norm
from numpy.random import uniform
from pathlib import Path
from auxiliaryFunctions import clusteringAlgorithm
from auxiliaryFunctions import getTranslationalRiskAngleRefAxis
from itertools import product
import os
salome.salome_init()
geompy = geomBuilder.New()
smesh = smeshBuilder.New()
def smallestLineOnFace(face):
bndVertices_Slm = geompy.ExtractShapes(face, geompy.ShapeType["VERTEX"], True)
indexList = [(0,1), (0,2), (0,3)]
distances = [geompy.MinDistance(bndVertices_Slm[i], bndVertices_Slm[j]) for i,j in indexList]
index = distances.index(min(distances))
p1 = bndVertices_Slm[indexList[index][0]]
p2 = bndVertices_Slm[indexList[index][1]]
line = geompy.MakeLineTwoPnt(p1,p2)
return line
class Line:
def __init__(self, Point1, Point2):
self.origin = Point1
self.dest = Point2
v1 = geompy.MakeVertex(*list(Point1))
v2 = geompy.MakeVertex(*list(Point2))
self.geom = geompy.MakeLineTwoPnt(v1, v2)
def addToStudy(self, name = 'Line'):
geompy.addToStudy(self.geom, name)
def extendLine(self, multiplier):
self.geom = geompy.ExtendEdge(self.geom, 0, multiplier)
# Obtain the Salome vertexes: New Entity-Explode-SubShapes Selection
[v1, v2] = geompy.ExtractShapes(self.geom, geompy.ShapeType["VERTEX"], True)
v1coords = geompy.PointCoordinates(v1)
v2coords = geompy.PointCoordinates(v2)
self.dest = v2coords if np.allclose(v1coords, self.origin) else v1coords
def translateOrigin(self, newOrigin):
'''
        Given a line defined by its origin and destination, translate it
        parallel to itself so that it starts at the given new origin.
'''
vector = np.array(newOrigin) - np.array(self.origin)
point1 = newOrigin
point2 = np.array(self.dest) + vector
        translatedLine = Line(point1, point2) # Line object at the new position
return translatedLine
def translateLineToCoords(self, coordsList):
translatedLines = [self.translateOrigin(coords) for coords in coordsList]
return translatedLines
def intersectsWith(self, femesh):
size = femesh.getSize()
center = femesh.getCenter()
multiplier = norm(center - self.origin) + size
self.extendLine(multiplier)
elementSize = femesh.getMinElementSize()
#tolerance = np.sqrt(2)*elementSize*1.1 # diagonal*factor
tolerance = elementSize*0.1 # diagonal*factor
smeshType = SMESH.FACE
smeshMethod = SMESH.FT_LyingOnGeom
aCriteria = [smesh.GetCriterion(smeshType, smeshMethod,
SMESH.FT_Undefined, self.geom,
SMESH.FT_Undefined, SMESH.FT_LogicalOR,
tolerance),]
aFilter = smesh.GetFilterFromCriteria(aCriteria)
aFilter.SetMesh(femesh.mesh.GetMesh())
holesMesh = femesh.mesh.GroupOnFilter(smeshType, 'tangentGroup', aFilter)
return not holesMesh.IsEmpty()
def getAngleWithVector(self, vector):
lineVector = np.array(self.dest - self.origin)
angle = np.degrees(np.arccos(lineVector.dot(vector)/(norm(lineVector)*norm(vector))))
return angle
class ScatteringPrism:
def __init__(self, prismParameters, translationalRiskAngle):
self.origin = prismParameters['origin']
self.fwdAngle = prismParameters['fwdAngle']
self.aftAngle = prismParameters['aftAngle']
self.fwdAngleR = np.radians(self.fwdAngle)
self.aftAngleR = np.radians(self.aftAngle)
self.orientation3D = prismParameters['orientation3D']
self.rotatingPlane = prismParameters['rotatingPlane']
assert (self.rotatingPlane in ('XY', 'XZ', 'YZ')), 'rotatingPlane must be XY, XZ or YZ'
self.cobM = self._COBMatrix()
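        # change-of-basis matrix: its columns are the local x (prism axis, orientation3D),
        # y and z unit vectors expressed in global coordinates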
# local axis z is always the reference axis for the translational risk angle
zlocal = self.cobM[:,2]
# Check if local axis z has the same sign compared to refAxisCode
refAxisCode = getTranslationalRiskAngleRefAxis(self.orientation3D, self.rotatingPlane)
if refAxisCode == 'X':
axis = np.array([1.0, 0.0, 0.0])
elif refAxisCode == 'Y':
axis = np.array([0.0, 1.0, 0.0])
elif refAxisCode == 'Z':
axis = np.array([0.0, 0.0, 1.0])
# different sign implies 180 degrees
self.translationalRiskAngle_lb = translationalRiskAngle[0]
self.translationalRiskAngle_ub = translationalRiskAngle[1]
#print('axis=',axis)
#print('zlocal=',zlocal)
if axis.dot(zlocal) < 0:
self.translationalRiskAngle_lb *= -1
self.translationalRiskAngle_ub *= -1
def generateDebris(self, nImpactLines, shapes, velocities):
'''
        Given a prism defined by its angles and orientation, and the number of
        possible impacts, generate the impact lines inside the prism and build the
        debris geometry with its shape, velocity and associated line.
'''
assert nImpactLines == len(shapes) == len(velocities), 'arrays lenght must be equal to nImpactLines'
lines = self.getRandomLines(nImpactLines)
debrisList = [Debris(line, shape, velocity) for line, shape, velocity in zip(lines, shapes, velocities)]
return debrisList
def getRandomLines(self, numLines):
betas = uniform(self.aftAngle, self.fwdAngle, numLines)
thetas = uniform(self.translationalRiskAngle_lb, self.translationalRiskAngle_ub, numLines)
lines = [self._getLineInsidePrism(beta, theta) for beta, theta in zip(betas, thetas)]
return lines
def _getLineInsidePrism(self, beta, theta):
# beta, theta in degrees
pointPrism = self._getPointInsidePrism_global(beta, theta)
line = Line(self.origin, pointPrism)
return line
def _getPointInsidePrism_global(self, beta, theta):
'''
        globalVector is the free vector inside the prism expressed in global
        coordinates; we need the coordinates of its end point in the global frame:
        POINT (prism vertex in the global frame)
        + VECTOR inside the prism, in global coordinates
        = POINT inside the prism, in the global frame
Parameters:
beta: angle inside spread risk angle (degrees)
theta: angle inside translational risk angle (degrees)
'''
localVector = self._getPointInsidePrism_local(beta, theta)
globalVector = self.cobM.dot(localVector)
pointInsidePrismGlobal = self.origin + globalVector
return pointInsidePrismGlobal
def _getPointInsidePrism_local(self, beta, theta):
betaR = np.radians(beta)
thetaR = np.radians(theta)
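        # the prism axis is the local x axis: beta opens the line inside the spread (fwd/aft)
        # angle in the local x-y plane, theta inside the translational risk angle in the local x-z plane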
h = 1
x = h
y = h*np.tan(betaR)
z = h*np.tan(thetaR)
if abs(theta) > 90: # Change sign of x and z in 2nd and 3rd quadrant
x*= -1
z*= -1
return [x, y, z]
def _COBMatrix(self):
x = self.orientation3D
if self.rotatingPlane == 'XY':
y = [0.0, 0.0, 1.0]
elif self.rotatingPlane == 'XZ':
y = [0.0, 1.0, 0.0]
elif self.rotatingPlane == 'YZ':
y = [1.0, 0.0, 0.0]
z = np.cross(x, y)
x, y, z = [v/norm(v) for v in (x, y, z)]
cobM = np.column_stack([x, y, z])
return cobM
class Debris:
'''
Define a piece of debris with shape and velocity properties and the
associated line inside the cone
'''
def __init__(self, line, shape, velocity):
self.line = line
self.shape = shape
self.velocity = velocity
self.origin = line.origin
self._getDebrisGeom()
def _getDebrisGeom(self):
angleRoll = float(uniform(0.0, 180.0, 1))
anglePitch = float(uniform(-45.0, 45.0, 1))
debris0 = geompy.MakeFaceObjHW(self.line.geom, self.shape['a'], self.shape['b'])
debris1 = geompy.Rotate(debris0, self.line.geom, angleRoll*np.pi/180.0, theCopy=True)
line = smallestLineOnFace(debris1)
middlePoints_Slm = geompy.MakeVertexOnCurve(line, 0.5, True)
origin_Slm = geompy.MakeVertex(*self.origin)
axis = geompy.MakeTranslationTwoPoints(line,middlePoints_Slm, origin_Slm)
debris2 = geompy.Rotate(debris1, axis, anglePitch*np.pi/180.0, theCopy=True)
#geompy.addToStudy(debris0, 'debris0')
#geompy.addToStudy(debris1, 'debris1')
geompy.addToStudy(debris2, 'debris2')
self.geom = debris2
def generateDebrisMesh(self, elementSize):
self.mesh = smesh.Mesh(self.geom)
Regular_1D = self.mesh.Segment()
size = Regular_1D.LocalLength(elementSize, None, 1e-07)
Quadrangle_2D = self.mesh.Quadrangle(algo=smeshBuilder.QUADRANGLE)
isDone = self.mesh.Compute()
def getNodeCoordinates(self):
nodesId = self.mesh.GetNodesId()
debrisNodesCoords = [self.mesh.GetNodeXYZ(id) for id in nodesId]
return debrisNodesCoords
class FEMesh:
def __init__(self, NameFile):
self.NameFile = NameFile
medFile = NameFile + '.med'
path = Path.cwd() / medFile
assert path.exists(), '%s does not exists' % str(path)
([self.mesh], status) = smesh.CreateMeshesFromMED(str(path))
assert status == SMESH.DRS_OK, 'Invalid Mesh'
def getnElements(self):
return self.mesh.NbElements()
def getElementsId(self):
return self.mesh.GetElementsId()
def getElementsCoG(self, elements):
elementsCoG = [self.mesh.BaryCenter(element) for element in elements]
return np.array(elementsCoG)
def _getBoundingBox(self):
box = np.array(self.mesh.BoundingBox())
minvalues = box[:3] # hasta el 3
maxvalues = box[3:] # del 3 hacia delante no incluido
return minvalues, maxvalues
def getSize(self):
minvalues, maxvalues = self._getBoundingBox()
size = norm(maxvalues - minvalues)
return size
def getCenter(self):
minvalues, maxvalues = self._getBoundingBox()
center = (maxvalues + minvalues)/2
return center
def getTranslationalRiskAngle(self, origin, orientation3D, rotatingPlane):
boundVertices = self._getBoundVertices(origin, orientation3D, rotatingPlane)
origin = np.array(origin)
p0 = np.array(boundVertices['bnd_1_near'])
p1 = np.array(boundVertices['bnd_1_far'])
tangentLine_1, tangent_point_1 = self._getTangentToMesh(origin,p0,p1)
angle_1 = tangentLine_1.getAngleWithVector(orientation3D)
p0 = np.array(boundVertices['bnd_2_near'])
p1 = np.array(boundVertices['bnd_2_far'])
tangentLine_2, tangent_point_2 = self._getTangentToMesh(origin,p0,p1)
angle_2 = tangentLine_2.getAngleWithVector(orientation3D)
tangentLine_1.addToStudy('tangentLine_1')
tangentLine_2.addToStudy('tangentLine_2')
tangent_point_1 = np.array(tangent_point_1)
tangent_point_2 = np.array(tangent_point_2)
refAxisCode = getTranslationalRiskAngleRefAxis(orientation3D, rotatingPlane)
axisDict = {'X': 0, 'Y': 1, 'Z': 2}
comp = axisDict[refAxisCode]
if tangent_point_1[comp] < origin[comp]: angle_1 = - angle_1
if tangent_point_2[comp] < origin[comp]: angle_2 = - angle_2
return angle_1, angle_2
def _getBoundVertices(self, origin, orientation3D, rotatingPlane):
if rotatingPlane == 'XY':
nVRotatingPlane = [0.0, 0.0, 1.0]
elif rotatingPlane == 'XZ':
nVRotatingPlane = [0.0, 1.0, 0.0]
elif rotatingPlane == 'YZ':
nVRotatingPlane = [1.0, 0.0, 0.0]
nVRotatingPlane_Slm = geompy.MakeVectorDXDYDZ(*nVRotatingPlane)
# normal vector to bound faces of translational risk angle
nVBoundFaces = np.cross(orientation3D, nVRotatingPlane)
nVBoundFaces_Slm = geompy.MakeVectorDXDYDZ(*nVBoundFaces)
#minimum and maximum values of the bounding box
minvalues, maxvalues = self._getBoundingBox()
vertex_1_Slm = geompy.MakeVertex(*minvalues) # each component to each argument
vertex_2_Slm = geompy.MakeVertex(*maxvalues)
# planes that contain bound faces
bndPlane_1_Slm = geompy.MakePlane(vertex_1_Slm, nVBoundFaces_Slm, 2*self.getSize())
bndPlane_2_Slm = geompy.MakePlane(vertex_2_Slm, nVBoundFaces_Slm, 2*self.getSize())
box = geompy.MakeBoxTwoPnt(vertex_1_Slm, vertex_2_Slm)
intersection1 = geompy.MakeSection(box, bndPlane_1_Slm, True) # box planar section
intersection2 = geompy.MakeSection(box, bndPlane_2_Slm, True) # box planar section
origin_Slm = geompy.MakeVertex(*origin)
planeInOrientation3D_Slm = geompy.MakePlane(origin_Slm, nVRotatingPlane_Slm, 4*self.getSize())
bndLine_1_Slm = geompy.MakeSection(intersection1, planeInOrientation3D_Slm, True) # box planar section
bndLine_2_Slm = geompy.MakeSection(intersection2, planeInOrientation3D_Slm, True) # box planar section
bndVertices_1_Slm = geompy.ExtractShapes(bndLine_1_Slm, geompy.ShapeType["VERTEX"], True)
bndVertices_2_Slm = geompy.ExtractShapes(bndLine_2_Slm, geompy.ShapeType["VERTEX"], True)
bndVertices_1 = [geompy.PointCoordinates(v) for v in bndVertices_1_Slm]
bndVertices_2 = [geompy.PointCoordinates(v) for v in bndVertices_2_Slm]
def distToorigin(coords):
dist = norm(np.array(coords) - np.array(origin))
return dist
bndVertices_1.sort(key=distToorigin)
bndVertices_2.sort(key=distToorigin)
bndVertices = {'bnd_1_near': bndVertices_1[0],
'bnd_1_far' : bndVertices_1[1],
'bnd_2_near': bndVertices_2[0],
'bnd_2_far' : bndVertices_2[1]
}
return bndVertices
def _getTangentToMesh(self, origin, lb, ub):
dist = 1.0
tol = 0.01
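        # bisection search for the tangent direction: the bracket [lb, ub] keeps one end whose
        # line to the origin intersects the mesh and one whose line misses it, and is narrowed
        # until it is smaller than tol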
while dist > tol:
line_lb = Line(origin, lb)
intersects_lb = line_lb.intersectsWith(self)
line_ub = Line(origin, ub)
intersects_ub = line_ub.intersectsWith(self)
new_point = (lb+ub)/2
line = Line(origin, new_point)
intersects_new_point = line.intersectsWith(self)
if intersects_new_point & intersects_lb:
lb = new_point
elif intersects_new_point & intersects_ub:
ub = new_point
elif (not intersects_new_point) & intersects_lb:
ub = new_point
elif (not intersects_new_point) & intersects_ub:
lb = new_point
dist = norm(ub - lb)
line = Line(origin, new_point)
return line, new_point
def getMinElementSize(self):
minArea = self.mesh.GetMinMax(SMESH.FT_Area)[0]
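        # diameter of a circle with the same area as the smallest face element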
minSize = np.sqrt(4*minArea/np.pi)
return minSize
def getAdjacentElementMesh(self, elementId, coplanarAngle=5):
aCriteria = smesh.GetCriterion(SMESH.FACE, SMESH.FT_CoplanarFaces,
SMESH.FT_Undefined, elementId,
SMESH.FT_Undefined, SMESH.FT_Undefined,
coplanarAngle)
aFilter = smesh.GetFilterFromCriteria([aCriteria])
aFilter.SetMesh(self.mesh.GetMesh())
sub_hole = self.mesh.GroupOnFilter(SMESH.FACE, 'Hole', aFilter)
sub_mesh = smesh.CopyMesh(sub_hole, 'meshHole', 0, 1)
return sub_mesh
def getHoleMeshFromIds(self, ids):
ids = str(ids).strip('[]') # '3,4,5' Remove characters
sub_mesh2D = self.getMeshFromRangeOfIds(ids, 2)
sub_mesh1D = self.getMeshFromRangeOfIds(ids, 1)
sub_mesh = self.getHoleMeshKeepingOriginalIds(sub_mesh2D, sub_mesh1D, 'meshHole')
        hole = Hole(sub_mesh) # Hole instance wrapping this sub-mesh
return hole
def getMeshFromRangeOfIds(self, ids, dim):
assert dim in (1,2), 'dim must be 1 or 2'
smeshType = SMESH.FACE if dim == 2 else SMESH.EDGE
aCriteria = smesh.GetCriterion(smeshType,SMESH.FT_RangeOfIds,
SMESH.FT_Undefined,ids)
aFilter = smesh.GetFilterFromCriteria([aCriteria])
aFilter.SetMesh(self.mesh.GetMesh())
sub_hole = self.mesh.GroupOnFilter(smeshType, 'Hole%iD' %dim, aFilter)
sub_mesh = smesh.CopyMesh(sub_hole, 'meshHole%iD' %dim, 0, 1)
return sub_mesh
def getMeshFromGroupOfLines(self, debrisLines, dim, tolerance):
assert dim in (1,2), 'dim must be 1 or 2'
smeshType = SMESH.FACE if dim == 2 else SMESH.EDGE
assert hasattr(self, 'selectionMethod'), 'FEMesh needs attribute selectionMethod defined to use getMeshFromGroupOfLines'
smeshMethod = SMESH.FT_LyingOnGeom if self.selectionMethod == 'OneNode' else SMESH.FT_BelongToGeom
aCriteria = [smesh.GetCriterion(smeshType, smeshMethod,
SMESH.FT_Undefined, line.geom,
SMESH.FT_Undefined, SMESH.FT_LogicalOR,
tolerance) for line in debrisLines]
aFilter = smesh.GetFilterFromCriteria(aCriteria)
aFilter.SetMesh(self.fuselage.mesh.GetMesh())
holesMesh = self.fuselage.mesh.GroupOnFilter(smeshType, 'groupedHolesFromDebris%iD_%s' %(dim, HolesFromDebris.Id), aFilter)
        mesh = smesh.CopyMesh(holesMesh, 'meshHolesFromDebris%iD_%s' %(dim, HolesFromDebris.Id), 0, 1) # mesh holding every element intersected by the debris lines
return mesh
def getHoleMeshKeepingOriginalIds(self, sub_mesh2D, sub_mesh1D, meshName):
ids2D = sub_mesh2D.GetElementsId()
ids1D = sub_mesh1D.GetElementsId()
idsAll = self.fuselage.getElementsId()
idsToRemove = [i for i in idsAll if i not in ids2D+ids1D]
sub_mesh = smesh.CopyMesh(self.fuselage.mesh, meshName)
sub_mesh.RemoveElements(idsToRemove)
return sub_mesh
def generateDamagedConfig(self, debrisList, damageCriteria, selectionMethod='AllNodes'):
'''
        Given a list of debris objects, build the groups of elements to remove:
        intersect the impact lines with the mesh, taking the debris shape into
        account, and then delete those groups of elements from the mesh.
'''
assert selectionMethod in ('AllNodes', 'OneNode'), 'Selection Method must be AllNodes or OneNode'
size = self.getSize()
center = self.getCenter()
multiplier = [norm(center - debris.origin) + size for debris in debrisList]
for mult, debris in zip(multiplier, debrisList):
debris.line.extendLine(mult)
#debris.line.addToStudy()
        damagedConfiguration = DamagedConfig(self, debrisList, damageCriteria, selectionMethod) # self is the fuselage object and self.mesh its Salome mesh
return damagedConfiguration
def exportMesh(self, name='damagedConfig.med'):
try:
path = Path.cwd() / name
self.mesh.ExportMED(str(path), auto_groups=0, minor=40, overwrite=1, meshPart=None, autoDimension=1)
pass
except:
print('ExportMED() failed. Invalid file name?')
def addToStudy(self, name='fuselage'):
smesh.SetName(self.mesh.GetMesh(), name)
class Hole(FEMesh):
def __init__(self, mesh):
self.mesh = mesh
class HolesFromDebris(FEMesh):
# Class variable to use as counter
Id = 0
def __init__(self, fuselage, debris, damageCriteria, selectionMethod='AllNodes'):
self.fuselage = fuselage
self.debris = debris
self.damageCriteria = damageCriteria
self.selectionMethod = selectionMethod
self.isempty = False
# Reference the class variable
HolesFromDebris.Id += 1
        self.groupedHoles = [] # list of Hole objects, one per hole mesh, accumulated as the holes produced by this debris are separated
self._getGroupedHoles()
        # damageCriteria is a DamageCriteria instance holding the velocity threshold and transfer curve, with a method to apply that criterion
        self.damagedHoles = self.damageCriteria.apply(self.groupedHoles, self.debris.velocity) # for every hole we check whether the fuselage is pierced; if it is, the hole is kept in damagedHoles
def _getMeshFromDebris(self):
elementSize = self.fuselage.getMinElementSize()
tolerance = np.sqrt(2)*elementSize*1.1 # diagonal*factor
self.debris.generateDebrisMesh(elementSize)
debrisNodesCoords = self.debris.getNodeCoordinates() # list with coordinates
debrisLines = self.debris.line.translateLineToCoords(debrisNodesCoords) # general function. list with line objects
#for line in debrisLines: line.addToStudy('ExtendedLine-%s' % HolesFromDebris.Id)
mesh2D = self.getMeshFromGroupOfLines(debrisLines, 2, tolerance)
mesh1D = self.getMeshFromGroupOfLines(debrisLines, 1, tolerance)
meshName = 'meshHolesFromDebris_%s' % HolesFromDebris.Id
self.mesh = self.getHoleMeshKeepingOriginalIds(mesh2D, mesh1D, meshName)
    def _separateHolesOnImpacts(self, elements): # elements holds the ids of the mesh associated with all the holes produced by the same debris
if elements == []:
self.isempty = True # if elements is empty, there is no intersection or hole
else:
elementsCoG = self.getElementsCoG(elements)
clusteredElements = clusteringAlgorithm(elements, elementsCoG)
print('clusteredElements lenght',len(clusteredElements))
self.groupedHoles = [self.getHoleMeshFromIds(cluster) for cluster in clusteredElements] # list of hole objects
def _sortGroupedHoles(self):
        def distanceToOrigin(hole): # helper defined inside the method
return norm(hole.getCenter()-self.debris.origin)
        self.groupedHoles.sort(key=distanceToOrigin) # sort the holes by their distance to the debris origin
def _getGroupedHoles(self):
self._getMeshFromDebris()
elements = self.getElementsId()
self._separateHolesOnImpacts(elements)
if self.groupedHoles == []:
# If groupedHoles is empty add an empty mesh
            self.groupedHoles.append(Hole(self.mesh)) # Hole instance wrapping the empty mesh
self._sortGroupedHoles()
class DamageCriteria:
def __init__(self, vThreshold, ftransfer):
self.vThreshold = vThreshold
self.ftransfer = ftransfer
def apply(self, groupedHoles, velocity):
damagedHoles = []
for hole in groupedHoles:
if velocity > self.vThreshold:
damagedHoles.append(hole)
velocity = self.ftransfer(velocity)
return damagedHoles
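# Illustrative sketch (not part of the original code, values made up): a criterion with a
# 100 m/s threshold and a transfer function that halves the velocity after each penetrated
# skin could be built as
#   criteria = DamageCriteria(vThreshold=100.0, ftransfer=lambda v: 0.5 * v)
# apply() then keeps each hole reached while the decaying velocity is still above the threshold.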
class DamagedConfig(FEMesh):
def __init__(self, fuselage, debrisList, damageCriteria, selectionMethod='AllNodes'):
        self.fuselage = fuselage # FEMesh object
self.debrisList = debrisList
self.damageCriteria = damageCriteria
self.selectionMethod = selectionMethod
self._intersectLinesAndCutHoles()
def _intersectLinesAndCutHoles(self):
'''
        Apply HolesFromDebris: for each debris, group the elements belonging to
        the same hole. groupedHoles is a list of Hole instances, one per hole mesh;
        there is one groupedHoles per impact line.
'''
self.holesFromDebrisList = [HolesFromDebris(self.fuselage, debris, self.damageCriteria, self.selectionMethod)
for debris in self.debrisList]
elementsToRemove = []
for holesFromDebris in self.holesFromDebrisList:
for hole in holesFromDebris.damagedHoles:
elementsToRemove += hole.getElementsId()
np.savetxt('medIds.txt', elementsToRemove, fmt='%d')
self.mesh = smesh.CopyMesh(self.fuselage.mesh, 'DamagedMesh')
self.mesh.RemoveElements(elementsToRemove)
def addToStudy(self):
super().addToStudy(name='damagedMesh') # call the parent method
        for debris in self.debrisList: debris.line.addToStudy() # the items of this list are already Line objects
|
[
"numpy.radians",
"salome.salome_init",
"numpy.allclose",
"numpy.sqrt",
"numpy.cross",
"numpy.tan",
"salome.geom.geomBuilder.New",
"pathlib.Path.cwd",
"numpy.linalg.norm",
"numpy.column_stack",
"numpy.array",
"numpy.savetxt",
"numpy.random.uniform",
"auxiliaryFunctions.clusteringAlgorithm",
"auxiliaryFunctions.getTranslationalRiskAngleRefAxis",
"salome.smesh.smeshBuilder.New"
] |
[((392, 412), 'salome.salome_init', 'salome.salome_init', ([], {}), '()\n', (410, 412), False, 'import salome\n'), ((422, 439), 'salome.geom.geomBuilder.New', 'geomBuilder.New', ([], {}), '()\n', (437, 439), False, 'from salome.geom import geomBuilder\n'), ((448, 466), 'salome.smesh.smeshBuilder.New', 'smeshBuilder.New', ([], {}), '()\n', (464, 466), False, 'from salome.smesh import smeshBuilder\n'), ((3289, 3322), 'numpy.array', 'np.array', (['(self.dest - self.origin)'], {}), '(self.dest - self.origin)\n', (3297, 3322), True, 'import numpy as np\n'), ((3709, 3734), 'numpy.radians', 'np.radians', (['self.fwdAngle'], {}), '(self.fwdAngle)\n', (3719, 3734), True, 'import numpy as np\n'), ((3760, 3785), 'numpy.radians', 'np.radians', (['self.aftAngle'], {}), '(self.aftAngle)\n', (3770, 3785), True, 'import numpy as np\n'), ((4265, 4337), 'auxiliaryFunctions.getTranslationalRiskAngleRefAxis', 'getTranslationalRiskAngleRefAxis', (['self.orientation3D', 'self.rotatingPlane'], {}), '(self.orientation3D, self.rotatingPlane)\n', (4297, 4337), False, 'from auxiliaryFunctions import getTranslationalRiskAngleRefAxis\n'), ((5697, 5744), 'numpy.random.uniform', 'uniform', (['self.aftAngle', 'self.fwdAngle', 'numLines'], {}), '(self.aftAngle, self.fwdAngle, numLines)\n', (5704, 5744), False, 'from numpy.random import uniform\n'), ((5762, 5847), 'numpy.random.uniform', 'uniform', (['self.translationalRiskAngle_lb', 'self.translationalRiskAngle_ub', 'numLines'], {}), '(self.translationalRiskAngle_lb, self.translationalRiskAngle_ub,\n numLines)\n', (5769, 5847), False, 'from numpy.random import uniform\n'), ((7018, 7034), 'numpy.radians', 'np.radians', (['beta'], {}), '(beta)\n', (7028, 7034), True, 'import numpy as np\n'), ((7052, 7069), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (7062, 7069), True, 'import numpy as np\n'), ((7590, 7604), 'numpy.cross', 'np.cross', (['x', 'y'], {}), '(x, y)\n', (7598, 7604), True, 'import numpy as np\n'), ((7671, 7697), 'numpy.column_stack', 'np.column_stack', (['[x, y, z]'], {}), '([x, y, z])\n', (7686, 7697), True, 'import numpy as np\n'), ((9994, 10015), 'numpy.array', 'np.array', (['elementsCoG'], {}), '(elementsCoG)\n', (10002, 10015), True, 'import numpy as np\n'), ((10333, 10360), 'numpy.linalg.norm', 'norm', (['(maxvalues - minvalues)'], {}), '(maxvalues - minvalues)\n', (10337, 10360), False, 'from numpy.linalg import norm\n'), ((10713, 10729), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (10721, 10729), True, 'import numpy as np\n'), ((10744, 10781), 'numpy.array', 'np.array', (["boundVertices['bnd_1_near']"], {}), "(boundVertices['bnd_1_near'])\n", (10752, 10781), True, 'import numpy as np\n'), ((10795, 10831), 'numpy.array', 'np.array', (["boundVertices['bnd_1_far']"], {}), "(boundVertices['bnd_1_far'])\n", (10803, 10831), True, 'import numpy as np\n'), ((10990, 11027), 'numpy.array', 'np.array', (["boundVertices['bnd_2_near']"], {}), "(boundVertices['bnd_2_near'])\n", (10998, 11027), True, 'import numpy as np\n'), ((11041, 11077), 'numpy.array', 'np.array', (["boundVertices['bnd_2_far']"], {}), "(boundVertices['bnd_2_far'])\n", (11049, 11077), True, 'import numpy as np\n'), ((11350, 11375), 'numpy.array', 'np.array', (['tangent_point_1'], {}), '(tangent_point_1)\n', (11358, 11375), True, 'import numpy as np\n'), ((11402, 11427), 'numpy.array', 'np.array', (['tangent_point_2'], {}), '(tangent_point_2)\n', (11410, 11427), True, 'import numpy as np\n'), ((11451, 11513), 'auxiliaryFunctions.getTranslationalRiskAngleRefAxis', 
'getTranslationalRiskAngleRefAxis', (['orientation3D', 'rotatingPlane'], {}), '(orientation3D, rotatingPlane)\n', (11483, 11513), False, 'from auxiliaryFunctions import getTranslationalRiskAngleRefAxis\n'), ((12251, 12291), 'numpy.cross', 'np.cross', (['orientation3D', 'nVRotatingPlane'], {}), '(orientation3D, nVRotatingPlane)\n', (12259, 12291), True, 'import numpy as np\n'), ((15347, 15375), 'numpy.sqrt', 'np.sqrt', (['(4 * minArea / np.pi)'], {}), '(4 * minArea / np.pi)\n', (15354, 15375), True, 'import numpy as np\n'), ((24660, 24712), 'numpy.savetxt', 'np.savetxt', (['"""medIds.txt"""', 'elementsToRemove'], {'fmt': '"""%d"""'}), "('medIds.txt', elementsToRemove, fmt='%d')\n", (24670, 24712), True, 'import numpy as np\n'), ((1642, 1676), 'numpy.allclose', 'np.allclose', (['v1coords', 'self.origin'], {}), '(v1coords, self.origin)\n', (1653, 1676), True, 'import numpy as np\n'), ((1884, 1903), 'numpy.array', 'np.array', (['newOrigin'], {}), '(newOrigin)\n', (1892, 1903), True, 'import numpy as np\n'), ((1906, 1927), 'numpy.array', 'np.array', (['self.origin'], {}), '(self.origin)\n', (1914, 1927), True, 'import numpy as np\n'), ((1972, 1991), 'numpy.array', 'np.array', (['self.dest'], {}), '(self.dest)\n', (1980, 1991), True, 'import numpy as np\n'), ((2409, 2435), 'numpy.linalg.norm', 'norm', (['(center - self.origin)'], {}), '(center - self.origin)\n', (2413, 2435), False, 'from numpy.linalg import norm\n'), ((4389, 4414), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (4397, 4414), True, 'import numpy as np\n'), ((7114, 7127), 'numpy.tan', 'np.tan', (['betaR'], {}), '(betaR)\n', (7120, 7127), True, 'import numpy as np\n'), ((7142, 7156), 'numpy.tan', 'np.tan', (['thetaR'], {}), '(thetaR)\n', (7148, 7156), True, 'import numpy as np\n'), ((8116, 8138), 'numpy.random.uniform', 'uniform', (['(0.0)', '(180.0)', '(1)'], {}), '(0.0, 180.0, 1)\n', (8123, 8138), False, 'from numpy.random import uniform\n'), ((8167, 8190), 'numpy.random.uniform', 'uniform', (['(-45.0)', '(45.0)', '(1)'], {}), '(-45.0, 45.0, 1)\n', (8174, 8190), False, 'from numpy.random import uniform\n'), ((9510, 9520), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (9518, 9520), False, 'from pathlib import Path\n'), ((15151, 15164), 'numpy.linalg.norm', 'norm', (['(ub - lb)'], {}), '(ub - lb)\n', (15155, 15164), False, 'from numpy.linalg import norm\n'), ((22222, 22264), 'auxiliaryFunctions.clusteringAlgorithm', 'clusteringAlgorithm', (['elements', 'elementsCoG'], {}), '(elements, elementsCoG)\n', (22241, 22264), False, 'from auxiliaryFunctions import clusteringAlgorithm\n'), ((4467, 4492), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (4475, 4492), True, 'import numpy as np\n'), ((7627, 7634), 'numpy.linalg.norm', 'norm', (['v'], {}), '(v)\n', (7631, 7634), False, 'from numpy.linalg import norm\n'), ((19109, 19137), 'numpy.linalg.norm', 'norm', (['(center - debris.origin)'], {}), '(center - debris.origin)\n', (19113, 19137), False, 'from numpy.linalg import norm\n'), ((19602, 19612), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (19610, 19612), False, 'from pathlib import Path\n'), ((21125, 21135), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (21132, 21135), True, 'import numpy as np\n'), ((4545, 4570), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (4553, 4570), True, 'import numpy as np\n'), ((13895, 13911), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (13903, 13911), True, 'import numpy as np\n'), ((13914, 
13930), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (13922, 13930), True, 'import numpy as np\n'), ((3385, 3401), 'numpy.linalg.norm', 'norm', (['lineVector'], {}), '(lineVector)\n', (3389, 3401), False, 'from numpy.linalg import norm\n'), ((3402, 3414), 'numpy.linalg.norm', 'norm', (['vector'], {}), '(vector)\n', (3406, 3414), False, 'from numpy.linalg import norm\n')]
|
"""
.. module:: Augmentation
:platform: Unix, Windows
   :synopsis: Text data augmentation utilities (EDA, TF-IDF word replacement, pyramid splitting).
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import random
from nltk.corpus import wordnet
import collections
import math
#import nltk
#nltk.download('wordnet')
class Augmentation:
r"""
This is the class to do data augmentation.
Args:
documents (:obj:`list`, optional, defaults to None):
A list of documents.
		labels (:obj:`list`, optional, defaults to None):
			A list of labels.
		strategy (:obj:`string`, optional, defaults to 'daia'):
			Augmentation strategy: 'eda', 'uda' or 'pyramid'.
		verbose (:obj:`bool`, optional, defaults to True):
			If True, print progress messages.
Example::
		from Manteia.Augmentation import Augmentation
		documents=['a text','text b']
		labels=['a','b']
		Augmentation(documents,labels,strategy='eda')
Attributes:
"""
def __init__(self,documents=[],labels=[],strategy='daia',verbose=True):
self.documents = documents
self.labels = labels
self.verbose = verbose
if verbose:
print('Augmentation %s.' % strategy)
if strategy=='eda':
self.documents_augmented,self.labels_augmented = eda(self.documents,self.labels)
if strategy=='uda':
			self.documents_augmented,self.labels_augmented = uda(self.documents,self.labels)
if strategy=='pyramid':
self.documents_augmented,self.labels_augmented = pyramid(self.documents,self.labels)
def test(self):
return "Mantรฉรฏa Augmentation."
def uda(documents,labels):
documents_augmented=[]
labels_augmented=[]
data_stats=get_data_stats(documents)
token_prob=0.9
op = TfIdfWordRep(token_prob, data_stats)
for text,label in zip(documents,labels):
text_aug=op(text)
documents_augmented.append(text_aug)
labels_augmented.append(label)
return documents_augmented,labels_augmented
#https://github.com/google-research/uda/blob/master/text/augmentation/word_level_augment.py
def pyramid(documents,labels,level=3):
	r"""
	This function computes the pyramid (DAIA) augmentation: each document is
	split into a trimmed copy and progressively smaller chunks (halves, thirds,
	...) up to the requested level, and every chunk keeps the label of its
	source document.
	Args:
		documents: list of texts.
		labels: list of labels, one per document.
		level: splitting depth, clipped to the range 2-5.
	Returns:
		documents_augmented, labels_augmented
	Example::
		documents_augmented, labels_augmented = pyramid(['some long text'], ['a'], level=2)
	"""
documents_augmented=[]
labels_augmented=[]
if level < 2:level=2
if level > 5:level=5
for text,label in zip(documents,labels):
text_list,label_list=split_text(text,label,level)
documents_augmented = documents_augmented+text_list
labels_augmented = labels_augmented+label_list
return documents_augmented,labels_augmented
def get_data_stats(texts):
"""Compute the IDF score for each word. Then compute the TF-IDF score."""
word_doc_freq = collections.defaultdict(int)
# Compute IDF
for text in texts:
cur_word_dict = {}
cur_sent = text.split(' ')
for word in cur_sent:
cur_word_dict[word] = 1
for word in cur_word_dict:
word_doc_freq[word] += 1
idf = {}
for word in word_doc_freq:
idf[word] = math.log(len(texts) * 1. / word_doc_freq[word])
# Compute TF-IDF
tf_idf = {}
for text in texts:
cur_word_dict = {}
cur_sent = text.split(' ')
for word in cur_sent:
if word not in tf_idf:
tf_idf[word] = 0
tf_idf[word] += 1. / len(cur_sent) * idf[word]
return {
"idf": idf,
"tf_idf": tf_idf,
}
class EfficientRandomGen(object):
"""A base class that generate multiple random numbers at the same time."""
def reset_random_prob(self):
"""Generate many random numbers at the same time and cache them."""
cache_len = 100000
self.random_prob_cache = np.random.random(size=(cache_len,))
self.random_prob_ptr = cache_len - 1
def get_random_prob(self):
"""Get a random number."""
value = self.random_prob_cache[self.random_prob_ptr]
self.random_prob_ptr -= 1
if self.random_prob_ptr == -1:
self.reset_random_prob()
return value
def get_random_token(self):
"""Get a random token."""
token = self.token_list[self.token_ptr]
self.token_ptr -= 1
if self.token_ptr == -1:
self.reset_token_list()
return token
class TfIdfWordRep(EfficientRandomGen):
"""TF-IDF Based Word Replacement."""
def __init__(self, token_prob, data_stats):
super(TfIdfWordRep, self).__init__()
self.token_prob = token_prob
self.data_stats = data_stats
self.idf = data_stats["idf"]
self.tf_idf = data_stats["tf_idf"]
tf_idf_items = data_stats["tf_idf"].items()
tf_idf_items = sorted(tf_idf_items, key=lambda item: -item[1])
self.tf_idf_keys = []
self.tf_idf_values = []
for key, value in tf_idf_items:
self.tf_idf_keys += [key]
self.tf_idf_values += [value]
self.normalized_tf_idf = np.array(self.tf_idf_values)
self.normalized_tf_idf = (self.normalized_tf_idf.max()
- self.normalized_tf_idf)
self.normalized_tf_idf = (self.normalized_tf_idf
/ self.normalized_tf_idf.sum())
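    # tokens with a high TF-IDF score get a small sampling weight, so replacement tokens are
    # drawn mostly from common, low-information words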
self.reset_token_list()
self.reset_random_prob()
def get_replace_prob(self, all_words):
"""Compute the probability of replacing tokens in a sentence."""
cur_tf_idf = collections.defaultdict(int)
for word in all_words:
cur_tf_idf[word] += 1. / len(all_words) * self.idf[word]
replace_prob = []
for word in all_words:
replace_prob += [cur_tf_idf[word]]
replace_prob = np.array(replace_prob)
replace_prob = np.max(replace_prob) - replace_prob
replace_prob = (replace_prob / replace_prob.sum() *
self.token_prob * len(all_words))
return replace_prob
def __call__(self, example):
all_words = example.split(' ')
replace_prob = self.get_replace_prob(all_words)
all_words = self.replace_tokens(
all_words,
replace_prob[:len(all_words)]
)
return " ".join(all_words)
def replace_tokens(self, word_list, replace_prob):
"""Replace tokens in a sentence."""
for i in range(len(word_list)):
if self.get_random_prob() < replace_prob[i]:
word_list[i] = self.get_random_token()
return word_list
def reset_token_list(self):
cache_len = len(self.tf_idf_keys)
token_list_idx = np.random.choice(
cache_len, (cache_len,), p=self.normalized_tf_idf)
self.token_list = []
for idx in token_list_idx:
self.token_list += [self.tf_idf_keys[idx]]
self.token_ptr = len(self.token_list) - 1
#print("sampled token list: {:s}".format(" ".join(self.token_list)))
def eda(documents,labels):
documents_augmented=[]
labels_augmented=[]
for document,label in zip(documents,labels):
text_list,label_list = eda_text(document,label)
documents_augmented = documents_augmented+text_list
labels_augmented = labels_augmented+label_list
return documents_augmented,labels_augmented
def eda_text(text,label):
text_list,label_list=[],[]
	# split the text into words
word_list_1=text.split(' ')
	# Random Swap (RS): swap two random words
idx_1 = random.randint(0,len(word_list_1)-1)
idx_2 = random.randint(0,len(word_list_1)-1)
word_list_1[idx_1],word_list_1[idx_2] = word_list_1[idx_2],word_list_1[idx_1]
text_list = [' '.join(word_list_1)]
label_list= [label]
	# Random Deletion (RD): delete a random word
word_list_2=text.split(' ')
idx_3 = random.randint(0,len(word_list_2)-1)
	del word_list_2[idx_3]
text_list.append(' '.join(word_list_2))
label_list.append(label)
#Synonym Replacement
word_list_3=text.split(' ')
idx_4 = random.randint(0,len(word_list_3)-1)
if len(wordnet.synsets(word_list_3[idx_4]))>0:
idx_synonym=random.randint(0,len(wordnet.synsets(word_list_3[idx_4]))-1)
synonym = wordnet.synsets(word_list_3[idx_4])[idx_synonym].lemma_names()[0]
if synonym!=word_list_3[idx_4]:
word_list_3[idx_4]=synonym
            text_list.append(' '.join(word_list_3))
label_list.append(label)
#Random Insertion (RI)
word_list_4=text.split(' ')
idx_5 = random.randint(0,len(word_list_4)-1)
idx_6 = random.randint(0,len(word_list_4)-1)
if len(wordnet.synsets(word_list_4[idx_5]))>0:
idx_synonym=random.randint(0,len(wordnet.synsets(word_list_4[idx_5]))-1)
synonym = wordnet.synsets(word_list_4[idx_5])[idx_synonym].lemma_names()[0]
if synonym!=word_list_4[idx_5]:
word_list_4.insert(idx_6, synonym)
            text_list.append(' '.join(word_list_4))
label_list.append(label)
return text_list,label_list
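# Usage sketch (added note): eda() applies the four perturbations implemented in
# eda_text (random swap, random deletion, synonym replacement, random insertion)
# to each document and returns the augmented texts together with matching copies
# of the labels. The inputs below are illustrative only.
#
#   docs_aug, labels_aug = eda(["the cat sat on the mat"], [1])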
def split_text(text,label,level=3):
text_list,label_list=[],[]
decoup_1a = int(0.05*len(text))
decoup_1b = int(0.95*len(text))
decoup_2 = int(len(text)/2)
decoup_3 = int(len(text)/3)
decoup_4 = int(len(text)/4)
decoup_5 = int(len(text)/5)
if level >=1 :
text_list = text_list+[text[decoup_1a:decoup_1b]]
label_list = label_list+[label]
if level >=2 :
text_list = text_list+[text[:decoup_2],text[decoup_2:]]
label_list = label_list+[label,label]
if level >=3 :
text_list = text_list+[text[:decoup_3],text[decoup_3:2*decoup_3],text[2*decoup_3:]]
label_list = label_list+[label,label,label]
if level >=4 :
text_list = text_list+[text[:decoup_4],text[decoup_4:2*decoup_4],text[2*decoup_4:3*decoup_4],text[3*decoup_4:]]
label_list = label_list+[label,label,label,label]
if level >=5 :
text_list = text_list+[text[:decoup_5],text[decoup_5:2*decoup_5],text[2*decoup_5:3*decoup_5],text[3*decoup_5:4*decoup_5],text[4*decoup_5:]]
label_list = label_list+[label,label,label,label,label]
return text_list,label_list
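# Usage sketch (added note): split_text() augments a single document by slicing
# it into character-level chunks; with level=3 it returns the lightly trimmed
# text plus its halves and thirds (6 chunks in total), each paired with the
# same label. The literal text below is illustrative.
#
#   chunks, chunk_labels = split_text("some long document text ...", label=0, level=3)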
|
[
"numpy.random.random",
"numpy.random.choice",
"numpy.max",
"numpy.array",
"collections.defaultdict",
"nltk.corpus.wordnet.synsets"
] |
[((2556, 2584), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (2579, 2584), False, 'import collections\n'), ((3466, 3501), 'numpy.random.random', 'np.random.random', ([], {'size': '(cache_len,)'}), '(size=(cache_len,))\n', (3482, 3501), True, 'import numpy as np\n'), ((4591, 4619), 'numpy.array', 'np.array', (['self.tf_idf_values'], {}), '(self.tf_idf_values)\n', (4599, 4619), True, 'import numpy as np\n'), ((5035, 5063), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (5058, 5063), False, 'import collections\n'), ((5263, 5285), 'numpy.array', 'np.array', (['replace_prob'], {}), '(replace_prob)\n', (5271, 5285), True, 'import numpy as np\n'), ((6071, 6138), 'numpy.random.choice', 'np.random.choice', (['cache_len', '(cache_len,)'], {'p': 'self.normalized_tf_idf'}), '(cache_len, (cache_len,), p=self.normalized_tf_idf)\n', (6087, 6138), True, 'import numpy as np\n'), ((5305, 5325), 'numpy.max', 'np.max', (['replace_prob'], {}), '(replace_prob)\n', (5311, 5325), True, 'import numpy as np\n'), ((7362, 7397), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word_list_3[idx_4]'], {}), '(word_list_3[idx_4])\n', (7377, 7397), False, 'from nltk.corpus import wordnet\n'), ((7851, 7886), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word_list_4[idx_5]'], {}), '(word_list_4[idx_5])\n', (7866, 7886), False, 'from nltk.corpus import wordnet\n'), ((7438, 7473), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word_list_3[idx_4]'], {}), '(word_list_3[idx_4])\n', (7453, 7473), False, 'from nltk.corpus import wordnet\n'), ((7927, 7962), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word_list_4[idx_5]'], {}), '(word_list_4[idx_5])\n', (7942, 7962), False, 'from nltk.corpus import wordnet\n'), ((7491, 7526), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word_list_3[idx_4]'], {}), '(word_list_3[idx_4])\n', (7506, 7526), False, 'from nltk.corpus import wordnet\n'), ((7980, 8015), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', (['word_list_4[idx_5]'], {}), '(word_list_4[idx_5])\n', (7995, 8015), False, 'from nltk.corpus import wordnet\n')]
|
"""
THIS CODE IS UNDER THE BSD 2-Clause LICENSE. YOU CAN FIND THE COMPLETE
FILE AT THE SOURCE DIRECTORY.
Copyright (C) 2017 <NAME> - All rights reserved
@author : <EMAIL>
Publication:
A Novel Unsupervised Analysis of Electrophysiological
Signals Reveals New Sleep Sub-stages in Mice
*****************************************************************************
Class implementing the mean-covariance Restricted Boltzmann Machine (mcRBM)
by <NAME>.
It is based on the original code with minor modifications according to
the needs of our experiments.
Refer to:
"<NAME>, <NAME>, "Modeling Pixel Means and Covariances Using
Factorized Third-Order Boltzmann Machines", CVPR 2010"
You can find the original code at
http://www.cs.toronto.edu/~ranzato/publications/mcRBM/code/mcRBM_04May2010.zip
COPYRIGHT of the original code has been included in the current directory.
<vkatsageorgiou@vassia-PC>
"""
import sys
import numpy as np
import os
import cudamat as cmt
import _pickle as cPickle
import matplotlib.pyplot as plt
import shutil
from numpy.random import RandomState
from scipy.io import loadmat, savemat
from configparser import *
from datetime import datetime
import sys
sys.path.insert(0, '../dataPreprocessing/')
from dataPreproc import DataPreproc
class mcRBM:
def __init__(self, refDir, expConfigFilename, modelConfigFilename, gpuId):
# directory containing all the configuration files for the experiment
self.refDir = refDir
# file with configuration details for the launched experiment
self.expConfigFilename = refDir + '/' + expConfigFilename
# file with configuration details for the model to be trained
self.modelConfigFilename = refDir + '/' + modelConfigFilename
# data pre-processing object
self.dpp = DataPreproc()
# loading details from configuration files
self.loadExpConfig()
self.loadModelConfig()
# id of the GPU which will be used for computation
self.gpuId = int(gpuId)
def loadExpConfig(self):
'''
Function loading the configuration details for the experiment &
        data pre-processing flags
'''
config = ConfigParser()
config.read(self.expConfigFilename)
self.npRandSeed = config.getint('PARAMETERS','npRandSeed')
self.npRandState = config.getint('PARAMETERS','npRandState')
self.dataDir = config.get('EXP_DETAILS','dsetDir')
self.expsDir = config.get('EXP_DETAILS','expsDir')
self.expName = config.get('EXP_DETAILS','expID')
self.dSetName = config.get('EXP_DETAILS','dSetName')
self.logFlag = config.getboolean('EXP_DETAILS','logFlag')
self.meanSubtructionFlag = config.getboolean('EXP_DETAILS','meanSubtructionFlag')
self.scaleFlag = config.getboolean('EXP_DETAILS','scaleFlag')
self.scaling = config.get('EXP_DETAILS','scaling')
self.doPCA = config.getboolean('EXP_DETAILS','doPCA')
self.whitenFlag = config.getboolean('EXP_DETAILS','whitenFlag')
self.rescaleFlag = config.getboolean('EXP_DETAILS','rescaleFlag')
self.rescaling = config.get('EXP_DETAILS','rescaling')
self.dataFilename = self.dataDir + self.dSetName
self.saveDir = self.expsDir + self.expName
if not os.path.exists(self.saveDir):
os.makedirs(self.saveDir)
#shutil.copy2(self.expConfigFilename, self.saveDir)
#shutil.copy2(self.modelConfigFilename, self.saveDir)
def loadModelConfig(self):
'''
Function loading the configuration details for the model to be trained
'''
config = ConfigParser()
config.read(self.modelConfigFilename)
self.verbose = config.getint('VERBOSITY','verbose')
self.num_epochs = config.getint('MAIN_PARAMETER_SETTING','num_epochs')
self.batch_size = config.getint('MAIN_PARAMETER_SETTING','batch_size')
self.startFH = config.getint('MAIN_PARAMETER_SETTING','startFH')
self.startwd = config.getint('MAIN_PARAMETER_SETTING','startwd')
self.doPCD = config.getint('MAIN_PARAMETER_SETTING','doPCD')
# model parameters
self.num_fac = config.getint('MODEL_PARAMETER_SETTING','num_fac')
self.num_hid_cov = config.getint('MODEL_PARAMETER_SETTING','num_hid_cov')
self.num_hid_mean = config.getint('MODEL_PARAMETER_SETTING','num_hid_mean')
self.apply_mask = config.getint('MODEL_PARAMETER_SETTING','apply_mask')
self.epsilon = config.getfloat('OPTIMIZER_PARAMETERS','epsilon')
self.weightcost_final = config.getfloat('OPTIMIZER_PARAMETERS','weightcost_final')
self.hmc_step_nr = config.getint('HMC_PARAMETERS','hmc_step_nr')
self.hmc_target_ave_rej = config.getfloat('HMC_PARAMETERS','hmc_target_ave_rej')
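        # Illustrative model-config sketch (added note; the section and key
        # names mirror the config reads above, the values are assumptions only):
        #
        #   [VERBOSITY]
        #   verbose = 1
        #   [MAIN_PARAMETER_SETTING]
        #   num_epochs = 1000
        #   batch_size = 128
        #   startFH = 10
        #   startwd = 10
        #   doPCD = 1
        #   [MODEL_PARAMETER_SETTING]
        #   num_fac = 256
        #   num_hid_cov = 256
        #   num_hid_mean = 100
        #   apply_mask = 0
        #   [OPTIMIZER_PARAMETERS]
        #   epsilon = 0.01
        #   weightcost_final = 0.001
        #   [HMC_PARAMETERS]
        #   hmc_step_nr = 20
        #   hmc_target_ave_rej = 0.1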
#-- Data Loading function:
def loadData(self):
'''
Function loading the data
'''
# Create save folder
if not os.path.exists(self.saveDir + '/dataDetails/'):
os.makedirs(self.saveDir + '/dataDetails/')
# load data file:
if self.dataFilename.split('.')[1] == 'npz':
dLoad = np.load(self.dataFilename)
        elif self.dataFilename.split('.')[1] == 'mat':
dLoad = loadmat(self.dataFilename)
else:
print("error! Unrecognized data file")
self.d = dLoad['d']
self.obsKeys = dLoad['epochsLinked']
self.epochTime = dLoad['epochTime']
"""
If you want to keep only EEG features, uncomment next line.
"""
#self.d = self.d[:, :self.d.shape[1]-1]
self.d = np.array(self.d, dtype=np.float32)
self.obsKeys = np.array(self.obsKeys, dtype=np.float32)
print("initial size: ", self.d.shape)
#print("FrameIDs : ", self.obsKeys, "of shape : ", self.obsKeys.shape)
with open (self.saveDir + '/dataDetails/' + 'initialData.txt','w') as f:
f.write("\n Modeling: %s " % self.dataFilename)
f.write("\n Dataset size: %s " % str(self.d.shape))
f.write("\n Dataset type: %s " % str(self.d.dtype))
f.write("\n \n d_min: %s " % str(np.min(self.d, axis=0)))
f.write("\n \n d_max: %s " % str(np.max(self.d, axis=0)))
f.write("\n \n d_mean: %s " % str(np.mean(self.d, axis=0)))
f.write("\n \n d_std: %s " % str(np.std(self.d, axis=0)))
f.close()
# Function taken from original code
def compute_energy_mcRBM(self, data,normdata,vel,energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t6,feat,featsq,feat_mean,length,lengthsq,normcoeff,small,num_vis):
# normalize input data vectors
data.mult(data, target = t6) # DxP (nr input dims x nr samples)
t6.sum(axis = 0, target = lengthsq) # 1xP
lengthsq.mult(0.5, target = energy) # energy of quadratic regularization term
lengthsq.mult(1./num_vis) # normalize by number of components (like std)
lengthsq.add(small) # small prevents division by 0
cmt.sqrt(lengthsq, target = length)
length.reciprocal(target = normcoeff) # 1xP
data.mult_by_row(normcoeff, target = normdata) # normalized data
## potential
# covariance contribution
cmt.dot(VF.T, normdata, target = feat) # HxP (nr factors x nr samples)
feat.mult(feat, target = featsq) # HxP
cmt.dot(FH.T,featsq, target = t1) # OxP (nr cov hiddens x nr samples)
t1.mult(-0.5)
t1.add_col_vec(bias_cov) # OxP
cmt.exp(t1) # OxP
t1.add(1, target = t2) # OxP
cmt.log(t2)
t2.mult(-1)
energy.add_sums(t2, axis=0)
# mean contribution
cmt.dot(w_mean.T, data, target = feat_mean) # HxP (nr mean hiddens x nr samples)
feat_mean.add_col_vec(bias_mean) # HxP
cmt.exp(feat_mean)
feat_mean.add(1)
cmt.log(feat_mean)
feat_mean.mult(-1)
energy.add_sums(feat_mean, axis=0)
# visible bias term
data.mult_by_col(bias_vis, target = t6)
t6.mult(-1) # DxP
energy.add_sums(t6, axis=0) # 1xP
# kinetic
vel.mult(vel, target = t6)
energy.add_sums(t6, axis = 0, mult = .5)
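        # Summary (added note): for each sample v with HMC velocity vel, the
        # accumulated energy above is
        #   0.5*sum(v^2)
        #   - sum_k log(1 + exp(-0.5*FH'*(VF'*v_norm)^2 + bias_cov))  (covariance hiddens)
        #   - sum_j log(1 + exp(w_mean'*v + bias_mean))               (mean hiddens)
        #   - bias_vis'*v                                             (visible bias)
        #   + 0.5*sum(vel^2)                                          (kinetic term)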
# same as the previous function. Needed only if the energy has to be computed
# and stored to check the training process
def compute_energy_mcRBM_visual(self, data,normdata,energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t6,feat,featsq,feat_mean,length,lengthsq,normcoeff,small,num_vis):
# normalize input data vectors
data.mult(data, target = t6) # DxP (nr input dims x nr samples)
t6.sum(axis = 0, target = lengthsq) # 1xP
lengthsq.mult(0.5, target = energy) # energy of quadratic regularization term
lengthsq.mult(1./num_vis) # normalize by number of components (like std)
lengthsq.add(small) # small prevents division by 0
cmt.sqrt(lengthsq, target = length)
length.reciprocal(target = normcoeff) # 1xP
data.mult_by_row(normcoeff, target = normdata) # normalized data
## potential
# covariance contribution
cmt.dot(VF.T, normdata, target = feat) # HxP (nr factors x nr samples)
feat.mult(feat, target = featsq) # HxP
cmt.dot(FH.T,featsq, target = t1) # OxP (nr cov hiddens x nr samples)
t1.mult(-0.5)
t1.add_col_vec(bias_cov) # OxP
cmt.exp(t1) # OxP
t1.add(1, target = t2) # OxP
cmt.log(t2)
t2.mult(-1)
energy.add_sums(t2, axis=0)
# mean contribution
cmt.dot(w_mean.T, data, target = feat_mean) # HxP (nr mean hiddens x nr samples)
feat_mean.add_col_vec(bias_mean) # HxP
cmt.exp(feat_mean)
feat_mean.add(1)
cmt.log(feat_mean)
feat_mean.mult(-1)
energy.add_sums(feat_mean, axis=0)
# visible bias term
data.mult_by_col(bias_vis, target = t6)
t6.mult(-1) # DxP
energy.add_sums(t6, axis=0) # 1xP
# kinetic
data.mult(data, target = t6)
energy.add_sums(t6, axis = 0, mult = .5)
# Function taken from original code
#################################################################
# compute the derivative if the free energy at a given input
def compute_gradient_mcRBM(self, data,normdata,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t3,t4,t6,feat,featsq,feat_mean,gradient,normgradient,length,lengthsq,normcoeff,small,num_vis):
# normalize input data
data.mult(data, target = t6) # DxP
t6.sum(axis = 0, target = lengthsq) # 1xP
lengthsq.mult(1./num_vis) # normalize by number of components (like std)
lengthsq.add(small)
cmt.sqrt(lengthsq, target = length)
length.reciprocal(target = normcoeff) # 1xP
data.mult_by_row(normcoeff, target = normdata) # normalized data
cmt.dot(VF.T, normdata, target = feat) # HxP
feat.mult(feat, target = featsq) # HxP
cmt.dot(FH.T,featsq, target = t1) # OxP
t1.mult(-.5)
t1.add_col_vec(bias_cov) # OxP
t1.apply_sigmoid(target = t2) # OxP
cmt.dot(FH,t2, target = t3) # HxP
t3.mult(feat)
cmt.dot(VF, t3, target = normgradient) # VxP
# final bprop through normalization
length.mult(lengthsq, target = normcoeff)
normcoeff.reciprocal() # 1xP
normgradient.mult(data, target = gradient) # VxP
gradient.sum(axis = 0, target = t4) # 1xP
t4.mult(-1./num_vis)
data.mult_by_row(t4, target = gradient)
normgradient.mult_by_row(lengthsq, target = t6)
gradient.add(t6)
gradient.mult_by_row(normcoeff)
# add quadratic term gradient
gradient.add(data)
# add visible bias term
gradient.add_col_mult(bias_vis, -1)
# add MEAN contribution to gradient
cmt.dot(w_mean.T, data, target = feat_mean) # HxP
feat_mean.add_col_vec(bias_mean) # HxP
feat_mean.apply_sigmoid() # HxP
gradient.subtract_dot(w_mean,feat_mean) # VxP
# Function taken from original code
############################################################3
# Hybrid Monte Carlo sampler
def draw_HMC_samples(self, data,negdata,normdata,vel,gradient,normgradient,new_energy,old_energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,hmc_step,hmc_step_nr,hmc_ave_rej,hmc_target_ave_rej,t1,t2,t3,t4,t5,t6,t7,thresh,feat,featsq,batch_size,feat_mean,length,lengthsq,normcoeff,small,num_vis):
vel.fill_with_randn()
negdata.assign(data)
self.compute_energy_mcRBM(negdata,normdata,vel,old_energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t6,feat,featsq,feat_mean,length,lengthsq,normcoeff,small,num_vis)
self.compute_gradient_mcRBM(negdata,normdata,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t3,t4,t6,feat,featsq,feat_mean,gradient,normgradient,length,lengthsq,normcoeff,small,num_vis)
# half step
vel.add_mult(gradient, -0.5*hmc_step)
negdata.add_mult(vel,hmc_step)
# full leap-frog steps
for ss in range(hmc_step_nr - 1):
## re-evaluate the gradient
self.compute_gradient_mcRBM(negdata,normdata,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t3,t4,t6,feat,featsq,feat_mean,gradient,normgradient,length,lengthsq,normcoeff,small,num_vis)
# update variables
vel.add_mult(gradient, -hmc_step)
negdata.add_mult(vel,hmc_step)
# final half-step
self.compute_gradient_mcRBM(negdata,normdata,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t3,t4,t6,feat,featsq,feat_mean,gradient,normgradient,length,lengthsq,normcoeff,small,num_vis)
vel.add_mult(gradient, -0.5*hmc_step)
# compute new energy
self.compute_energy_mcRBM(negdata,normdata,vel,new_energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t6,feat,featsq,feat_mean,length,lengthsq,normcoeff,small,num_vis)
        # rejection
old_energy.subtract(new_energy, target = thresh)
cmt.exp(thresh)
t4.fill_with_rand()
t4.less_than(thresh)
# update negdata and rejection rate
t4.mult(-1)
t4.add(1) # now 1's detect rejections
t4.sum(axis = 1, target = t5)
t5.copy_to_host()
rej = t5.numpy_array[0,0]/batch_size
data.mult_by_row(t4, target = t6)
negdata.mult_by_row(t4, target = t7)
negdata.subtract(t7)
negdata.add(t6)
hmc_ave_rej = 0.9*hmc_ave_rej + 0.1*rej
if hmc_ave_rej < hmc_target_ave_rej:
hmc_step = min(hmc_step*1.01,0.25)
else:
hmc_step = max(hmc_step*0.99,.001)
return hmc_step, hmc_ave_rej
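        # Note (added): the sampler runs hmc_step_nr leap-frog steps per draw,
        # rejects samples column-wise via the energy difference, and adapts
        # hmc_step towards hmc_target_ave_rej (multiplied by 1.01, capped at
        # 0.25, when the running rejection rate is below target; by 0.99,
        # floored at 0.001, otherwise).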
def saveLsq(self):
'''
Function saving the sum of the square of the data
(needed for training as well as for post-analysis)
'''
d = self.d.astype(np.float32)
dsq = np.square(d)
lsq = np.sum(dsq, axis=0)
with open( self.refDir + 'lsqComplete.pkl', 'wb') as pklFile:
cPickle.dump(lsq, pklFile)
def train(self):
'''
Main train function : modified version of the original train function.
Additions : GPU selection (useful for multi-GPU machines)
Saving the sum of the square of the data for post-processing
Visible data are saved
Data samples are permuted for training
Weights are saved every 100 training epochs
Training energy is visualized every 100 training epochs
        NOTE : the learning-rate annealing used in the initial code is NOT used here!
'''
#plt.ion()
f1 = plt.figure()
ax1 = f1.add_subplot(111)
#ax2 = f1.add_subplot(122)
#plt.show()
cmt.cuda_set_device(self.gpuId)
cmt.cublas_init()
cmt.CUDAMatrix.init_random(1)
np.random.seed(self.npRandSeed)
prng = RandomState(self.npRandState)
################################################################
##################### CHANGE PATH ##############################
# Move to current experiment path:
os.chdir(self.saveDir)
# Get current path:
os.getcwd()
self.plotsDir = 'plots'
#self.probabilitiesDir = 'p_all'
if not os.path.isdir(self.plotsDir):
os.makedirs(self.plotsDir)
if not os.path.isdir(self.plotsDir + '/energy'):
os.makedirs(self.plotsDir + '/energy')
#if not os.path.isdir(self.probabilitiesDir):
# os.makedirs(self.probabilitiesDir)
if not os.path.isdir('weights'):
os.makedirs('weights')
d = self.d.astype(np.float32)
print("visible size: ", d.shape)
dsq = np.square(d)
lsq = np.sum(dsq, axis=0)
with open('lsqComplete.pkl', 'wb') as pklFile:
cPickle.dump(lsq, pklFile)
del dsq, lsq
# Save visible data :
visData = d
np.savez('visData.npz', data=d, obsKeys=self.obsKeys, epochTime=self.epochTime)
with open ('visData.txt','w') as f:
f.write("\n Dataset : %s" %(self.dataFilename))
f.write("\n visData size: %s " % str(visData.shape))
f.write("\n visData type: %s " % str(visData.dtype))
f.write("\n \n visData Range: %s " % str(np.max(visData, axis=0)-np.min(visData, axis=0)))
f.write("\n \n visData min: %s " % str(np.min(visData, axis=0)))
f.write("\n \n visData max: %s " % str(np.max(visData, axis=0)))
f.write("\n \n visData mean: %s " % str(np.mean(visData, axis=0)))
f.write("\n \n visData std: %s " % str(np.std(visData, axis=0)))
f.close()
del visData #if not needed for computing the latent states
permIdx = prng.permutation(d.shape[0])
d = d[permIdx,:]
#subsetting train and test datasets
#trainPerc = 0.7
#trainSampNum = int(np.ceil(trainPerc*d.shape[0]))
#trainSampNum = int(np.floor(trainSampNum/self.batch_size)*self.batch_size)
#testSampNum = int(d.shape[0]-trainSampNum-1)
# The test dataset is not used at the moment, it can be used as
# a validation set to check for overfitting. To use it, uncomment
# all the variables with 'test' in their name
#~ d_test = d[trainSampNum+1:,:]
#d = d[:trainSampNum,:]
#obsKeys = self.obsKeys[:trainSampNum]
totnumcases = d.shape[0]
num_vis = d.shape[1]
num_batches = int(totnumcases/self.batch_size)
print("num_batches: ", num_batches)
dev_dat = cmt.CUDAMatrix(d.T) # VxP
#~ test_dat = cmt.CUDAMatrix(d_test.T)
del d, self.d, self.epochTime, self.obsKeys
# training parameters (as in the original code by Ranzato)
epsilon = self.epsilon
epsilonVF = 2*epsilon
epsilonFH = 0.02*epsilon
epsilonb = 0.02*epsilon
epsilonw_mean = 0.2*epsilon
epsilonb_mean = 0.1*epsilon
weightcost_final = self.weightcost_final
# HMC setting
hmc_step_nr = self.hmc_step_nr
hmc_step = 0.01
hmc_target_ave_rej = self.hmc_target_ave_rej
hmc_ave_rej = hmc_target_ave_rej
# initialize weights
VF = cmt.CUDAMatrix(np.array(0.02 * prng.randn(num_vis, self.num_fac), dtype=np.float32, order='F')) # VxH
if self.apply_mask == 0:
FH = cmt.CUDAMatrix( np.array( np.eye(self.num_fac,self.num_hid_cov), dtype=np.float32, order='F') ) # HxO
else:
dd = loadmat('your_FHinit_mask_file.mat') # see CVPR2010paper_material/topo2D_3x3_stride2_576filt.mat for an example
FH = cmt.CUDAMatrix( np.array( dd["FH"], dtype=np.float32, order='F') )
bias_cov = cmt.CUDAMatrix( np.array(2.0*np.ones((self.num_hid_cov, 1)), dtype=np.float32, order='F') )
bias_vis = cmt.CUDAMatrix( np.array(np.zeros((num_vis, 1)), dtype=np.float32, order='F') )
w_mean = cmt.CUDAMatrix( np.array( 0.05 * prng.randn(num_vis, self.num_hid_mean), dtype=np.float32, order='F') ) # VxH
bias_mean = cmt.CUDAMatrix( np.array( -2.0*np.ones((self.num_hid_mean,1)), dtype=np.float32, order='F') )
# initialize variables to store derivatives
VFinc = cmt.CUDAMatrix( np.array(np.zeros((num_vis, self.num_fac)), dtype=np.float32, order='F'))
FHinc = cmt.CUDAMatrix( np.array(np.zeros((self.num_fac, self.num_hid_cov)), dtype=np.float32, order='F'))
bias_covinc = cmt.CUDAMatrix( np.array(np.zeros((self.num_hid_cov, 1)), dtype=np.float32, order='F'))
bias_visinc = cmt.CUDAMatrix( np.array(np.zeros((num_vis, 1)), dtype=np.float32, order='F'))
w_meaninc = cmt.CUDAMatrix( np.array(np.zeros((num_vis, self.num_hid_mean)), dtype=np.float32, order='F'))
bias_meaninc = cmt.CUDAMatrix( np.array(np.zeros((self.num_hid_mean, 1)), dtype=np.float32, order='F'))
# initialize temporary storage
data = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
normdata = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
negdataini = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
feat = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, self.batch_size)), dtype=np.float32, order='F'))
featsq = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, self.batch_size)), dtype=np.float32, order='F'))
negdata = cmt.CUDAMatrix( np.array(prng.randn(num_vis, self.batch_size), dtype=np.float32, order='F'))
old_energy = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F'))
new_energy = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F'))
energy = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F'))
gradient = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
normgradient = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F')) # VxP
thresh = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F'))
feat_mean = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_mean, self.batch_size)), dtype=np.float32, order='F'))
vel = cmt.CUDAMatrix( np.array(prng.randn(num_vis, self.batch_size), dtype=np.float32, order='F'))
length = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F')) # 1xP
lengthsq = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F')) # 1xP
normcoeff = cmt.CUDAMatrix( np.array(np.zeros((1, self.batch_size)), dtype=np.float32, order='F')) # 1xP
# commented to avoid computing the energy on test data
#~ data_test = cmt.CUDAMatrix( np.array(np.empty((num_vis, testSampNum)), dtype=np.float32, order='F')) # Vxtest_batch
#~ normdata_test = cmt.CUDAMatrix( np.array(np.empty((num_vis, testSampNum)), dtype=np.float32, order='F')) # Vxtest_batch
#~ length_test = cmt.CUDAMatrix( np.array(np.zeros((1, testSampNum)), dtype=np.float32, order='F')) # 1xtest_batch
#~ lengthsq_test = cmt.CUDAMatrix( np.array(np.zeros((1, testSampNum)), dtype=np.float32, order='F')) # 1xtest_batch
#~ normcoeff_test = cmt.CUDAMatrix( np.array(np.zeros((1, testSampNum)), dtype=np.float32, order='F')) # 1xtest_batch
#~ vel_test = cmt.CUDAMatrix( np.array(prng.randn(num_vis, testSampNum), dtype=np.float32, order='F'))
#~ feat_test = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, testSampNum)), dtype=np.float32, order='F'))
#~ featsq_test = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, testSampNum)), dtype=np.float32, order='F'))
#~ feat_mean_test = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_mean, testSampNum)), dtype=np.float32, order='F'))
#~ energy_test = cmt.CUDAMatrix( np.array(np.zeros((1, testSampNum)), dtype=np.float32, order='F'))
if self.apply_mask==1: # this used to constrain very large FH matrices only allowing to change values in a neighborhood
dd = loadmat('your_FHinit_mask_file.mat')
mask = cmt.CUDAMatrix( np.array(dd["mask"], dtype=np.float32, order='F'))
normVF = 1
small = 0.5
# other temporary vars
t1 = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_cov, self.batch_size)), dtype=np.float32, order='F'))
t2 = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_cov, self.batch_size)), dtype=np.float32, order='F'))
t3 = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, self.batch_size)), dtype=np.float32, order='F'))
t4 = cmt.CUDAMatrix( np.array(np.empty((1,self.batch_size)), dtype=np.float32, order='F'))
t5 = cmt.CUDAMatrix( np.array(np.empty((1,1)), dtype=np.float32, order='F'))
t6 = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F'))
t7 = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.batch_size)), dtype=np.float32, order='F'))
t8 = cmt.CUDAMatrix( np.array(np.empty((num_vis, self.num_fac)), dtype=np.float32, order='F'))
t9 = cmt.CUDAMatrix( np.array(np.zeros((self.num_fac, self.num_hid_cov)), dtype=np.float32, order='F'))
t10 = cmt.CUDAMatrix( np.array(np.empty((1,self.num_fac)), dtype=np.float32, order='F'))
t11 = cmt.CUDAMatrix( np.array(np.empty((1,self.num_hid_cov)), dtype=np.float32, order='F'))
# commented to avoid computing the energy on test data
#~ t1_test = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_cov, testSampNum)), dtype=np.float32, order='F'))
#~ t2_test = cmt.CUDAMatrix( np.array(np.empty((self.num_hid_cov, testSampNum)), dtype=np.float32, order='F'))
#~ t3_test = cmt.CUDAMatrix( np.array(np.empty((self.num_fac, testSampNum)), dtype=np.float32, order='F'))
#~ t4_test = cmt.CUDAMatrix( np.array(np.empty((1,testSampNum)), dtype=np.float32, order='F'))
#~ t5_test = cmt.CUDAMatrix( np.array(np.empty((1,1)), dtype=np.float32, order='F'))
#~ t6_test = cmt.CUDAMatrix( np.array(np.empty((num_vis, testSampNum)), dtype=np.float32, order='F'))
meanEnergy = np.zeros(self.num_epochs)
minEnergy = np.zeros(self.num_epochs)
maxEnergy = np.zeros(self.num_epochs)
#~ meanEnergy_test = np.zeros(self.num_epochs)
#~ minEnergy_test = np.zeros(self.num_epochs)
#~ maxEnergy_test = np.zeros(self.num_epochs)
# start training
for epoch in range(self.num_epochs):
print ("Epoch " + str(epoch))
# anneal learning rates as found in the original code -
# uncomment if you wish to use annealing!
#~ epsilonVFc = epsilonVF/max(1,epoch/20)
#~ epsilonFHc = epsilonFH/max(1,epoch/20)
#~ epsilonbc = epsilonb/max(1,epoch/20)
#~ epsilonw_meanc = epsilonw_mean/max(1,epoch/20)
#~ epsilonb_meanc = epsilonb_mean/max(1,epoch/20)
# no annealing is used in our experiments because learning
# was stopping too early
epsilonVFc = epsilonVF
epsilonFHc = epsilonFH
epsilonbc = epsilonb
epsilonw_meanc = epsilonw_mean
epsilonb_meanc = epsilonb_mean
weightcost = weightcost_final
if epoch <= self.startFH:
epsilonFHc = 0
if epoch <= self.startwd:
weightcost = 0
# commented to avoid computing the energy on test data
#~ data_test = test_dat
#~ data_test.mult(data_test, target = t6_test) # DxP
#~ t6_test.sum(axis = 0, target = lengthsq_test) # 1xP
#~ lengthsq_test.mult(1./num_vis) # normalize by number of components (like std)
#~ lengthsq_test.add(small) # small avoids division by 0
#~ cmt.sqrt(lengthsq_test, target = length_test)
#~ length_test.reciprocal(target = normcoeff_test) # 1xP
#~ data_test.mult_by_row(normcoeff_test, target = normdata_test) # normalized data
for batch in range(num_batches):
# get current minibatch
data = dev_dat.slice(batch*self.batch_size,(batch + 1)*self.batch_size) # DxP (nr dims x nr samples)
# normalize input data
data.mult(data, target = t6) # DxP
t6.sum(axis = 0, target = lengthsq) # 1xP
lengthsq.mult(1./num_vis) # normalize by number of components (like std)
lengthsq.add(small) # small avoids division by 0
cmt.sqrt(lengthsq, target = length)
length.reciprocal(target = normcoeff) # 1xP
data.mult_by_row(normcoeff, target = normdata) # normalized data
## compute positive sample derivatives
# covariance part
cmt.dot(VF.T, normdata, target = feat) # HxP (nr facs x nr samples)
feat.mult(feat, target = featsq) # HxP
cmt.dot(FH.T,featsq, target = t1) # OxP (nr cov hiddens x nr samples)
t1.mult(-0.5)
t1.add_col_vec(bias_cov) # OxP
t1.apply_sigmoid(target = t2) # OxP
cmt.dot(featsq, t2.T, target = FHinc) # HxO
cmt.dot(FH,t2, target = t3) # HxP
t3.mult(feat)
cmt.dot(normdata, t3.T, target = VFinc) # VxH
t2.sum(axis = 1, target = bias_covinc)
bias_covinc.mult(-1)
# visible bias
data.sum(axis = 1, target = bias_visinc)
bias_visinc.mult(-1)
# mean part
cmt.dot(w_mean.T, data, target = feat_mean) # HxP (nr mean hiddens x nr samples)
feat_mean.add_col_vec(bias_mean) # HxP
feat_mean.apply_sigmoid() # HxP
feat_mean.mult(-1)
cmt.dot(data, feat_mean.T, target = w_meaninc)
feat_mean.sum(axis = 1, target = bias_meaninc)
# HMC sampling: draw an approximate sample from the model
if self.doPCD == 0: # CD-1 (set negative data to current training samples)
hmc_step, hmc_ave_rej = self.draw_HMC_samples(data,negdata,normdata,vel,gradient,normgradient,new_energy,old_energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,hmc_step,hmc_step_nr,hmc_ave_rej,hmc_target_ave_rej,t1,t2,t3,t4,t5,t6,t7,thresh,feat,featsq,self.batch_size,feat_mean,length,lengthsq,normcoeff,small,num_vis)
else: # PCD-1 (use previous negative data as starting point for chain)
negdataini.assign(negdata)
hmc_step, hmc_ave_rej = self.draw_HMC_samples(negdataini,negdata,normdata,vel,gradient,normgradient,new_energy,old_energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,hmc_step,hmc_step_nr,hmc_ave_rej,hmc_target_ave_rej,t1,t2,t3,t4,t5,t6,t7,thresh,feat,featsq,self.batch_size,feat_mean,length,lengthsq,normcoeff,small,num_vis)
# compute derivatives at the negative samples
# normalize input data
negdata.mult(negdata, target = t6) # DxP
t6.sum(axis = 0, target = lengthsq) # 1xP
lengthsq.mult(1./num_vis) # normalize by number of components (like std)
lengthsq.add(small)
cmt.sqrt(lengthsq, target = length)
length.reciprocal(target = normcoeff) # 1xP
negdata.mult_by_row(normcoeff, target = normdata) # normalized data
# covariance part
cmt.dot(VF.T, normdata, target = feat) # HxP
feat.mult(feat, target = featsq) # HxP
cmt.dot(FH.T,featsq, target = t1) # OxP
t1.mult(-0.5)
t1.add_col_vec(bias_cov) # OxP
t1.apply_sigmoid(target = t2) # OxP
FHinc.subtract_dot(featsq, t2.T) # HxO
FHinc.mult(0.5)
cmt.dot(FH,t2, target = t3) # HxP
t3.mult(feat)
VFinc.subtract_dot(normdata, t3.T) # VxH
bias_covinc.add_sums(t2, axis = 1)
# visible bias
bias_visinc.add_sums(negdata, axis = 1)
# mean part
cmt.dot(w_mean.T, negdata, target = feat_mean) # HxP
feat_mean.add_col_vec(bias_mean) # HxP
feat_mean.apply_sigmoid() # HxP
w_meaninc.add_dot(negdata, feat_mean.T)
bias_meaninc.add_sums(feat_mean, axis = 1)
# update parameters
VFinc.add_mult(VF.sign(), weightcost) # L1 regularization
VF.add_mult(VFinc, -epsilonVFc/self.batch_size)
# normalize columns of VF: normalize by running average of their norm
VF.mult(VF, target = t8)
t8.sum(axis = 0, target = t10)
cmt.sqrt(t10)
t10.sum(axis=1,target = t5)
t5.copy_to_host()
normVF = .95*normVF + (.05/self.num_fac) * t5.numpy_array[0,0] # estimate norm
t10.reciprocal()
VF.mult_by_row(t10)
VF.mult(normVF)
bias_cov.add_mult(bias_covinc, -epsilonbc/self.batch_size)
bias_vis.add_mult(bias_visinc, -epsilonbc/self.batch_size)
if epoch > self.startFH:
FHinc.add_mult(FH.sign(), weightcost) # L1 regularization
FH.add_mult(FHinc, -epsilonFHc/self.batch_size) # update
# set to 0 negative entries in FH
FH.greater_than(0, target = t9)
FH.mult(t9)
if self.apply_mask==1:
FH.mult(mask)
# normalize columns of FH: L1 norm set to 1 in each column
FH.sum(axis = 0, target = t11)
t11.reciprocal()
FH.mult_by_row(t11)
w_meaninc.add_mult(w_mean.sign(),weightcost)
w_mean.add_mult(w_meaninc, -epsilonw_meanc/self.batch_size)
bias_mean.add_mult(bias_meaninc, -epsilonb_meanc/self.batch_size)
if self.verbose == 1:
print( "VF: " + '%3.2e' % VF.euclid_norm() + ", DVF: " + '%3.2e' % (VFinc.euclid_norm()*(epsilonVFc/self.batch_size)) + ", FH: " + '%3.2e' % FH.euclid_norm() + ", DFH: " + '%3.2e' % (FHinc.euclid_norm()*(epsilonFHc/self.batch_size)) + ", bias_cov: " + '%3.2e' % bias_cov.euclid_norm() + ", Dbias_cov: " + '%3.2e' % (bias_covinc.euclid_norm()*(epsilonbc/self.batch_size)) + ", bias_vis: " + '%3.2e' % bias_vis.euclid_norm() + ", Dbias_vis: " + '%3.2e' % (bias_visinc.euclid_norm()*(epsilonbc/self.batch_size)) + ", wm: " + '%3.2e' % w_mean.euclid_norm() + ", Dwm: " + '%3.2e' % (w_meaninc.euclid_norm()*(epsilonw_meanc/self.batch_size)) + ", bm: " + '%3.2e' % bias_mean.euclid_norm() + ", Dbm: " + '%3.2e' % (bias_meaninc.euclid_norm()*(epsilonb_meanc/self.batch_size)) + ", step: " + '%3.2e' % hmc_step + ", rej: " + '%3.2e' % hmc_ave_rej )
with open ('terminal.txt','a') as f:
f.write('\n' + "epoch: %s" % str(epoch) + ", VF: " + '%3.2e' % VF.euclid_norm() + ", DVF: " + '%3.2e' % (VFinc.euclid_norm()*(epsilonVFc/self.batch_size)) + ", FH: " + '%3.2e' % FH.euclid_norm() + ", DFH: " + '%3.2e' % (FHinc.euclid_norm()*(epsilonFHc/self.batch_size)) + ", bias_cov: " + '%3.2e' % bias_cov.euclid_norm() + ", Dbias_cov: " + '%3.2e' % (bias_covinc.euclid_norm()*(epsilonbc/self.batch_size)) + ", bias_vis: " + '%3.2e' % bias_vis.euclid_norm() + ", Dbias_vis: " + '%3.2e' % (bias_visinc.euclid_norm()*(epsilonbc/self.batch_size)) + ", wm: " + '%3.2e' % w_mean.euclid_norm() + ", Dwm: " + '%3.2e' % (w_meaninc.euclid_norm()*(epsilonw_meanc/self.batch_size)) + ", bm: " + '%3.2e' % bias_mean.euclid_norm() + ", Dbm: " + '%3.2e' % (bias_meaninc.euclid_norm()*(epsilonb_meanc/self.batch_size)) + ", step: " + '%3.2e' % hmc_step + ", rej: " + '%3.2e' % hmc_ave_rej )
sys.stdout.flush()
            # compute the energy on the training data (used to monitor training)
self.compute_energy_mcRBM_visual(data,normdata,energy,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1,t2,t6,feat,featsq,feat_mean,length,lengthsq,normcoeff,small,num_vis)
energy.copy_to_host()
meanEnergy[epoch] = np.mean(energy.numpy_array)
minEnergy[epoch] = np.min(energy.numpy_array)
maxEnergy[epoch] = np.max(energy.numpy_array)
# commented to avoid computing the energy on test data
#~ self.compute_energy_mcRBM_visual(data_test,normdata_test,energy_test,VF,FH,bias_cov,bias_vis,w_mean,bias_mean,t1_test,t2_test,t6_test,feat_test,featsq_test,feat_mean_test,length_test,lengthsq_test,normcoeff_test,small,num_vis)
#~ energy_test.copy_to_host()
#~ meanEnergy_test[epoch] = np.mean(energy_test.numpy_array)
#~ minEnergy_test[epoch] = np.min(energy_test.numpy_array)
#~ maxEnergy_test[epoch] = np.max(energy_test.numpy_array)
ax1.cla()
ax1.plot(range(epoch), meanEnergy[0:epoch])
ax1.plot(range(epoch), maxEnergy[0:epoch])
ax1.plot(range(epoch), minEnergy[0:epoch])
if np.mod(epoch,100) == 0:
#f1.savefig(output_folder + str(epoch)+'_'+'fig.png')
f1.savefig(self.plotsDir + '/energy/energyAt_%s.png' %str(epoch))
# back-up every once in a while
if np.mod(epoch,100) == 0:
VF.copy_to_host()
FH.copy_to_host()
bias_cov.copy_to_host()
w_mean.copy_to_host()
bias_mean.copy_to_host()
bias_vis.copy_to_host()
savemat("./weights/ws_temp%s" %str(epoch), {'VF':VF.numpy_array,'FH':FH.numpy_array,'bias_cov': bias_cov.numpy_array, 'bias_vis': bias_vis.numpy_array,'w_mean': w_mean.numpy_array, 'bias_mean': bias_mean.numpy_array, 'epoch':epoch})
            # uncomment if computing the energy in order to store its evolution throughout training
#~ savemat(self.refDir + '/' + "training_energy_" + str(self.num_fac) + "_cov" + str(self.num_hid_cov) + "_mean" + str(self.num_hid_mean), {'meanEnergy':meanEnergy,'meanEnergy_test':meanEnergy_test,'maxEnergy': maxEnergy, 'maxEnergy_test': maxEnergy_test, 'minEnergy': minEnergy, 'minEnergy_test': minEnergy_test, 'epoch':epoch})
#savemat("training_energy_" + str(self.num_fac) + "_cov" + str(self.num_hid_cov) + "_mean" + str(self.num_hid_mean), {'meanEnergy':meanEnergy, 'maxEnergy': maxEnergy, 'minEnergy': minEnergy, 'epoch':epoch})
# in order to stop the training gracefully, create an empty file
            # named 'stop_now' in the experiment output folder (the working
            # directory is switched to saveDir at the beginning of train())
if os.path.isfile('stop_now'):
break
# final back-up
VF.copy_to_host()
FH.copy_to_host()
bias_cov.copy_to_host()
bias_vis.copy_to_host()
w_mean.copy_to_host()
bias_mean.copy_to_host()
savemat("ws_fac%s" %str(self.num_fac) + "_cov%s" %str(self.num_hid_cov) + "_mean%s" %str(self.num_hid_mean), {'VF':VF.numpy_array,'FH':FH.numpy_array,'bias_cov': bias_cov.numpy_array, 'bias_vis': bias_vis.numpy_array, 'w_mean': w_mean.numpy_array, 'bias_mean': bias_mean.numpy_array, 'epoch':epoch})
        # uncomment if computing the energy in order to store its evolution throughout training
#~ savemat(self.refDir + '/' + "training_energy_" + str(self.num_fac) + "_cov" + str(self.num_hid_cov) + "_mean" + str(self.num_hid_mean), {'meanEnergy':meanEnergy,'meanEnergy_test':meanEnergy_test,'maxEnergy': maxEnergy, 'maxEnergy_test': maxEnergy_test, 'minEnergy': minEnergy, 'minEnergy_test': minEnergy_test, 'epoch':epoch})
savemat("training_energy_" + str(self.num_fac) + "_cov" + str(self.num_hid_cov) + "_mean" + str(self.num_hid_mean), {'meanEnergy':meanEnergy, 'maxEnergy': maxEnergy, 'minEnergy': minEnergy, 'epoch':epoch})
# Compute states if desired:
# normalise data for covariance hidden:
#dsq = np.square(visData)
#lsq = np.sum(dsq, axis=0)
#lsq /= visData.shape[1]
#lsq += np.spacing(1)
#l = np.sqrt(lsq)
#normD = visData/l
#logisticArg_c = (-0.5*np.dot(FH.numpy_array.T, np.square(np.dot(VF.numpy_array.T, normD.T))) + bias_cov.numpy_array).T
#p_hc = logisticFunc(logisticArg_c)
#logisticArg_m = np.dot(visData, w_mean.numpy_array) + bias_mean.numpy_array.T
#p_hm = logisticFunc(logisticArg_m)
#p_all = np.concatenate((p_hc, p_hm), axis=1)
#savemat(self.probabilitiesDir + '/pAll_%i.mat' % epoch, mdict={'p_all':p_all})
with open('done', 'w') as doneFile:
doneFile.write(datetime.strftime(datetime.now(), '%d/%m/%Y %H:%M:%S'))
#doneFile.closed
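# Usage sketch (added note; directory, file names and GPU id are illustrative):
# the class is driven by an experiment config and a model config living in refDir.
#
#   rbm = mcRBM(refDir='./exp01', expConfigFilename='exp.cfg',
#               modelConfigFilename='model.cfg', gpuId='0')
#   rbm.loadData()
#   rbm.train()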
|
[
"cudamat.cublas_init",
"sys.path.insert",
"_pickle.dump",
"scipy.io.loadmat",
"numpy.array",
"cudamat.dot",
"numpy.mod",
"numpy.random.RandomState",
"os.path.exists",
"numpy.savez",
"numpy.mean",
"cudamat.CUDAMatrix",
"numpy.max",
"cudamat.log",
"os.path.isdir",
"numpy.empty",
"numpy.random.seed",
"numpy.min",
"dataPreproc.DataPreproc",
"sys.stdout.flush",
"numpy.eye",
"numpy.ones",
"cudamat.sqrt",
"numpy.square",
"os.path.isfile",
"numpy.std",
"cudamat.cuda_set_device",
"os.makedirs",
"cudamat.exp",
"os.getcwd",
"os.chdir",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"datetime.datetime.now",
"cudamat.CUDAMatrix.init_random",
"numpy.load"
] |
[((1300, 1343), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../dataPreprocessing/"""'], {}), "(0, '../dataPreprocessing/')\n", (1315, 1343), False, 'import sys\n'), ((1913, 1926), 'dataPreproc.DataPreproc', 'DataPreproc', ([], {}), '()\n', (1924, 1926), False, 'from dataPreproc import DataPreproc\n'), ((5883, 5917), 'numpy.array', 'np.array', (['self.d'], {'dtype': 'np.float32'}), '(self.d, dtype=np.float32)\n', (5891, 5917), True, 'import numpy as np\n'), ((5941, 5981), 'numpy.array', 'np.array', (['self.obsKeys'], {'dtype': 'np.float32'}), '(self.obsKeys, dtype=np.float32)\n', (5949, 5981), True, 'import numpy as np\n'), ((7326, 7359), 'cudamat.sqrt', 'cmt.sqrt', (['lengthsq'], {'target': 'length'}), '(lengthsq, target=length)\n', (7334, 7359), True, 'import cudamat as cmt\n'), ((7555, 7591), 'cudamat.dot', 'cmt.dot', (['VF.T', 'normdata'], {'target': 'feat'}), '(VF.T, normdata, target=feat)\n', (7562, 7591), True, 'import cudamat as cmt\n'), ((7683, 7715), 'cudamat.dot', 'cmt.dot', (['FH.T', 'featsq'], {'target': 't1'}), '(FH.T, featsq, target=t1)\n', (7690, 7715), True, 'import cudamat as cmt\n'), ((7822, 7833), 'cudamat.exp', 'cmt.exp', (['t1'], {}), '(t1)\n', (7829, 7833), True, 'import cudamat as cmt\n'), ((7885, 7896), 'cudamat.log', 'cmt.log', (['t2'], {}), '(t2)\n', (7892, 7896), True, 'import cudamat as cmt\n'), ((7989, 8030), 'cudamat.dot', 'cmt.dot', (['w_mean.T', 'data'], {'target': 'feat_mean'}), '(w_mean.T, data, target=feat_mean)\n', (7996, 8030), True, 'import cudamat as cmt\n'), ((8125, 8143), 'cudamat.exp', 'cmt.exp', (['feat_mean'], {}), '(feat_mean)\n', (8132, 8143), True, 'import cudamat as cmt\n'), ((8178, 8196), 'cudamat.log', 'cmt.log', (['feat_mean'], {}), '(feat_mean)\n', (8185, 8196), True, 'import cudamat as cmt\n'), ((9224, 9257), 'cudamat.sqrt', 'cmt.sqrt', (['lengthsq'], {'target': 'length'}), '(lengthsq, target=length)\n', (9232, 9257), True, 'import cudamat as cmt\n'), ((9453, 9489), 'cudamat.dot', 'cmt.dot', (['VF.T', 'normdata'], {'target': 'feat'}), '(VF.T, normdata, target=feat)\n', (9460, 9489), True, 'import cudamat as cmt\n'), ((9581, 9613), 'cudamat.dot', 'cmt.dot', (['FH.T', 'featsq'], {'target': 't1'}), '(FH.T, featsq, target=t1)\n', (9588, 9613), True, 'import cudamat as cmt\n'), ((9720, 9731), 'cudamat.exp', 'cmt.exp', (['t1'], {}), '(t1)\n', (9727, 9731), True, 'import cudamat as cmt\n'), ((9783, 9794), 'cudamat.log', 'cmt.log', (['t2'], {}), '(t2)\n', (9790, 9794), True, 'import cudamat as cmt\n'), ((9887, 9928), 'cudamat.dot', 'cmt.dot', (['w_mean.T', 'data'], {'target': 'feat_mean'}), '(w_mean.T, data, target=feat_mean)\n', (9894, 9928), True, 'import cudamat as cmt\n'), ((10023, 10041), 'cudamat.exp', 'cmt.exp', (['feat_mean'], {}), '(feat_mean)\n', (10030, 10041), True, 'import cudamat as cmt\n'), ((10076, 10094), 'cudamat.log', 'cmt.log', (['feat_mean'], {}), '(feat_mean)\n', (10083, 10094), True, 'import cudamat as cmt\n'), ((11025, 11058), 'cudamat.sqrt', 'cmt.sqrt', (['lengthsq'], {'target': 'length'}), '(lengthsq, target=length)\n', (11033, 11058), True, 'import cudamat as cmt\n'), ((11198, 11234), 'cudamat.dot', 'cmt.dot', (['VF.T', 'normdata'], {'target': 'feat'}), '(VF.T, normdata, target=feat)\n', (11205, 11234), True, 'import cudamat as cmt\n'), ((11301, 11333), 'cudamat.dot', 'cmt.dot', (['FH.T', 'featsq'], {'target': 't1'}), '(FH.T, featsq, target=t1)\n', (11308, 11333), True, 'import cudamat as cmt\n'), ((11453, 11479), 'cudamat.dot', 'cmt.dot', (['FH', 't2'], {'target': 't3'}), '(FH, t2, target=t3)\n', (11460, 
11479), True, 'import cudamat as cmt\n'), ((11517, 11553), 'cudamat.dot', 'cmt.dot', (['VF', 't3'], {'target': 'normgradient'}), '(VF, t3, target=normgradient)\n', (11524, 11553), True, 'import cudamat as cmt\n'), ((12191, 12232), 'cudamat.dot', 'cmt.dot', (['w_mean.T', 'data'], {'target': 'feat_mean'}), '(w_mean.T, data, target=feat_mean)\n', (12198, 12232), True, 'import cudamat as cmt\n'), ((14347, 14362), 'cudamat.exp', 'cmt.exp', (['thresh'], {}), '(thresh)\n', (14354, 14362), True, 'import cudamat as cmt\n'), ((15247, 15259), 'numpy.square', 'np.square', (['d'], {}), '(d)\n', (15256, 15259), True, 'import numpy as np\n'), ((15274, 15293), 'numpy.sum', 'np.sum', (['dsq'], {'axis': '(0)'}), '(dsq, axis=0)\n', (15280, 15293), True, 'import numpy as np\n'), ((16029, 16041), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (16039, 16041), True, 'import matplotlib.pyplot as plt\n'), ((16148, 16179), 'cudamat.cuda_set_device', 'cmt.cuda_set_device', (['self.gpuId'], {}), '(self.gpuId)\n', (16167, 16179), True, 'import cudamat as cmt\n'), ((16188, 16205), 'cudamat.cublas_init', 'cmt.cublas_init', ([], {}), '()\n', (16203, 16205), True, 'import cudamat as cmt\n'), ((16214, 16243), 'cudamat.CUDAMatrix.init_random', 'cmt.CUDAMatrix.init_random', (['(1)'], {}), '(1)\n', (16240, 16243), True, 'import cudamat as cmt\n'), ((16261, 16292), 'numpy.random.seed', 'np.random.seed', (['self.npRandSeed'], {}), '(self.npRandSeed)\n', (16275, 16292), True, 'import numpy as np\n'), ((16309, 16338), 'numpy.random.RandomState', 'RandomState', (['self.npRandState'], {}), '(self.npRandState)\n', (16320, 16338), False, 'from numpy.random import RandomState\n'), ((16545, 16567), 'os.chdir', 'os.chdir', (['self.saveDir'], {}), '(self.saveDir)\n', (16553, 16567), False, 'import os\n'), ((16604, 16615), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16613, 16615), False, 'import os\n'), ((17195, 17207), 'numpy.square', 'np.square', (['d'], {}), '(d)\n', (17204, 17207), True, 'import numpy as np\n'), ((17222, 17241), 'numpy.sum', 'np.sum', (['dsq'], {'axis': '(0)'}), '(dsq, axis=0)\n', (17228, 17241), True, 'import numpy as np\n'), ((17433, 17512), 'numpy.savez', 'np.savez', (['"""visData.npz"""'], {'data': 'd', 'obsKeys': 'self.obsKeys', 'epochTime': 'self.epochTime'}), "('visData.npz', data=d, obsKeys=self.obsKeys, epochTime=self.epochTime)\n", (17441, 17512), True, 'import numpy as np\n'), ((19172, 19191), 'cudamat.CUDAMatrix', 'cmt.CUDAMatrix', (['d.T'], {}), '(d.T)\n', (19186, 19191), True, 'import cudamat as cmt\n'), ((27006, 27031), 'numpy.zeros', 'np.zeros', (['self.num_epochs'], {}), '(self.num_epochs)\n', (27014, 27031), True, 'import numpy as np\n'), ((27052, 27077), 'numpy.zeros', 'np.zeros', (['self.num_epochs'], {}), '(self.num_epochs)\n', (27060, 27077), True, 'import numpy as np\n'), ((27098, 27123), 'numpy.zeros', 'np.zeros', (['self.num_epochs'], {}), '(self.num_epochs)\n', (27106, 27123), True, 'import numpy as np\n'), ((3485, 3513), 'os.path.exists', 'os.path.exists', (['self.saveDir'], {}), '(self.saveDir)\n', (3499, 3513), False, 'import os\n'), ((3527, 3552), 'os.makedirs', 'os.makedirs', (['self.saveDir'], {}), '(self.saveDir)\n', (3538, 3552), False, 'import os\n'), ((5180, 5226), 'os.path.exists', 'os.path.exists', (["(self.saveDir + '/dataDetails/')"], {}), "(self.saveDir + '/dataDetails/')\n", (5194, 5226), False, 'import os\n'), ((5240, 5283), 'os.makedirs', 'os.makedirs', (["(self.saveDir + '/dataDetails/')"], {}), "(self.saveDir + '/dataDetails/')\n", (5251, 5283), False, 
'import os\n'), ((5392, 5418), 'numpy.load', 'np.load', (['self.dataFilename'], {}), '(self.dataFilename)\n', (5399, 5418), True, 'import numpy as np\n'), ((15376, 15402), '_pickle.dump', 'cPickle.dump', (['lsq', 'pklFile'], {}), '(lsq, pklFile)\n', (15388, 15402), True, 'import _pickle as cPickle\n'), ((16721, 16749), 'os.path.isdir', 'os.path.isdir', (['self.plotsDir'], {}), '(self.plotsDir)\n', (16734, 16749), False, 'import os\n'), ((16763, 16789), 'os.makedirs', 'os.makedirs', (['self.plotsDir'], {}), '(self.plotsDir)\n', (16774, 16789), False, 'import os\n'), ((16805, 16845), 'os.path.isdir', 'os.path.isdir', (["(self.plotsDir + '/energy')"], {}), "(self.plotsDir + '/energy')\n", (16818, 16845), False, 'import os\n'), ((16859, 16897), 'os.makedirs', 'os.makedirs', (["(self.plotsDir + '/energy')"], {}), "(self.plotsDir + '/energy')\n", (16870, 16897), False, 'import os\n'), ((17023, 17047), 'os.path.isdir', 'os.path.isdir', (['"""weights"""'], {}), "('weights')\n", (17036, 17047), False, 'import os\n'), ((17061, 17083), 'os.makedirs', 'os.makedirs', (['"""weights"""'], {}), "('weights')\n", (17072, 17083), False, 'import os\n'), ((17309, 17335), '_pickle.dump', 'cPickle.dump', (['lsq', 'pklFile'], {}), '(lsq, pklFile)\n', (17321, 17335), True, 'import _pickle as cPickle\n'), ((20143, 20179), 'scipy.io.loadmat', 'loadmat', (['"""your_FHinit_mask_file.mat"""'], {}), "('your_FHinit_mask_file.mat')\n", (20150, 20179), False, 'from scipy.io import loadmat, savemat\n'), ((24904, 24940), 'scipy.io.loadmat', 'loadmat', (['"""your_FHinit_mask_file.mat"""'], {}), "('your_FHinit_mask_file.mat')\n", (24911, 24940), False, 'from scipy.io import loadmat, savemat\n'), ((37408, 37435), 'numpy.mean', 'np.mean', (['energy.numpy_array'], {}), '(energy.numpy_array)\n', (37415, 37435), True, 'import numpy as np\n'), ((37467, 37493), 'numpy.min', 'np.min', (['energy.numpy_array'], {}), '(energy.numpy_array)\n', (37473, 37493), True, 'import numpy as np\n'), ((37525, 37551), 'numpy.max', 'np.max', (['energy.numpy_array'], {}), '(energy.numpy_array)\n', (37531, 37551), True, 'import numpy as np\n'), ((40029, 40055), 'os.path.isfile', 'os.path.isfile', (['"""stop_now"""'], {}), "('stop_now')\n", (40043, 40055), False, 'import os\n'), ((5491, 5517), 'scipy.io.loadmat', 'loadmat', (['self.dataFilename'], {}), '(self.dataFilename)\n', (5498, 5517), False, 'from scipy.io import loadmat, savemat\n'), ((20288, 20335), 'numpy.array', 'np.array', (["dd['FH']"], {'dtype': 'np.float32', 'order': '"""F"""'}), "(dd['FH'], dtype=np.float32, order='F')\n", (20296, 20335), True, 'import numpy as np\n'), ((20495, 20517), 'numpy.zeros', 'np.zeros', (['(num_vis, 1)'], {}), '((num_vis, 1))\n', (20503, 20517), True, 'import numpy as np\n'), ((20886, 20919), 'numpy.zeros', 'np.zeros', (['(num_vis, self.num_fac)'], {}), '((num_vis, self.num_fac))\n', (20894, 20919), True, 'import numpy as np\n'), ((20992, 21034), 'numpy.zeros', 'np.zeros', (['(self.num_fac, self.num_hid_cov)'], {}), '((self.num_fac, self.num_hid_cov))\n', (21000, 21034), True, 'import numpy as np\n'), ((21113, 21144), 'numpy.zeros', 'np.zeros', (['(self.num_hid_cov, 1)'], {}), '((self.num_hid_cov, 1))\n', (21121, 21144), True, 'import numpy as np\n'), ((21223, 21245), 'numpy.zeros', 'np.zeros', (['(num_vis, 1)'], {}), '((num_vis, 1))\n', (21231, 21245), True, 'import numpy as np\n'), ((21322, 21360), 'numpy.zeros', 'np.zeros', (['(num_vis, self.num_hid_mean)'], {}), '((num_vis, self.num_hid_mean))\n', (21330, 21360), True, 'import numpy as np\n'), ((21440, 21472), 
'numpy.zeros', 'np.zeros', (['(self.num_hid_mean, 1)'], {}), '((self.num_hid_mean, 1))\n', (21448, 21472), True, 'import numpy as np\n'), ((21584, 21620), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (21592, 21620), True, 'import numpy as np\n'), ((21702, 21738), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (21710, 21738), True, 'import numpy as np\n'), ((21822, 21858), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (21830, 21858), True, 'import numpy as np\n'), ((21936, 21977), 'numpy.empty', 'np.empty', (['(self.num_fac, self.batch_size)'], {}), '((self.num_fac, self.batch_size))\n', (21944, 21977), True, 'import numpy as np\n'), ((22051, 22092), 'numpy.empty', 'np.empty', (['(self.num_fac, self.batch_size)'], {}), '((self.num_fac, self.batch_size))\n', (22059, 22092), True, 'import numpy as np\n'), ((22281, 22311), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (22289, 22311), True, 'import numpy as np\n'), ((22389, 22419), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (22397, 22419), True, 'import numpy as np\n'), ((22493, 22523), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (22501, 22523), True, 'import numpy as np\n'), ((22599, 22635), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (22607, 22635), True, 'import numpy as np\n'), ((22721, 22757), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (22729, 22757), True, 'import numpy as np\n'), ((22837, 22867), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (22845, 22867), True, 'import numpy as np\n'), ((22944, 22990), 'numpy.empty', 'np.empty', (['(self.num_hid_mean, self.batch_size)'], {}), '((self.num_hid_mean, self.batch_size))\n', (22952, 22990), True, 'import numpy as np\n'), ((23171, 23201), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (23179, 23201), True, 'import numpy as np\n'), ((23283, 23313), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (23291, 23313), True, 'import numpy as np\n'), ((23396, 23426), 'numpy.zeros', 'np.zeros', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (23404, 23426), True, 'import numpy as np\n'), ((24977, 25026), 'numpy.array', 'np.array', (["dd['mask']"], {'dtype': 'np.float32', 'order': '"""F"""'}), "(dd['mask'], dtype=np.float32, order='F')\n", (24985, 25026), True, 'import numpy as np\n'), ((25149, 25194), 'numpy.empty', 'np.empty', (['(self.num_hid_cov, self.batch_size)'], {}), '((self.num_hid_cov, self.batch_size))\n', (25157, 25194), True, 'import numpy as np\n'), ((25264, 25309), 'numpy.empty', 'np.empty', (['(self.num_hid_cov, self.batch_size)'], {}), '((self.num_hid_cov, self.batch_size))\n', (25272, 25309), True, 'import numpy as np\n'), ((25379, 25420), 'numpy.empty', 'np.empty', (['(self.num_fac, self.batch_size)'], {}), '((self.num_fac, self.batch_size))\n', (25387, 25420), True, 'import numpy as np\n'), ((25490, 25520), 'numpy.empty', 'np.empty', (['(1, self.batch_size)'], {}), '((1, self.batch_size))\n', (25498, 25520), True, 'import numpy as np\n'), ((25589, 25605), 'numpy.empty', 'np.empty', (['(1, 1)'], {}), '((1, 1))\n', (25597, 25605), True, 'import numpy as np\n'), 
((25674, 25710), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (25682, 25710), True, 'import numpy as np\n'), ((25780, 25816), 'numpy.empty', 'np.empty', (['(num_vis, self.batch_size)'], {}), '((num_vis, self.batch_size))\n', (25788, 25816), True, 'import numpy as np\n'), ((25886, 25919), 'numpy.empty', 'np.empty', (['(num_vis, self.num_fac)'], {}), '((num_vis, self.num_fac))\n', (25894, 25919), True, 'import numpy as np\n'), ((25989, 26031), 'numpy.zeros', 'np.zeros', (['(self.num_fac, self.num_hid_cov)'], {}), '((self.num_fac, self.num_hid_cov))\n', (25997, 26031), True, 'import numpy as np\n'), ((26102, 26129), 'numpy.empty', 'np.empty', (['(1, self.num_fac)'], {}), '((1, self.num_fac))\n', (26110, 26129), True, 'import numpy as np\n'), ((26199, 26230), 'numpy.empty', 'np.empty', (['(1, self.num_hid_cov)'], {}), '((1, self.num_hid_cov))\n', (26207, 26230), True, 'import numpy as np\n'), ((29532, 29565), 'cudamat.sqrt', 'cmt.sqrt', (['lengthsq'], {'target': 'length'}), '(lengthsq, target=length)\n', (29540, 29565), True, 'import cudamat as cmt\n'), ((29815, 29851), 'cudamat.dot', 'cmt.dot', (['VF.T', 'normdata'], {'target': 'feat'}), '(VF.T, normdata, target=feat)\n', (29822, 29851), True, 'import cudamat as cmt\n'), ((29956, 29988), 'cudamat.dot', 'cmt.dot', (['FH.T', 'featsq'], {'target': 't1'}), '(FH.T, featsq, target=t1)\n', (29963, 29988), True, 'import cudamat as cmt\n'), ((30171, 30206), 'cudamat.dot', 'cmt.dot', (['featsq', 't2.T'], {'target': 'FHinc'}), '(featsq, t2.T, target=FHinc)\n', (30178, 30206), True, 'import cudamat as cmt\n'), ((30231, 30257), 'cudamat.dot', 'cmt.dot', (['FH', 't2'], {'target': 't3'}), '(FH, t2, target=t3)\n', (30238, 30257), True, 'import cudamat as cmt\n'), ((30311, 30348), 'cudamat.dot', 'cmt.dot', (['normdata', 't3.T'], {'target': 'VFinc'}), '(normdata, t3.T, target=VFinc)\n', (30318, 30348), True, 'import cudamat as cmt\n'), ((30620, 30661), 'cudamat.dot', 'cmt.dot', (['w_mean.T', 'data'], {'target': 'feat_mean'}), '(w_mean.T, data, target=feat_mean)\n', (30627, 30661), True, 'import cudamat as cmt\n'), ((30855, 30899), 'cudamat.dot', 'cmt.dot', (['data', 'feat_mean.T'], {'target': 'w_meaninc'}), '(data, feat_mean.T, target=w_meaninc)\n', (30862, 30899), True, 'import cudamat as cmt\n'), ((32337, 32370), 'cudamat.sqrt', 'cmt.sqrt', (['lengthsq'], {'target': 'length'}), '(lengthsq, target=length)\n', (32345, 32370), True, 'import cudamat as cmt\n'), ((32568, 32604), 'cudamat.dot', 'cmt.dot', (['VF.T', 'normdata'], {'target': 'feat'}), '(VF.T, normdata, target=feat)\n', (32575, 32604), True, 'import cudamat as cmt\n'), ((32687, 32719), 'cudamat.dot', 'cmt.dot', (['FH.T', 'featsq'], {'target': 't1'}), '(FH.T, featsq, target=t1)\n', (32694, 32719), True, 'import cudamat as cmt\n'), ((32959, 32985), 'cudamat.dot', 'cmt.dot', (['FH', 't2'], {'target': 't3'}), '(FH, t2, target=t3)\n', (32966, 32985), True, 'import cudamat as cmt\n'), ((33262, 33306), 'cudamat.dot', 'cmt.dot', (['w_mean.T', 'negdata'], {'target': 'feat_mean'}), '(w_mean.T, negdata, target=feat_mean)\n', (33269, 33306), True, 'import cudamat as cmt\n'), ((33900, 33913), 'cudamat.sqrt', 'cmt.sqrt', (['t10'], {}), '(t10)\n', (33908, 33913), True, 'import cudamat as cmt\n'), ((37061, 37079), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (37077, 37079), False, 'import sys\n'), ((38360, 38378), 'numpy.mod', 'np.mod', (['epoch', '(100)'], {}), '(epoch, 100)\n', (38366, 38378), True, 'import numpy as np\n'), ((38613, 38631), 
'numpy.mod', 'np.mod', (['epoch', '(100)'], {}), '(epoch, 100)\n', (38619, 38631), True, 'import numpy as np\n'), ((20035, 20073), 'numpy.eye', 'np.eye', (['self.num_fac', 'self.num_hid_cov'], {}), '(self.num_fac, self.num_hid_cov)\n', (20041, 20073), True, 'import numpy as np\n'), ((20388, 20418), 'numpy.ones', 'np.ones', (['(self.num_hid_cov, 1)'], {}), '((self.num_hid_cov, 1))\n', (20395, 20418), True, 'import numpy as np\n'), ((20728, 20759), 'numpy.ones', 'np.ones', (['(self.num_hid_mean, 1)'], {}), '((self.num_hid_mean, 1))\n', (20735, 20759), True, 'import numpy as np\n'), ((42101, 42115), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (42113, 42115), False, 'from datetime import datetime\n'), ((6438, 6460), 'numpy.min', 'np.min', (['self.d'], {'axis': '(0)'}), '(self.d, axis=0)\n', (6444, 6460), True, 'import numpy as np\n'), ((6508, 6530), 'numpy.max', 'np.max', (['self.d'], {'axis': '(0)'}), '(self.d, axis=0)\n', (6514, 6530), True, 'import numpy as np\n'), ((6579, 6602), 'numpy.mean', 'np.mean', (['self.d'], {'axis': '(0)'}), '(self.d, axis=0)\n', (6586, 6602), True, 'import numpy as np\n'), ((6650, 6672), 'numpy.std', 'np.std', (['self.d'], {'axis': '(0)'}), '(self.d, axis=0)\n', (6656, 6672), True, 'import numpy as np\n'), ((17910, 17933), 'numpy.min', 'np.min', (['visData'], {'axis': '(0)'}), '(visData, axis=0)\n', (17916, 17933), True, 'import numpy as np\n'), ((17987, 18010), 'numpy.max', 'np.max', (['visData'], {'axis': '(0)'}), '(visData, axis=0)\n', (17993, 18010), True, 'import numpy as np\n'), ((18065, 18089), 'numpy.mean', 'np.mean', (['visData'], {'axis': '(0)'}), '(visData, axis=0)\n', (18072, 18089), True, 'import numpy as np\n'), ((18143, 18166), 'numpy.std', 'np.std', (['visData'], {'axis': '(0)'}), '(visData, axis=0)\n', (18149, 18166), True, 'import numpy as np\n'), ((17809, 17832), 'numpy.max', 'np.max', (['visData'], {'axis': '(0)'}), '(visData, axis=0)\n', (17815, 17832), True, 'import numpy as np\n'), ((17833, 17856), 'numpy.min', 'np.min', (['visData'], {'axis': '(0)'}), '(visData, axis=0)\n', (17839, 17856), True, 'import numpy as np\n')]
|
from os.path import join
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import zscore
from sklearn.decomposition import PCA
import pandas as pd
from itertools import combinations
# Load helper function(s) for interacting with CTF dataset
from ctf_dataset.load import create_wrapped_dataset
base_dir = '/mnt/bucket/labs/hasson/snastase/social-ctf'
data_dir = join(base_dir, 'data')
# Create wrapped CTF dataset
wrap_f = create_wrapped_dataset(data_dir, output_dataset_name="virtual.hdf5")
n_lstms = 512
n_repeats = 8
n_players = 4
map_id = 0
# Get matchups with all same agents (e.g. AA vs AA)
agent_ids = wrap_f['map/matchup/repeat/player/agent_id'][0, :, :, :, 0]
matchup_ids = np.all(agent_ids[:, 0, :] ==
agent_ids[:, 0, 0][:, np.newaxis], axis=1)
n_matchups = np.sum(matchup_ids) # 0, 34, 49, 54
# Extract LSTMs for one map and matchup
lstms_matched = np.tanh(wrap_f['map/matchup/repeat/player/time/lstm'][
map_id, matchup_ids, ...].astype(np.float32))
print("Loaded LSTMs for within-population matchups")
# Loop through matchups, repeats, and players to compute PCA
k = n_lstms
lstm_pca = {}
for m in np.arange(n_matchups):
lstm_pca[m] = {}
for r in np.arange(n_repeats):
lstm_pca[m][r] = {}
for p in np.arange(n_players):
lstm_pca[m][r][p] = {}
pca = PCA(n_components=k)
transformed = pca.fit_transform(
#zscore(lstms_matched[m, r, p], axis=0))
#np.tanh(lstms_matched[m, r, p]))
zscore(lstms_matched[m, r, p], axis=0))
lstm_pca[m][r][p]['transformed'] = transformed
lstm_pca[m][r][p]['pca'] = pca
print(f"Finished running PCA for matchup {m}, "
f"repeat {r}, player {p}")
np.save('results/pca_lstm_tanh-z_results.npy', lstm_pca)
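# (the saved dict can be reloaded later with
#  np.load('results/pca_lstm_tanh-z_results.npy', allow_pickle=True).item())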
# Convert PCA outputs to long dictionary for plotting
lstm_pca_long = {'population': [], 'repeat': [], 'player': [],
'variance explained': [], 'dimension': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_players):
for k, v in enumerate(lstm_pca[m][r][p][
'pca'].explained_variance_ratio_):
lstm_pca_long['population'].append(pops[m])
lstm_pca_long['repeat'].append(r)
lstm_pca_long['player'].append(p)
lstm_pca_long['variance explained'].append(v)
lstm_pca_long['dimension'].append(k + 1)
lstm_pca_long = pd.DataFrame(lstm_pca_long)
max_k = 30
lstm_pca_trunc = lstm_pca_long[lstm_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.relplot(data=lstm_pca_trunc, x='dimension',
y='variance explained', hue='repeat',
col='population', col_wrap=2,
kind='line')
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, n_repeats, n_players, len(percents)))
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_players):
for i, perc in enumerate(percents):
k = np.sum(np.cumsum(
lstm_pca[m][r][p][
'pca'].explained_variance_ratio_) <= perc) + 1
percents_vaf[m, r, p, i] = k
for m in np.arange(n_matchups):
for i, perc in enumerate(percents):
median = int(np.median(percents_vaf[m, ..., i]))
min = int(np.amin(percents_vaf[m, ..., i]))
max = int(np.amax(percents_vaf[m, ..., i]))
print(f"Population {pops[m]}: {median} dimensions "
f"for {perc} variance (range: {min}-{max})")
print('\n')
# Stack pairs of players and compute joint PCA
pairs = list(combinations(np.arange(n_players), 2))
n_pairs = len(pairs)
k = n_lstms * 2
coop_ids, comp_ids = [0, 5], [1, 2, 3, 4]
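# combinations(np.arange(4), 2) enumerates pairs as (0,1), (0,2), (0,3), (1,2), (1,3), (2,3),
# so pair indices 0 and 5, i.e. (0,1) and (2,3), are treated as cooperative below
# and the remaining four pairs as competitive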
lstm_pair_pca = {}
for m in np.arange(n_matchups):
lstm_pair_pca[m] = {}
for r in np.arange(n_repeats):
lstm_pair_pca[m][r] = {}
for p, pair in enumerate(pairs):
lstm_pair_pca[m][r][p] = {}
stack_lstm = np.hstack((lstms_matched[m, r, pair[0]],
lstms_matched[m, r, pair[1]]))
pca = PCA(n_components=k)
transformed = pca.fit_transform(
zscore(stack_lstm, axis=0))
lstm_pair_pca[m][r][p]['transformed'] = transformed
lstm_pair_pca[m][r][p]['pca'] = pca
print(f"Finished running PCA for matchup {m}, "
f"repeat {r}, pair {pair}")
np.save('results/pair-pca_lstm_tanh-z_results.npy', lstm_pair_pca)
# Convert PCA outputs to long dictionary for plotting
lstm_pair_pca_long = {'population': [], 'repeat': [], 'pair': [],
'variance explained': [], 'dimension': [],
'type': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
pair_type = {c:('cooperative' if c in coop_ids else 'competitive')
for c in np.arange(n_pairs)}
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_pairs):
for k, v in enumerate(lstm_pair_pca[m][r][p][
'pca'].explained_variance_ratio_):
lstm_pair_pca_long['population'].append(pops[m])
lstm_pair_pca_long['repeat'].append(r)
lstm_pair_pca_long['pair'].append(p)
lstm_pair_pca_long['variance explained'].append(v)
lstm_pair_pca_long['dimension'].append(k + 1)
lstm_pair_pca_long['type'].append(pair_type[p])
lstm_pair_pca_long = pd.DataFrame(lstm_pair_pca_long)
max_k = 10
lstm_pair_pca_trunc = lstm_pair_pca_long[
lstm_pair_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.relplot(data=lstm_pair_pca_trunc, x='dimension',
y='variance explained', hue='type',
col='population', col_wrap=2, linewidth=3,
kind='line')
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, n_repeats, n_pairs, len(percents)))
for m in np.arange(n_matchups):
for r in np.arange(n_repeats):
for p in np.arange(n_pairs):
for i, perc in enumerate(percents):
k = np.sum(np.cumsum(
lstm_pair_pca[m][r][p][
'pca'].explained_variance_ratio_) <= perc) + 1
percents_vaf[m, r, p, i] = k
for m in np.arange(n_matchups):
for type, c in zip(['cooperative', 'competitive'],
[coop_ids, comp_ids]):
for i, perc in enumerate(percents):
median = int(np.median(percents_vaf[m, :, c, i]))
min = int(np.amin(percents_vaf[m, :, c, i]))
max = int(np.amax(percents_vaf[m, :, c, i]))
print(f"Population {pops[m]} {type}: {median} dimensions "
f"for {perc} variance (range: {min}-{max})")
print('\n')
# Stack across all repeats and run PCA
k = n_lstms
lstm_stack_pca = {}
for m in np.arange(n_matchups):
lstm_stack_pca[m] = {}
stack_lstm = []
for r in np.arange(n_repeats):
for p in np.arange(n_players):
stack_lstm.append(zscore(lstms_matched[m, r, p],
axis=0))
stack_lstm = np.vstack(stack_lstm)
pca = PCA(n_components=k)
transformed = pca.fit_transform(stack_lstm)
lstm_stack_pca[m]['transformed'] = transformed
lstm_stack_pca[m]['pca'] = pca
print(f"Finished running stacked PCA for matchup {m}")
np.save('results/stack-pca_lstm_tanh-z_results.npy', lstm_stack_pca)
# Convert PCA outputs to long dictionary for plotting
lstm_stack_pca_long = {'population': [], 'variance explained': [],
'dimension': []}
pops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}
for m in np.arange(n_matchups):
for k, v in enumerate(lstm_stack_pca[m][
'pca'].explained_variance_ratio_):
lstm_stack_pca_long['population'].append(pops[m])
lstm_stack_pca_long['variance explained'].append(v)
lstm_stack_pca_long['dimension'].append(k + 1)
lstm_stack_pca_long = pd.DataFrame(lstm_stack_pca_long)
max_k = 8
lstm_stack_pca_trunc = lstm_stack_pca_long[
lstm_stack_pca_long['dimension'] <= max_k]
sns.set(font_scale=1.2, style='white')
sns.lineplot(data=lstm_stack_pca_trunc, x='dimension',
y='variance explained', hue='population',
linewidth=3)
# Compute number of components required for percentage variance
percents = [.5, .75, .9, .95, .99]
percents_vaf = np.zeros((n_matchups, len(percents)))
for m in np.arange(n_matchups):
for i, perc in enumerate(percents):
k = np.sum(np.cumsum(
lstm_stack_pca[m][
'pca'].explained_variance_ratio_) <= perc) + 1
percents_vaf[m, i] = k
for m in np.arange(n_matchups):
for i, perc in enumerate(percents):
median = int(np.median(percents_vaf[m, i]))
print(f"Population {pops[m]}: {median} dimensions "
f"for {perc} variance")
print('\n')
# Create reduced-dimension version of data (e.g. k = 100)
k = 100
lstm_pca_reduce = []
for m in np.arange(n_matchups):
stack_lstm = []
for r in np.arange(n_repeats):
for p in np.arange(n_players):
stack_lstm.append(zscore(lstms_matched[m, r, p],
axis=0))
stack_lstm = np.vstack(stack_lstm)
pca = PCA(n_components=k)
transformed = pca.fit_transform(stack_lstm)
percent_vaf = np.sum(pca.explained_variance_ratio_)
# Un-stack PCA-transformed arrays for repeats, players
unstack_lstm = np.stack(np.split(np.stack(
np.split(transformed, 8), axis=0), 4, axis=1), axis=1)
lstm_pca_reduce.append(unstack_lstm)
print(f"Finished running stacked PCA for matchup {m}")
print(f"Proportion variance at for matchup {m} at k = {k}: "
f"{percent_vaf:.3f}")
lstm_pca_reduce = np.stack(lstm_pca_reduce, axis=0)
np.save(f'results/lstms_tanh-z_pca-k{k}.npy', lstm_pca_reduce)
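# lstm_pca_reduce should have shape (n_matchups, n_repeats, n_players, n_timepoints, k)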
## Compute correlations for PC in comparison to game variable
from features import get_features
from scipy.stats import pearsonr
# Load pre-saved PCA's
k = 100
lstm_pca = np.load(f'results/lstms_tanh-z_pca-k{k}.npy')
# Exclude degenerate features from analysis
feature_set = ['position', 'health', 'events']
all_features, labels = get_features(wrap_f, feature_set=feature_set, map_id=map_id,
matchup_id=matchup_ids, player_id=slice(None),
repeat_id=slice(None))
features_exclude = []
for label in labels:
features = all_features[..., np.array(labels) == label]
n_nonzeros = np.sum(np.nonzero(features))
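    # note: np.nonzero returns index arrays, so this sums indices rather than counting
    # nonzero entries; np.count_nonzero(features) would give that count directly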
print(f'checking {label} for all nonzeros; found {n_nonzeros} nonzeros')
if n_nonzeros == 0:
features_exclude.append(label)
print(f'excluding {label}')
labels_all = list(labels)  # keep the full label list; all_features still has one column per original label
labels = [l for l in labels if l not in features_exclude]
# Define a single variable to pull stats for (this may be redundant, review later)
pca_corrs = {}
for game_var in labels:
    features = all_features[..., np.array(labels_all) == game_var]
    # the boolean mask must be built from the unfiltered label list (labels_all),
    # since all_features was not pruned when degenerate features were dropped
feature_shape = features.shape[:-2]
pca_corrs[game_var] = np.full(feature_shape + (k,), np.nan)
for matchup_id in np.arange(n_matchups):
for repeat_id in np.arange(n_repeats):
for player_id in np.arange(n_players):
for pc_id in np.arange(k):
pc_corr = pearsonr(features[matchup_id, repeat_id, player_id, :, 0],
lstm_pca[matchup_id, repeat_id, player_id,
:, pc_id])[0]
pca_corrs[game_var][matchup_id, repeat_id, player_id, pc_id] = pc_corr
print(f"finished pca correlations w/ {game_var}")
# Save dictionary
np.save(f'results/lstm_pca-k{k}_feature_correlations.npy', pca_corrs)
## Plot
pca_corrs = np.load('results/lstm_pca-k100_feature_correlations.npy', allow_pickle=True).item()  # .item() unwraps the 0-d object array back into the saved dict
# Summarize PCA Corrs across players and repeats
pca_corr_means = []
for game_var in pca_corrs:
pca_corr_means.append(np.nanmean(pca_corrs[game_var], axis=(1, 2)))
pca_corr_means = np.stack(pca_corr_means, 1)
assert pca_corr_means.shape[1] == len(labels)
pc_id = 2
for pc_id in np.arange(1,10):
plt.matshow(pca_corr_means[..., pc_id], cmap='RdBu_r')
plt.yticks([0, 1, 2, 3], ['A','B','C','D'])
plt.xticks(np.arange(pca_corr_means.shape[1]), labels, rotation=90);
plt.title(f'PCA Feature Correlations for PC{pc_id}')
plt.colorbar()
|
[
"numpy.hstack",
"numpy.nanmean",
"numpy.array",
"numpy.cumsum",
"scipy.stats.pearsonr",
"numpy.save",
"numpy.arange",
"seaborn.set",
"sklearn.decomposition.PCA",
"numpy.stack",
"matplotlib.pyplot.yticks",
"numpy.vstack",
"pandas.DataFrame",
"numpy.amin",
"seaborn.lineplot",
"scipy.stats.zscore",
"numpy.nonzero",
"matplotlib.pyplot.title",
"matplotlib.pyplot.matshow",
"numpy.median",
"matplotlib.pyplot.colorbar",
"os.path.join",
"numpy.sum",
"numpy.split",
"ctf_dataset.load.create_wrapped_dataset",
"numpy.full",
"numpy.all",
"numpy.load",
"numpy.amax",
"seaborn.relplot"
] |
[((403, 425), 'os.path.join', 'join', (['base_dir', '"""data"""'], {}), "(base_dir, 'data')\n", (407, 425), False, 'from os.path import join\n'), ((465, 533), 'ctf_dataset.load.create_wrapped_dataset', 'create_wrapped_dataset', (['data_dir'], {'output_dataset_name': '"""virtual.hdf5"""'}), "(data_dir, output_dataset_name='virtual.hdf5')\n", (487, 533), False, 'from ctf_dataset.load import create_wrapped_dataset\n'), ((727, 798), 'numpy.all', 'np.all', (['(agent_ids[:, 0, :] == agent_ids[:, 0, 0][:, np.newaxis])'], {'axis': '(1)'}), '(agent_ids[:, 0, :] == agent_ids[:, 0, 0][:, np.newaxis], axis=1)\n', (733, 798), True, 'import numpy as np\n'), ((834, 853), 'numpy.sum', 'np.sum', (['matchup_ids'], {}), '(matchup_ids)\n', (840, 853), True, 'import numpy as np\n'), ((1185, 1206), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (1194, 1206), True, 'import numpy as np\n'), ((1820, 1876), 'numpy.save', 'np.save', (['"""results/pca_lstm_tanh-z_results.npy"""', 'lstm_pca'], {}), "('results/pca_lstm_tanh-z_results.npy', lstm_pca)\n", (1827, 1876), True, 'import numpy as np\n'), ((2106, 2127), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (2115, 2127), True, 'import numpy as np\n'), ((2603, 2630), 'pandas.DataFrame', 'pd.DataFrame', (['lstm_pca_long'], {}), '(lstm_pca_long)\n', (2615, 2630), True, 'import pandas as pd\n'), ((2728, 2766), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.2)', 'style': '"""white"""'}), "(font_scale=1.2, style='white')\n", (2735, 2766), True, 'import seaborn as sns\n'), ((2767, 2900), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'lstm_pca_trunc', 'x': '"""dimension"""', 'y': '"""variance explained"""', 'hue': '"""repeat"""', 'col': '"""population"""', 'col_wrap': '(2)', 'kind': '"""line"""'}), "(data=lstm_pca_trunc, x='dimension', y='variance explained', hue\n ='repeat', col='population', col_wrap=2, kind='line')\n", (2778, 2900), True, 'import seaborn as sns\n'), ((3117, 3138), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (3126, 3138), True, 'import numpy as np\n'), ((3470, 3491), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (3479, 3491), True, 'import numpy as np\n'), ((4040, 4061), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (4049, 4061), True, 'import numpy as np\n'), ((4729, 4795), 'numpy.save', 'np.save', (['"""results/pair-pca_lstm_tanh-z_results.npy"""', 'lstm_pair_pca'], {}), "('results/pair-pca_lstm_tanh-z_results.npy', lstm_pair_pca)\n", (4736, 4795), True, 'import numpy as np\n'), ((5176, 5197), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (5185, 5197), True, 'import numpy as np\n'), ((5768, 5800), 'pandas.DataFrame', 'pd.DataFrame', (['lstm_pair_pca_long'], {}), '(lstm_pair_pca_long)\n', (5780, 5800), True, 'import pandas as pd\n'), ((5918, 5956), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.2)', 'style': '"""white"""'}), "(font_scale=1.2, style='white')\n", (5925, 5956), True, 'import seaborn as sns\n'), ((5957, 6105), 'seaborn.relplot', 'sns.relplot', ([], {'data': 'lstm_pair_pca_trunc', 'x': '"""dimension"""', 'y': '"""variance explained"""', 'hue': '"""type"""', 'col': '"""population"""', 'col_wrap': '(2)', 'linewidth': '(3)', 'kind': '"""line"""'}), "(data=lstm_pair_pca_trunc, x='dimension', y='variance explained',\n hue='type', col='population', col_wrap=2, linewidth=3, kind='line')\n", (5968, 6105), True, 'import seaborn as sns\n'), ((6321, 6342), 'numpy.arange', 'np.arange', (['n_matchups'], {}), 
'(n_matchups)\n', (6330, 6342), True, 'import numpy as np\n'), ((6677, 6698), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (6686, 6698), True, 'import numpy as np\n'), ((7258, 7279), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (7267, 7279), True, 'import numpy as np\n'), ((7804, 7872), 'numpy.save', 'np.save', (['"""results/stack-pca_lstm_tanh-z_results.npy"""', 'lstm_stack_pca'], {}), "('results/stack-pca_lstm_tanh-z_results.npy', lstm_stack_pca)\n", (7811, 7872), True, 'import numpy as np\n'), ((8086, 8107), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (8095, 8107), True, 'import numpy as np\n'), ((8393, 8426), 'pandas.DataFrame', 'pd.DataFrame', (['lstm_stack_pca_long'], {}), '(lstm_stack_pca_long)\n', (8405, 8426), True, 'import pandas as pd\n'), ((8546, 8584), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.2)', 'style': '"""white"""'}), "(font_scale=1.2, style='white')\n", (8553, 8584), True, 'import seaborn as sns\n'), ((8585, 8699), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'lstm_stack_pca_trunc', 'x': '"""dimension"""', 'y': '"""variance explained"""', 'hue': '"""population"""', 'linewidth': '(3)'}), "(data=lstm_stack_pca_trunc, x='dimension', y=\n 'variance explained', hue='population', linewidth=3)\n", (8597, 8699), True, 'import seaborn as sns\n'), ((8884, 8905), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (8893, 8905), True, 'import numpy as np\n'), ((9117, 9138), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (9126, 9138), True, 'import numpy as np\n'), ((9445, 9466), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (9454, 9466), True, 'import numpy as np\n'), ((10265, 10298), 'numpy.stack', 'np.stack', (['lstm_pca_reduce'], {'axis': '(0)'}), '(lstm_pca_reduce, axis=0)\n', (10273, 10298), True, 'import numpy as np\n'), ((10300, 10362), 'numpy.save', 'np.save', (['f"""results/lstms_tanh-z_pca-k{k}.npy"""', 'lstm_pca_reduce'], {}), "(f'results/lstms_tanh-z_pca-k{k}.npy', lstm_pca_reduce)\n", (10307, 10362), True, 'import numpy as np\n'), ((10540, 10585), 'numpy.load', 'np.load', (['f"""results/lstms_tanh-z_pca-k{k}.npy"""'], {}), "(f'results/lstms_tanh-z_pca-k{k}.npy')\n", (10547, 10585), True, 'import numpy as np\n'), ((12313, 12382), 'numpy.save', 'np.save', (['f"""results/lstm_pca-k{k}_feature_correlations.npy"""', 'pca_corrs'], {}), "(f'results/lstm_pca-k{k}_feature_correlations.npy', pca_corrs)\n", (12320, 12382), True, 'import numpy as np\n'), ((12405, 12481), 'numpy.load', 'np.load', (['"""results/lstm_pca-k100_feature_correlations.npy"""'], {'allow_pickle': '(True)'}), "('results/lstm_pca-k100_feature_correlations.npy', allow_pickle=True)\n", (12412, 12481), True, 'import numpy as np\n'), ((12670, 12697), 'numpy.stack', 'np.stack', (['pca_corr_means', '(1)'], {}), '(pca_corr_means, 1)\n', (12678, 12697), True, 'import numpy as np\n'), ((12770, 12786), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (12779, 12786), True, 'import numpy as np\n'), ((1242, 1262), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (1251, 1262), True, 'import numpy as np\n'), ((2142, 2162), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (2151, 2162), True, 'import numpy as np\n'), ((3153, 3173), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (3162, 3173), True, 'import numpy as np\n'), ((4102, 4122), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (4111, 
4122), True, 'import numpy as np\n'), ((5212, 5232), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (5221, 5232), True, 'import numpy as np\n'), ((6357, 6377), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (6366, 6377), True, 'import numpy as np\n'), ((7346, 7366), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (7355, 7366), True, 'import numpy as np\n'), ((7545, 7566), 'numpy.vstack', 'np.vstack', (['stack_lstm'], {}), '(stack_lstm)\n', (7554, 7566), True, 'import numpy as np\n'), ((7577, 7596), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (7580, 7596), False, 'from sklearn.decomposition import PCA\n'), ((9506, 9526), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (9515, 9526), True, 'import numpy as np\n'), ((9705, 9726), 'numpy.vstack', 'np.vstack', (['stack_lstm'], {}), '(stack_lstm)\n', (9714, 9726), True, 'import numpy as np\n'), ((9737, 9756), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (9740, 9756), False, 'from sklearn.decomposition import PCA\n'), ((9828, 9865), 'numpy.sum', 'np.sum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (9834, 9865), True, 'import numpy as np\n'), ((11681, 11718), 'numpy.full', 'np.full', (['(feature_shape + (k,))', 'np.nan'], {}), '(feature_shape + (k,), np.nan)\n', (11688, 11718), True, 'import numpy as np\n'), ((11743, 11764), 'numpy.arange', 'np.arange', (['n_matchups'], {}), '(n_matchups)\n', (11752, 11764), True, 'import numpy as np\n'), ((12791, 12845), 'matplotlib.pyplot.matshow', 'plt.matshow', (['pca_corr_means[..., pc_id]'], {'cmap': '"""RdBu_r"""'}), "(pca_corr_means[..., pc_id], cmap='RdBu_r')\n", (12802, 12845), True, 'import matplotlib.pyplot as plt\n'), ((12850, 12896), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 1, 2, 3]', "['A', 'B', 'C', 'D']"], {}), "([0, 1, 2, 3], ['A', 'B', 'C', 'D'])\n", (12860, 12896), True, 'import matplotlib.pyplot as plt\n'), ((12971, 13023), 'matplotlib.pyplot.title', 'plt.title', (['f"""PCA Feature Correlations for PC{pc_id}"""'], {}), "(f'PCA Feature Correlations for PC{pc_id}')\n", (12980, 13023), True, 'import matplotlib.pyplot as plt\n'), ((13028, 13042), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (13040, 13042), True, 'import matplotlib.pyplot as plt\n'), ((1309, 1329), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (1318, 1329), True, 'import numpy as np\n'), ((2181, 2201), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (2190, 2201), True, 'import numpy as np\n'), ((3192, 3212), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (3201, 3212), True, 'import numpy as np\n'), ((3904, 3924), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (3913, 3924), True, 'import numpy as np\n'), ((5146, 5164), 'numpy.arange', 'np.arange', (['n_pairs'], {}), '(n_pairs)\n', (5155, 5164), True, 'import numpy as np\n'), ((5251, 5269), 'numpy.arange', 'np.arange', (['n_pairs'], {}), '(n_pairs)\n', (5260, 5269), True, 'import numpy as np\n'), ((6396, 6414), 'numpy.arange', 'np.arange', (['n_pairs'], {}), '(n_pairs)\n', (6405, 6414), True, 'import numpy as np\n'), ((7386, 7406), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (7395, 7406), True, 'import numpy as np\n'), ((9546, 9566), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (9555, 9566), True, 'import numpy as np\n'), ((11035, 11055), 
'numpy.nonzero', 'np.nonzero', (['features'], {}), '(features)\n', (11045, 11055), True, 'import numpy as np\n'), ((11791, 11811), 'numpy.arange', 'np.arange', (['n_repeats'], {}), '(n_repeats)\n', (11800, 11811), True, 'import numpy as np\n'), ((12606, 12650), 'numpy.nanmean', 'np.nanmean', (['pca_corrs[game_var]'], {'axis': '(1, 2)'}), '(pca_corrs[game_var], axis=(1, 2))\n', (12616, 12650), True, 'import numpy as np\n'), ((12909, 12943), 'numpy.arange', 'np.arange', (['pca_corr_means.shape[1]'], {}), '(pca_corr_means.shape[1])\n', (12918, 12943), True, 'import numpy as np\n'), ((1384, 1403), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (1387, 1403), False, 'from sklearn.decomposition import PCA\n'), ((3554, 3588), 'numpy.median', 'np.median', (['percents_vaf[m, ..., i]'], {}), '(percents_vaf[m, ..., i])\n', (3563, 3588), True, 'import numpy as np\n'), ((3608, 3640), 'numpy.amin', 'np.amin', (['percents_vaf[m, ..., i]'], {}), '(percents_vaf[m, ..., i])\n', (3615, 3640), True, 'import numpy as np\n'), ((3660, 3692), 'numpy.amax', 'np.amax', (['percents_vaf[m, ..., i]'], {}), '(percents_vaf[m, ..., i])\n', (3667, 3692), True, 'import numpy as np\n'), ((4263, 4334), 'numpy.hstack', 'np.hstack', (['(lstms_matched[m, r, pair[0]], lstms_matched[m, r, pair[1]])'], {}), '((lstms_matched[m, r, pair[0]], lstms_matched[m, r, pair[1]]))\n', (4272, 4334), True, 'import numpy as np\n'), ((4389, 4408), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'k'}), '(n_components=k)\n', (4392, 4408), False, 'from sklearn.decomposition import PCA\n'), ((9201, 9230), 'numpy.median', 'np.median', (['percents_vaf[m, i]'], {}), '(percents_vaf[m, i])\n', (9210, 9230), True, 'import numpy as np\n'), ((11845, 11865), 'numpy.arange', 'np.arange', (['n_players'], {}), '(n_players)\n', (11854, 11865), True, 'import numpy as np\n'), ((1572, 1610), 'scipy.stats.zscore', 'zscore', (['lstms_matched[m, r, p]'], {'axis': '(0)'}), '(lstms_matched[m, r, p], axis=0)\n', (1578, 1610), False, 'from scipy.stats import zscore\n'), ((4470, 4496), 'scipy.stats.zscore', 'zscore', (['stack_lstm'], {'axis': '(0)'}), '(stack_lstm, axis=0)\n', (4476, 4496), False, 'from scipy.stats import zscore\n'), ((6870, 6905), 'numpy.median', 'np.median', (['percents_vaf[m, :, c, i]'], {}), '(percents_vaf[m, :, c, i])\n', (6879, 6905), True, 'import numpy as np\n'), ((6929, 6962), 'numpy.amin', 'np.amin', (['percents_vaf[m, :, c, i]'], {}), '(percents_vaf[m, :, c, i])\n', (6936, 6962), True, 'import numpy as np\n'), ((6986, 7019), 'numpy.amax', 'np.amax', (['percents_vaf[m, :, c, i]'], {}), '(percents_vaf[m, :, c, i])\n', (6993, 7019), True, 'import numpy as np\n'), ((7438, 7476), 'scipy.stats.zscore', 'zscore', (['lstms_matched[m, r, p]'], {'axis': '(0)'}), '(lstms_matched[m, r, p], axis=0)\n', (7444, 7476), False, 'from scipy.stats import zscore\n'), ((9598, 9636), 'scipy.stats.zscore', 'zscore', (['lstms_matched[m, r, p]'], {'axis': '(0)'}), '(lstms_matched[m, r, p], axis=0)\n', (9604, 9636), False, 'from scipy.stats import zscore\n'), ((9985, 10009), 'numpy.split', 'np.split', (['transformed', '(8)'], {}), '(transformed, 8)\n', (9993, 10009), True, 'import numpy as np\n'), ((10984, 11000), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (10992, 11000), True, 'import numpy as np\n'), ((11467, 11483), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (11475, 11483), True, 'import numpy as np\n'), ((11897, 11909), 'numpy.arange', 'np.arange', (['k'], {}), '(k)\n', (11906, 11909), 
True, 'import numpy as np\n'), ((8966, 9027), 'numpy.cumsum', 'np.cumsum', (["lstm_stack_pca[m]['pca'].explained_variance_ratio_"], {}), "(lstm_stack_pca[m]['pca'].explained_variance_ratio_)\n", (8975, 9027), True, 'import numpy as np\n'), ((11941, 12058), 'scipy.stats.pearsonr', 'pearsonr', (['features[matchup_id, repeat_id, player_id, :, 0]', 'lstm_pca[matchup_id, repeat_id, player_id, :, pc_id]'], {}), '(features[matchup_id, repeat_id, player_id, :, 0], lstm_pca[\n matchup_id, repeat_id, player_id, :, pc_id])\n', (11949, 12058), False, 'from scipy.stats import pearsonr\n'), ((3289, 3350), 'numpy.cumsum', 'np.cumsum', (["lstm_pca[m][r][p]['pca'].explained_variance_ratio_"], {}), "(lstm_pca[m][r][p]['pca'].explained_variance_ratio_)\n", (3298, 3350), True, 'import numpy as np\n'), ((6491, 6557), 'numpy.cumsum', 'np.cumsum', (["lstm_pair_pca[m][r][p]['pca'].explained_variance_ratio_"], {}), "(lstm_pair_pca[m][r][p]['pca'].explained_variance_ratio_)\n", (6500, 6557), True, 'import numpy as np\n')]
|
import json
import os
import time
from copy import deepcopy
import TransportMaps.Distributions as dist
import TransportMaps.Likelihoods as like
from typing import List, Dict
from matplotlib import pyplot as plt
from factors.Factors import Factor, ExplicitPriorFactor, ImplicitPriorFactor, \
LikelihoodFactor, BinaryFactorMixture, KWayFactor
from sampler.NestedSampling import GlobalNestedSampler
from sampler.SimulationBasedSampler import SimulationBasedSampler
from slam.Variables import Variable, VariableType
from slam.FactorGraph import FactorGraph
from slam.BayesTree import BayesTree, BayesTreeNode
import numpy as np
from sampler.sampler_utils import JointFactor
from utils.Functions import sort_pair_lists
from utils.Visualization import plot_2d_samples
from utils.Functions import sample_dict_to_array, array_order_to_dict
class SolverArgs:
def __init__(self,
elimination_method: str = "natural",
posterior_sample_num: int = 500,
local_sample_num: int = 500,
store_clique_samples: bool = False,
local_sampling_method="direct",
adaptive_posterior_sampling=None,
*args, **kwargs
):
# graph-related and tree-related params
self.elimination_method = elimination_method
self.posterior_sample_num = posterior_sample_num
self.store_clique_samples = store_clique_samples
self.local_sampling_method = local_sampling_method
self.local_sample_num = local_sample_num
self.adaptive_posterior_sampling = adaptive_posterior_sampling
def jsonStr(self):
return json.dumps(self.__dict__)
class CliqueSeparatorFactor(ImplicitPriorFactor):
def sample(self, num_samples: int, **kwargs):
return NotImplementedError("implementation depends on density models")
class ConditionalSampler:
def conditional_sample_given_observation(self, conditional_dim,
obs_samples=None,
sample_number=None):
"""
This method returns samples with the dimension of conditional_dim.
        If sample_number is given, samples of the first conditional_dim variables are returned.
If obs_samples is given, samples of the first conditional_dim variables after
the dimension of obs_samples will be returned. obs_samples.shape = (sample num, dim)
        Note that the dims here refer to the vectorized points on manifolds, not the manifold dimensions.
"""
raise NotImplementedError("Implementation depends on density estimation method.")
class FactorGraphSolver:
"""
This is the abstract class of factor graph solvers.
It mainly works as:
1. the interface for users to define and solve factor graphs.
2. the maintainer of factor graphs and Bayes tree for incremental inference
3. fitting probabilistic models to the working part of factor graph and Bayes tree
4. inference (sampling) on the entire Bayes tree
    The derived class may rely on different probabilistic modeling approaches.
"""
def __init__(self, args: SolverArgs):
"""
Parameters
----------
elimination_method : string
option of heuristics for variable elimination ordering.
TODO: this can be a dynamic parameter when updating Bayes tree
"""
self._args = args
self._physical_graph = FactorGraph()
self._working_graph = FactorGraph()
self._physical_bayes_tree = None
self._working_bayes_tree = None
self._conditional_couplings = {} # map from Bayes tree clique to flows
self._implicit_factors = {} # map from Bayes tree clique to factor
self._samples = {} # map from variable to samples
self._new_nodes = []
self._new_factors = []
self._clique_samples = {} # map from Bayes tree clique to samples
self._clique_true_obs = {} # map from Bayes tree clique to observations which augments flow models
self._clique_density_model = {} # map from Bayes tree clique to flow model
# map from Bayes tree clique to variable pattern; (Separator,Frontal) in reverse elimination order
self._clique_variable_pattern = {}
self._elimination_ordering = []
self._reverse_ordering_map = {}
self._temp_training_loss = {}
def set_args(self, args: SolverArgs):
raise NotImplementedError("Implementation depends on probabilistic modeling approaches.")
@property
def elimination_method(self) -> str:
return self._args.elimination_method
@property
def elimination_ordering(self) -> List[Variable]:
return self._elimination_ordering
@property
def physical_vars(self) -> List[Variable]:
return self._physical_graph.vars
@property
def new_vars(self) -> List[Variable]:
return self._new_nodes
@property
def working_vars(self) -> List[Variable]:
return self._working_graph.vars
@property
def physical_factors(self) -> List[Factor]:
return self._physical_graph.factors
@property
def new_factors(self) -> List[Factor]:
return self._new_factors
@property
def working_factors(self) -> List[Factor]:
return self._working_graph.factors
@property
def working_factor_graph(self) -> FactorGraph:
return self._working_graph
@property
def physical_factor_graph(self) -> FactorGraph:
return self._physical_graph
@property
def working_bayes_tree(self) -> BayesTree:
return self._working_bayes_tree
@property
def physical_bayes_tree(self) -> BayesTree:
return self._physical_bayes_tree
def generate_natural_ordering(self) -> None:
"""
Generate the ordering by which nodes are added
"""
self._elimination_ordering = self._physical_graph.vars + self._new_nodes
def generate_pose_first_ordering(self) -> None:
"""
        Generate the ordering by which nodes are added, with landmark variables eliminated later
"""
natural_order = self._physical_graph.vars + self._new_nodes
pose_list = []
lmk_list = []
for node in natural_order:
if node._type == VariableType.Landmark:
lmk_list.append(node)
else:
pose_list.append(node)
self._elimination_ordering = pose_list + lmk_list
def generate_ccolamd_ordering(self) -> None:
"""
"""
physical_graph_ordering = [var for var in self._elimination_ordering if var not in self._working_graph.vars]
working_graph_ordering = self._working_graph.analyze_elimination_ordering(
method="ccolamd", last_vars=
[[var for var in self._working_graph.vars if
var.type == VariableType.Pose][-1]])
self._elimination_ordering = physical_graph_ordering + working_graph_ordering
def generate_ordering(self) -> None:
"""
Generate the ordering by which Bayes tree should be generated
"""
if self._args.elimination_method == "natural":
self.generate_natural_ordering()
elif self._args.elimination_method == "ccolamd":
self.generate_ccolamd_ordering()
elif self._args.elimination_method == "pose_first":
self.generate_pose_first_ordering()
self._reverse_ordering_map = {
var: index for index, var in
enumerate(self._elimination_ordering[::-1])}
# TODO: Add other ordering methods
def add_node(self, var: Variable = None, name: str = None,
dim: int = None) -> "FactorGraphSolver":
"""
Add a new node
The node has not been added to the physical or current factor graphs
:param var:
:param name: used only when variable is not specified
:param dim: used only when variable is not specified
:return: the current problem
"""
if var:
self._new_nodes.append(var)
else:
self._new_nodes.append(Variable(name, dim))
return self
def add_factor(self, factor: Factor) -> "FactorGraphSolver":
"""
Add a prior factor to specified nodes
The factor has not been added to physical or current factor graphs
:param factor
:return: the current problem
"""
self._new_factors.append(factor)
return self
def add_prior_factor(self, vars: List[Variable],
distribution: dist.Distribution) -> "FactorGraphSolver":
"""
Add a prior factor to specified nodes
The factor has not been added to physical or current factor graphs
:param vars
:param distribution
:return: the current problem
"""
self._new_factors.append(ExplicitPriorFactor(
vars=vars, distribution=distribution))
return self
def add_likelihood_factor(self, vars: List[Variable],
likelihood: like.LikelihoodBase) -> "FactorGraphSolver":
"""
Add a likelihood factor to specified nodes
The factor has not been added to physical or current factor graphs
:param vars
:param likelihood
:return: the current problem
"""
self._new_factors.append(LikelihoodFactor(
vars=vars, log_likelihood=likelihood))
return self
def update_physical_and_working_graphs(self, timer: List[float] = None, device: str = "cpu"
) -> "FactorGraphSolver":
"""
Add all new nodes and factors into the physical factor graph,
retrieve the working factor graph, update Bayes trees
:return: the current problem
"""
start = time.time()
# Determine the affected variables in the physical Bayes tree
old_nodes = set(self.physical_vars)
nodes_of_new_factors = set.union(*[set(factor.vars) for
factor in self._new_factors])
old_nodes_of_new_factors = set.intersection(old_nodes,
nodes_of_new_factors)
# Get the working factor graph
if self._physical_bayes_tree: # if not first step, get sub graph
affected_nodes, sub_bayes_trees = \
self._physical_bayes_tree. \
get_affected_vars_and_partial_bayes_trees(
vars=old_nodes_of_new_factors)
self._working_graph = self._physical_graph.get_sub_factor_graph_with_prior(
variables=affected_nodes,
sub_trees=sub_bayes_trees,
clique_prior_dict=self._implicit_factors)
else:
sub_bayes_trees = set()
for node in self._new_nodes:
self._working_graph.add_node(node)
for factor in self._new_factors:
self._working_graph.add_factor(factor)
        # Get the working Bayes tree
old_ordering = self._elimination_ordering
self.generate_ordering()
self._working_bayes_tree = self._working_graph.get_bayes_tree(
ordering=[var for var in self._elimination_ordering
if var in set(self.working_vars)])
# Update the physical factor graph
for node in self._new_nodes:
self._physical_graph.add_node(node)
for factor in self._new_factors:
self._physical_graph.add_factor(factor)
# Update the physical Bayesian tree
self._physical_bayes_tree = self._working_bayes_tree.__copy__()
self._physical_bayes_tree.append_child_bayes_trees(sub_bayes_trees)
# Delete legacy conditional samplers in the old tree and
# convert the density model w/o separator at leaves to density model w/ separator.
cliques_to_delete = set()
for old_clique in set(self._clique_density_model.keys()).difference(self._physical_bayes_tree.clique_nodes):
for new_clique in self._working_bayes_tree.clique_nodes:
if old_clique.vars == new_clique.vars and [var for var in old_ordering if var in old_clique.vars] == \
[var for var in self._elimination_ordering if var in new_clique.vars]:
# This clique was the root in the old tree but is leaf in the new tree.
# If the ordering of variables remains the same, its density model can be re-used.
# Update the clique to density model dict
self._clique_true_obs[new_clique] = self._clique_true_obs[old_clique]
if old_clique in self._clique_variable_pattern:
self._clique_variable_pattern[new_clique] = self._clique_variable_pattern[old_clique]
if old_clique in self._clique_samples:
self._clique_samples[new_clique] = self._clique_samples[old_clique]
self._clique_density_model[new_clique] = \
self.root_clique_density_model_to_leaf(old_clique, new_clique, device)
# since new clique will be skipped, related factors shall be eliminated beforehand.
# TODO: update _clique_density_model.keys() in which some clique parents change
# TODO: this currently has no impact on results
# TODO: if we store all models or clique-depend values on cliques, this issue will disappear
new_separator_factor = None
if new_clique.separator:
# extract new factor over separator
separator_var_list = sorted(new_clique.separator, key=lambda x: self._reverse_ordering_map[x])
new_separator_factor = \
self.clique_density_to_separator_factor(separator_var_list,
self._clique_density_model[new_clique],
self._clique_true_obs[old_clique])
self._implicit_factors[new_clique] = new_separator_factor
self._working_graph = self._working_graph.eliminate_clique_variables(clique=new_clique,
new_factor=new_separator_factor)
break
cliques_to_delete.add(old_clique)
for old_clique in cliques_to_delete:
del self._clique_density_model[old_clique]
del self._clique_true_obs[old_clique]
if old_clique in self._clique_variable_pattern:
del self._clique_variable_pattern[old_clique]
if old_clique in self._clique_samples:
del self._clique_samples[old_clique]
# Clear all newly added variables and factors
self._new_nodes = []
self._new_factors = []
end = time.time()
if timer is not None:
timer.append(end - start)
return self
def root_clique_density_model_to_leaf(self,
old_clique: BayesTreeNode,
new_clique: BayesTreeNode,
device) -> "ConditionalSampler":
"""
        When the old clique and the new clique contain the same variables but split them differently
        into frontal and separator variables, recycle the density model fitted for the old clique and
        convert it into a density model for the new clique.
"""
raise NotImplementedError("Implementation depends on probabilistic modeling")
def clique_density_to_separator_factor(self,
separator_var_list: List[Variable],
density_model,
true_obs: np.ndarray) -> CliqueSeparatorFactor:
"""
extract marginal of separator variables from clique density as separator factor
"""
raise NotImplementedError("Implementation depends on probabilistic modeling")
def incremental_inference(self,
timer: List[float] = None,
clique_dim_timer: List[List[float]] = None,
*args, **kwargs
):
self.fit_tree_density_models(timer=timer,
clique_dim_timer=clique_dim_timer,
*args, **kwargs)
if self._args.adaptive_posterior_sampling is None:
self._samples = self.sample_posterior(timer=timer, *args, **kwargs)
else:
self._samples = self.adaptive_posterior(timer=timer, *args, **kwargs)
return self._samples
def fit_clique_density_model(self,
clique,
samples,
var_ordering,
timer,
*args, **kwargs) -> "ConditionalSampler":
raise NotImplementedError("Implementation depends on probabilistic modeling.")
def adaptive_posterior(self, timer: List[float] = None, *args, **kwargs
) -> Dict[Variable, np.ndarray]:
"""
Generate samples for all variables
"""
        raise NotImplementedError("Implementation depends on density models.")
def fit_tree_density_models(self,
timer: List[float] = None,
clique_dim_timer: List[List[float]] = None,
*args, **kwargs):
"""
By the order of Bayes tree, perform local sampling and training
on all cliques
:return:
"""
self._temp_training_loss = {}
clique_ordering = self._working_bayes_tree.clique_ordering()
total_clique_num = len(clique_ordering)
clique_cnt = 1
before_clique_time = time.time()
while clique_ordering:
start_clique_time = time.time()
clique = clique_ordering.pop()
if clique in self._clique_density_model:
end_clique_time = time.time()
print(f"\tTime for clique {clique_cnt}/{total_clique_num}: " + str(
end_clique_time - start_clique_time) + " sec, "
"total time elapsed: " + str(
end_clique_time - before_clique_time) + " sec")
clique_cnt += 1
if (clique_dim_timer is not None):
clique_dim_timer.append([clique.dim, end_clique_time - before_clique_time])
continue
# local sampling
sampler_start = time.time()
local_samples, sample_var_ordering, true_obs = \
self.clique_training_sampler(clique,
num_samples=self._args.local_sample_num,
method=self._args.local_sampling_method)
sampler_end = time.time()
if timer is not None:
timer.append(sampler_end - sampler_start)
self._clique_true_obs[clique] = true_obs
if self._args.store_clique_samples:
self._clique_samples[clique] = local_samples
local_density_model = \
self.fit_clique_density_model(clique=clique,
samples=local_samples,
var_ordering=sample_var_ordering,
timer=timer)
self._clique_density_model[clique] = local_density_model
new_separator_factor = None
if clique.separator:
# extract new factor over separator
separator_list = sorted(clique.separator,
key=lambda x:
self._reverse_ordering_map[x])
new_separator_factor = self.clique_density_to_separator_factor(separator_list,
local_density_model,
true_obs)
self._implicit_factors[clique] = new_separator_factor
self._working_graph = self._working_graph.eliminate_clique_variables(clique=clique,
new_factor=new_separator_factor)
end_clique_time = time.time()
print(f"\tTime for clique {clique_cnt}/{total_clique_num}: " + str(
end_clique_time - start_clique_time) + " sec, "
"total time elapsed: " + str(
end_clique_time - before_clique_time) + " sec" + ", clique_dim is " + str(clique.dim))
if (clique_dim_timer is not None):
clique_dim_timer.append([clique.dim, end_clique_time - before_clique_time])
clique_cnt += 1
def clique_training_sampler(self, clique: BayesTreeNode, num_samples: int, method: str):
r""" This function returns training samples, simulated variables, and unused observations
"""
graph = self._working_graph.get_clique_factor_graph(clique)
variable_pattern = \
self._working_bayes_tree.clique_variable_pattern(clique)
if method == "direct":
sampler = SimulationBasedSampler(factors=graph.factors, vars=variable_pattern)
samples, var_list, unused_obs = sampler.sample(num_samples)
elif method == "nested" or method == "dynamic nested":
ns_sampler = GlobalNestedSampler(nodes=variable_pattern, factors=graph.factors)
samples = ns_sampler.sample(live_points=num_samples, sampling_method=method)
var_list = variable_pattern
unused_obs = np.array([])
else:
raise ValueError("Unknown sampling method.")
return samples, var_list, unused_obs
def sample_posterior(self, timer: List[float] = None, *args, **kwargs
) -> Dict[Variable, np.ndarray]:
"""
Generate samples for all variables
"""
num_samples = self._args.posterior_sample_num
start = time.time()
stack = [self._physical_bayes_tree.root]
samples = {}
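        # Traverse the Bayes tree from the root downward: each clique's frontal variables
        # are sampled conditioned on separator samples already drawn for its ancestors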
while stack:
# Retrieve the working clique
clique = stack.pop()
# Local sampling
frontal_list = sorted(clique.frontal,
key=lambda x: self._reverse_ordering_map[x])
separator_list = sorted(clique.separator,
key=lambda x: self._reverse_ordering_map[x])
clique_density_model = self._clique_density_model[clique]
obs = self._clique_true_obs[clique]
aug_separator_samples = np.zeros(shape=(num_samples, 0))
if len(obs) != 0:
aug_separator_samples = np.tile(obs, (num_samples, 1))
for var in separator_list:
aug_separator_samples = np.hstack((aug_separator_samples,
samples[var]))
if aug_separator_samples.shape[1] != 0:
frontal_samples = clique_density_model. \
conditional_sample_given_observation(conditional_dim=clique.frontal_dim,
obs_samples=aug_separator_samples)
else: # the root clique
frontal_samples = clique_density_model. \
conditional_sample_given_observation(conditional_dim=clique.frontal_dim,
sample_number=num_samples)
# Dispatch samples
cur_index = 0
for var in frontal_list:
samples[var] = frontal_samples[:,
cur_index: cur_index + var.dim]
cur_index += var.dim
if clique.children:
for child in clique.children:
stack.append(child)
end = time.time()
if timer is not None:
timer.append(end - start)
return samples
def plot2d_posterior(self, title: str = None, xlim=None, ylim=None,
marker_size: float = 1, if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
for i in range(len_var):
cur_sample = self._samples[vars[i]]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], marker=".",
s=marker_size)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def results(self):
return list(self._samples.values()), list(self._samples.keys())
def plot2d_mean_points(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
for i in range(len_var):
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def plot2d_mean_rbt_only(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False, fname=None, front_size=None, show_plot=False, **kwargs):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
lmk_list = []
for i in range(len_var):
if vars[i]._type == VariableType.Landmark:
lmk_list.append(vars[i])
else:
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
for var in lmk_list:
cur_sample = self._samples[var]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], label=var.name)
if if_legend:
            if front_size is not None:
                plt.legend(fontsize=front_size)
            else:
                plt.legend()
if front_size is not None:
plt.xlabel('x (m)', fontsize=front_size)
plt.ylabel('y (m)', fontsize=front_size)
else:
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
if front_size is not None:
plt.title(title, fontsize=front_size)
else:
plt.title(title)
fig_handle = plt.gcf()
if fname is not None:
plt.savefig(fname)
if show_plot:
plt.show()
return fig_handle
def plot2d_MAP_rbt_only(self, title: str = None, xlim=None, ylim=None,
if_legend: bool = False, fname=None, front_size=None):
# xlim and ylim are tuples
vars = self._elimination_ordering
jf = JointFactor(self.physical_factors, vars)
# list(self._samples.keys())
all_sample = sample_dict_to_array(self._samples, vars)
log_pdf = jf.log_pdf(all_sample)
max_idx = np.argmax(log_pdf)
map_sample = all_sample[max_idx:max_idx+1]
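        # keep the single joint sample with the highest log-density as a sample-based MAP estimate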
map_sample_dict = array_order_to_dict(map_sample, vars)
len_var = len(vars)
x_list = []
y_list = []
lmk_list = []
for i in range(len_var):
if vars[i]._type == VariableType.Landmark:
lmk_list.append(vars[i])
else:
cur_sample = map_sample_dict[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
for var in lmk_list:
cur_sample = map_sample_dict[var]
plt.scatter(cur_sample[:, 0], cur_sample[:, 1], label=var.name)
if if_legend:
            if front_size is not None:
                plt.legend(fontsize=front_size)
            else:
                plt.legend()
if front_size is not None:
plt.xlabel('x (m)', fontsize=front_size)
plt.ylabel('y (m)', fontsize=front_size)
else:
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
if front_size is not None:
plt.title(title, fontsize=front_size)
else:
plt.title(title)
fig_handle = plt.gcf()
if fname is not None:
plt.savefig(fname)
plt.show()
return fig_handle
def plot2d_mean_poses(self, title: str = None, xlim=None, ylim=None,
width: float = 0.05, if_legend: bool = False):
# xlim and ylim are tuples
vars = self._elimination_ordering
# list(self._samples.keys())
len_var = len(vars)
x_list = []
y_list = []
for i in range(len_var):
cur_sample = self._samples[vars[i]]
x = np.mean(cur_sample[:, 0])
y = np.mean(cur_sample[:, 1])
x_list.append(x)
y_list.append(y)
# th_mean = circmean(cur_sample[:,2])
# dx, dy = np.cos(th_mean), np.sin(th_mean)
# plt.arrow(x-dx/2, y-dy/2, dx, dy,
# head_width=4*width,
# width=0.05)
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
plt.plot(x_list, y_list)
if if_legend:
plt.legend([var.name for var in vars])
plt.xlabel('x (m)')
plt.ylabel('y (m)')
if title is not None:
plt.title(title)
fig_handle = plt.gcf()
plt.show()
return fig_handle
def plot_factor_graph(self):
pass
def plot_bayes_tree(self):
pass
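# A minimal usage sketch (assumes a concrete subclass implementing the abstract
# model-fitting and sampling hooks of FactorGraphSolver):
#     solver.add_node(var)                           # queue new variables
#     solver.add_factor(factor)                      # queue new prior/likelihood factors
#     solver.update_physical_and_working_graphs()    # merge them and rebuild the Bayes trees
#     samples = solver.incremental_inference()       # fit clique density models and sample the posterior
# run_incrementally below drives exactly this sequence over batches of nodes and factors.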
def run_incrementally(case_dir: str, solver: FactorGraphSolver, nodes_factors_by_step, truth=None, traj_plot=False,
plot_args=None, check_root_transform=False) -> None:
run_count = 1
while os.path.exists(f"{case_dir}/run{run_count}"):
run_count += 1
os.mkdir(f"{case_dir}/run{run_count}")
run_dir = f"{case_dir}/run{run_count}"
print("create run dir: " + run_dir)
file = open(f"{run_dir}/parameters", "w+")
params = solver._args.jsonStr()
print(params)
file.write(params)
file.close()
num_batches = len(nodes_factors_by_step)
observed_nodes = []
step_timer = []
step_list = []
posterior_sampling_timer = []
fitting_timer = []
mixture_factor2weights = {}
show_plot = True
if "show_plot" in plot_args and not plot_args["show_plot"]:
show_plot = False
for i in range(num_batches):
step_nodes, step_factors = nodes_factors_by_step[i]
for node in step_nodes:
solver.add_node(node)
for factor in step_factors:
solver.add_factor(factor)
if isinstance(factor, BinaryFactorMixture):
mixture_factor2weights[factor] = []
observed_nodes += step_nodes
step_list.append(i)
step_file_prefix = f"{run_dir}/step{i}"
detailed_timer = []
clique_dim_timer = []
start = time.time()
solver.update_physical_and_working_graphs(timer=detailed_timer)
cur_sample = solver.incremental_inference(timer=detailed_timer, clique_dim_timer=clique_dim_timer)
end = time.time()
step_timer.append(end - start)
print(f"step {i}/{num_batches} time: {step_timer[-1]} sec, "
f"total time: {sum(step_timer)}")
file = open(f"{step_file_prefix}_ordering", "w+")
file.write(" ".join([var.name for var in solver.elimination_ordering]))
file.close()
file = open(f"{step_file_prefix}_split_timing", "w+")
file.write(" ".join([str(t) for t in detailed_timer]))
file.close()
file = open(f"{step_file_prefix}_step_training_loss", "w+")
last_training_loss = json.dumps(solver._temp_training_loss)
file.write(last_training_loss)
file.close()
posterior_sampling_timer.append(detailed_timer[-1])
fitting_timer.append(sum(detailed_timer[1:-1]))
X = np.hstack([cur_sample[var] for var in solver.elimination_ordering])
np.savetxt(fname=step_file_prefix, X=X)
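        # each row of the saved array is one posterior sample; columns are the variable
        # dimensions concatenated in elimination order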
# check transformation
if check_root_transform:
root_clique = solver.physical_bayes_tree.root
root_clique_model = solver._clique_density_model[root_clique]
y = root_clique_model.prior.sample((3000,))
tx = deepcopy(y)
if hasattr(root_clique_model, "flows"):
for f in root_clique_model.flows[::-1]:
tx = f.inverse_given_separator(tx, None)
y = y.detach().numpy()
tx = tx.detach().numpy()
np.savetxt(fname=step_file_prefix + '_root_normal_data', X=y)
np.savetxt(fname=step_file_prefix + '_root_transformed', X=tx)
plt.figure()
x_sort, tx_sort = sort_pair_lists(tx[:,0], y[:,0])
plt.plot(x_sort, tx_sort)
plt.ylabel("T(x)")
plt.xlabel("x")
plt.savefig(f"{step_file_prefix}_transform.png", bbox_inches="tight")
if show_plot: plt.show()
plt.close()
# clique dim and timing
np.savetxt(fname=step_file_prefix + '_dim_time', X=np.array(clique_dim_timer))
if traj_plot:
plot_2d_samples(samples_mapping=cur_sample,
equal_axis=True,
truth={variable: pose for variable, pose in
truth.items() if variable in solver.physical_vars},
truth_factors={factor for factor in solver.physical_factors if
set(factor.vars).issubset(solver.physical_vars)},
title=f'Step {i}',
plot_all_meas=False,
plot_meas_give_pose=[var for var in step_nodes if var.type == VariableType.Pose],
rbt_traj_no_samples=True,
truth_R2=True,
truth_SE2=False,
truth_odometry_color='k',
truth_landmark_markersize=10,
truth_landmark_marker='x',
file_name=f"{step_file_prefix}.png",
**plot_args)
else:
plot_2d_samples(samples_mapping=cur_sample,
equal_axis=True,
truth={variable: pose for variable, pose in
truth.items() if variable in solver.physical_vars},
truth_factors={factor for factor in solver.physical_factors if
set(factor.vars).issubset(solver.physical_vars)},
file_name=f"{step_file_prefix}.png", title=f'Step {i}',
**plot_args)
solver.plot2d_mean_rbt_only(title=f"step {i} posterior", if_legend=False, fname=f"{step_file_prefix}.png", **plot_args)
# solver.plot2d_MAP_rbt_only(title=f"step {i} posterior", if_legend=False, fname=f"{step_file_prefix}.png")
file = open(f"{run_dir}/step_timing", "w+")
file.write(" ".join(str(t) for t in step_timer))
file.close()
file = open(f"{run_dir}/step_list", "w+")
file.write(" ".join(str(s) for s in step_list))
file.close()
file = open(f"{run_dir}/posterior_sampling_timer", "w+")
file.write(" ".join(str(t) for t in posterior_sampling_timer))
file.close()
file = open(f"{run_dir}/fitting_timer", "w+")
file.write(" ".join(str(t) for t in fitting_timer))
file.close()
plt.figure()
plt.plot(np.array(step_list)*5+5, step_timer, 'go-', label='Total')
plt.plot(np.array(step_list)*5+5, posterior_sampling_timer, 'ro-', label='Posterior sampling')
plt.plot(np.array(step_list)*5+5, fitting_timer, 'bd-', label='Learning NF')
plt.ylabel(f"Time (sec)")
plt.xlabel(f"Key poses")
plt.legend()
plt.savefig(f"{run_dir}/step_timing.png", bbox_inches="tight")
if show_plot: plt.show()
plt.close()
if mixture_factor2weights:
# write updated hypothesis weights
hypo_file = open(run_dir + f'/step{i}.hypoweights', 'w+')
plt.figure()
for factor, weights in mixture_factor2weights.items():
hypo_weights = factor.posterior_weights(cur_sample)
line = ' '.join([var.name for var in factor.vars]) + ' : ' + ','.join(
[str(w) for w in hypo_weights])
hypo_file.writelines(line + '\n')
weights.append(hypo_weights)
for i_w in range(len(hypo_weights)):
plt.plot(np.arange(i + 1 - len(weights), i + 1), np.array(weights)[:, i_w], '-o',
label=f"H{i_w}at{factor.observer_var.name}" if not isinstance(factor, KWayFactor) else
f"{factor.observer_var.name} to {factor.observed_vars[i_w].name}")
hypo_file.close()
plt.legend()
plt.xlabel('Step')
plt.ylabel('Hypothesis weights')
plt.savefig(run_dir + f'/step{i}_hypoweights.png', dpi=300)
if show_plot: plt.show()
plt.close()
|
[
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"sampler.sampler_utils.JointFactor",
"numpy.array",
"utils.Functions.sample_dict_to_array",
"copy.deepcopy",
"sampler.SimulationBasedSampler.SimulationBasedSampler",
"slam.Variables.Variable",
"os.path.exists",
"numpy.mean",
"matplotlib.pyplot.xlabel",
"json.dumps",
"matplotlib.pyplot.plot",
"factors.Factors.LikelihoodFactor",
"matplotlib.pyplot.close",
"os.mkdir",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"slam.FactorGraph.FactorGraph",
"utils.Functions.array_order_to_dict",
"numpy.tile",
"matplotlib.pyplot.savefig",
"factors.Factors.ExplicitPriorFactor",
"matplotlib.pyplot.gcf",
"numpy.argmax",
"utils.Functions.sort_pair_lists",
"numpy.savetxt",
"sampler.NestedSampling.GlobalNestedSampler",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"time.time",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.zeros"
] |
[((32008, 32052), 'os.path.exists', 'os.path.exists', (['f"""{case_dir}/run{run_count}"""'], {}), "(f'{case_dir}/run{run_count}')\n", (32022, 32052), False, 'import os\n'), ((32081, 32119), 'os.mkdir', 'os.mkdir', (['f"""{case_dir}/run{run_count}"""'], {}), "(f'{case_dir}/run{run_count}')\n", (32089, 32119), False, 'import os\n'), ((1672, 1697), 'json.dumps', 'json.dumps', (['self.__dict__'], {}), '(self.__dict__)\n', (1682, 1697), False, 'import json\n'), ((3514, 3527), 'slam.FactorGraph.FactorGraph', 'FactorGraph', ([], {}), '()\n', (3525, 3527), False, 'from slam.FactorGraph import FactorGraph\n'), ((3558, 3571), 'slam.FactorGraph.FactorGraph', 'FactorGraph', ([], {}), '()\n', (3569, 3571), False, 'from slam.FactorGraph import FactorGraph\n'), ((9948, 9959), 'time.time', 'time.time', ([], {}), '()\n', (9957, 9959), False, 'import time\n'), ((15198, 15209), 'time.time', 'time.time', ([], {}), '()\n', (15207, 15209), False, 'import time\n'), ((18262, 18273), 'time.time', 'time.time', ([], {}), '()\n', (18271, 18273), False, 'import time\n'), ((22714, 22725), 'time.time', 'time.time', ([], {}), '()\n', (22723, 22725), False, 'import time\n'), ((24602, 24613), 'time.time', 'time.time', ([], {}), '()\n', (24611, 24613), False, 'import time\n'), ((25398, 25417), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {}), "('x (m)')\n", (25408, 25417), True, 'from matplotlib import pyplot as plt\n'), ((25426, 25445), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {}), "('y (m)')\n", (25436, 25445), True, 'from matplotlib import pyplot as plt\n'), ((25526, 25535), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (25533, 25535), True, 'from matplotlib import pyplot as plt\n'), ((25544, 25554), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25552, 25554), True, 'from matplotlib import pyplot as plt\n'), ((26346, 26370), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {}), '(x_list, y_list)\n', (26354, 26370), True, 'from matplotlib import pyplot as plt\n'), ((26452, 26471), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {}), "('x (m)')\n", (26462, 26471), True, 'from matplotlib import pyplot as plt\n'), ((26480, 26499), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {}), "('y (m)')\n", (26490, 26499), True, 'from matplotlib import pyplot as plt\n'), ((26580, 26589), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (26587, 26589), True, 'from matplotlib import pyplot as plt\n'), ((26598, 26608), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26606, 26608), True, 'from matplotlib import pyplot as plt\n'), ((27536, 27560), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {}), '(x_list, y_list)\n', (27544, 27560), True, 'from matplotlib import pyplot as plt\n'), ((28282, 28291), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (28289, 28291), True, 'from matplotlib import pyplot as plt\n'), ((28674, 28714), 'sampler.sampler_utils.JointFactor', 'JointFactor', (['self.physical_factors', 'vars'], {}), '(self.physical_factors, vars)\n', (28685, 28714), False, 'from sampler.sampler_utils import JointFactor\n'), ((28773, 28814), 'utils.Functions.sample_dict_to_array', 'sample_dict_to_array', (['self._samples', 'vars'], {}), '(self._samples, vars)\n', (28793, 28814), False, 'from utils.Functions import sample_dict_to_array, array_order_to_dict\n'), ((28883, 28901), 'numpy.argmax', 'np.argmax', (['log_pdf'], {}), '(log_pdf)\n', (28892, 28901), True, 'import numpy as np\n'), ((28981, 29018), 
'utils.Functions.array_order_to_dict', 'array_order_to_dict', (['map_sample', 'vars'], {}), '(map_sample, vars)\n', (29000, 29018), False, 'from utils.Functions import sample_dict_to_array, array_order_to_dict\n'), ((29620, 29644), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {}), '(x_list, y_list)\n', (29628, 29644), True, 'from matplotlib import pyplot as plt\n'), ((30368, 30377), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (30375, 30377), True, 'from matplotlib import pyplot as plt\n'), ((30447, 30457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30455, 30457), True, 'from matplotlib import pyplot as plt\n'), ((31406, 31430), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {}), '(x_list, y_list)\n', (31414, 31430), True, 'from matplotlib import pyplot as plt\n'), ((31512, 31531), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {}), "('x (m)')\n", (31522, 31531), True, 'from matplotlib import pyplot as plt\n'), ((31540, 31559), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {}), "('y (m)')\n", (31550, 31559), True, 'from matplotlib import pyplot as plt\n'), ((31640, 31649), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (31647, 31649), True, 'from matplotlib import pyplot as plt\n'), ((31658, 31668), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31666, 31668), True, 'from matplotlib import pyplot as plt\n'), ((33187, 33198), 'time.time', 'time.time', ([], {}), '()\n', (33196, 33198), False, 'import time\n'), ((33392, 33403), 'time.time', 'time.time', ([], {}), '()\n', (33401, 33403), False, 'import time\n'), ((33966, 34004), 'json.dumps', 'json.dumps', (['solver._temp_training_loss'], {}), '(solver._temp_training_loss)\n', (33976, 34004), False, 'import json\n'), ((34195, 34262), 'numpy.hstack', 'np.hstack', (['[cur_sample[var] for var in solver.elimination_ordering]'], {}), '([cur_sample[var] for var in solver.elimination_ordering])\n', (34204, 34262), True, 'import numpy as np\n'), ((34271, 34310), 'numpy.savetxt', 'np.savetxt', ([], {'fname': 'step_file_prefix', 'X': 'X'}), '(fname=step_file_prefix, X=X)\n', (34281, 34310), True, 'import numpy as np\n'), ((37916, 37928), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (37926, 37928), True, 'from matplotlib import pyplot as plt\n'), ((38201, 38226), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""Time (sec)"""'], {}), "(f'Time (sec)')\n", (38211, 38226), True, 'from matplotlib import pyplot as plt\n'), ((38235, 38259), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""Key poses"""'], {}), "(f'Key poses')\n", (38245, 38259), True, 'from matplotlib import pyplot as plt\n'), ((38268, 38280), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (38278, 38280), True, 'from matplotlib import pyplot as plt\n'), ((38289, 38351), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{run_dir}/step_timing.png"""'], {'bbox_inches': '"""tight"""'}), "(f'{run_dir}/step_timing.png', bbox_inches='tight')\n", (38300, 38351), True, 'from matplotlib import pyplot as plt\n'), ((38393, 38404), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (38402, 38404), True, 'from matplotlib import pyplot as plt\n'), ((8980, 9037), 'factors.Factors.ExplicitPriorFactor', 'ExplicitPriorFactor', ([], {'vars': 'vars', 'distribution': 'distribution'}), '(vars=vars, distribution=distribution)\n', (8999, 9037), False, 'from factors.Factors import Factor, ExplicitPriorFactor, ImplicitPriorFactor, LikelihoodFactor, BinaryFactorMixture, KWayFactor\n'), 
((9484, 9538), 'factors.Factors.LikelihoodFactor', 'LikelihoodFactor', ([], {'vars': 'vars', 'log_likelihood': 'likelihood'}), '(vars=vars, log_likelihood=likelihood)\n', (9500, 9538), False, 'from factors.Factors import Factor, ExplicitPriorFactor, ImplicitPriorFactor, LikelihoodFactor, BinaryFactorMixture, KWayFactor\n'), ((18337, 18348), 'time.time', 'time.time', ([], {}), '()\n', (18346, 18348), False, 'import time\n'), ((19062, 19073), 'time.time', 'time.time', ([], {}), '()\n', (19071, 19073), False, 'import time\n'), ((19386, 19397), 'time.time', 'time.time', ([], {}), '()\n', (19395, 19397), False, 'import time\n'), ((20931, 20942), 'time.time', 'time.time', ([], {}), '()\n', (20940, 20942), False, 'import time\n'), ((21865, 21933), 'sampler.SimulationBasedSampler.SimulationBasedSampler', 'SimulationBasedSampler', ([], {'factors': 'graph.factors', 'vars': 'variable_pattern'}), '(factors=graph.factors, vars=variable_pattern)\n', (21887, 21933), False, 'from sampler.SimulationBasedSampler import SimulationBasedSampler\n'), ((23343, 23375), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_samples, 0)'}), '(shape=(num_samples, 0))\n', (23351, 23375), True, 'import numpy as np\n'), ((25090, 25164), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cur_sample[:, 0]', 'cur_sample[:, 1]'], {'marker': '"""."""', 's': 'marker_size'}), "(cur_sample[:, 0], cur_sample[:, 1], marker='.', s=marker_size)\n", (25101, 25164), True, 'from matplotlib import pyplot as plt\n'), ((25351, 25389), 'matplotlib.pyplot.legend', 'plt.legend', (['[var.name for var in vars]'], {}), '([var.name for var in vars])\n', (25361, 25389), True, 'from matplotlib import pyplot as plt\n'), ((25488, 25504), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (25497, 25504), True, 'from matplotlib import pyplot as plt\n'), ((26084, 26109), 'numpy.mean', 'np.mean', (['cur_sample[:, 0]'], {}), '(cur_sample[:, 0])\n', (26091, 26109), True, 'import numpy as np\n'), ((26126, 26151), 'numpy.mean', 'np.mean', (['cur_sample[:, 1]'], {}), '(cur_sample[:, 1])\n', (26133, 26151), True, 'import numpy as np\n'), ((26405, 26443), 'matplotlib.pyplot.legend', 'plt.legend', (['[var.name for var in vars]'], {}), '([var.name for var in vars])\n', (26415, 26443), True, 'from matplotlib import pyplot as plt\n'), ((26542, 26558), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (26551, 26558), True, 'from matplotlib import pyplot as plt\n'), ((27646, 27709), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cur_sample[:, 0]', 'cur_sample[:, 1]'], {'label': 'var.name'}), '(cur_sample[:, 0], cur_sample[:, 1], label=var.name)\n', (27657, 27709), True, 'from matplotlib import pyplot as plt\n'), ((27914, 27954), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {'fontsize': 'front_size'}), "('x (m)', fontsize=front_size)\n", (27924, 27954), True, 'from matplotlib import pyplot as plt\n'), ((27967, 28007), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {'fontsize': 'front_size'}), "('y (m)', fontsize=front_size)\n", (27977, 28007), True, 'from matplotlib import pyplot as plt\n'), ((28034, 28053), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {}), "('x (m)')\n", (28044, 28053), True, 'from matplotlib import pyplot as plt\n'), ((28066, 28085), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {}), "('y (m)')\n", (28076, 28085), True, 'from matplotlib import pyplot as plt\n'), ((28334, 28352), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (28345, 
28352), True, 'from matplotlib import pyplot as plt\n'), ((28387, 28397), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28395, 28397), True, 'from matplotlib import pyplot as plt\n'), ((29732, 29795), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cur_sample[:, 0]', 'cur_sample[:, 1]'], {'label': 'var.name'}), '(cur_sample[:, 0], cur_sample[:, 1], label=var.name)\n', (29743, 29795), True, 'from matplotlib import pyplot as plt\n'), ((30000, 30040), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {'fontsize': 'front_size'}), "('x (m)', fontsize=front_size)\n", (30010, 30040), True, 'from matplotlib import pyplot as plt\n'), ((30053, 30093), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {'fontsize': 'front_size'}), "('y (m)', fontsize=front_size)\n", (30063, 30093), True, 'from matplotlib import pyplot as plt\n'), ((30120, 30139), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {}), "('x (m)')\n", (30130, 30139), True, 'from matplotlib import pyplot as plt\n'), ((30152, 30171), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {}), "('y (m)')\n", (30162, 30171), True, 'from matplotlib import pyplot as plt\n'), ((30420, 30438), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (30431, 30438), True, 'from matplotlib import pyplot as plt\n'), ((30910, 30935), 'numpy.mean', 'np.mean', (['cur_sample[:, 0]'], {}), '(cur_sample[:, 0])\n', (30917, 30935), True, 'import numpy as np\n'), ((30952, 30977), 'numpy.mean', 'np.mean', (['cur_sample[:, 1]'], {}), '(cur_sample[:, 1])\n', (30959, 30977), True, 'import numpy as np\n'), ((31465, 31503), 'matplotlib.pyplot.legend', 'plt.legend', (['[var.name for var in vars]'], {}), '([var.name for var in vars])\n', (31475, 31503), True, 'from matplotlib import pyplot as plt\n'), ((31602, 31618), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (31611, 31618), True, 'from matplotlib import pyplot as plt\n'), ((34581, 34592), 'copy.deepcopy', 'deepcopy', (['y'], {}), '(y)\n', (34589, 34592), False, 'from copy import deepcopy\n'), ((34846, 34907), 'numpy.savetxt', 'np.savetxt', ([], {'fname': "(step_file_prefix + '_root_normal_data')", 'X': 'y'}), "(fname=step_file_prefix + '_root_normal_data', X=y)\n", (34856, 34907), True, 'import numpy as np\n'), ((34920, 34982), 'numpy.savetxt', 'np.savetxt', ([], {'fname': "(step_file_prefix + '_root_transformed')", 'X': 'tx'}), "(fname=step_file_prefix + '_root_transformed', X=tx)\n", (34930, 34982), True, 'import numpy as np\n'), ((34996, 35008), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (35006, 35008), True, 'from matplotlib import pyplot as plt\n'), ((35039, 35073), 'utils.Functions.sort_pair_lists', 'sort_pair_lists', (['tx[:, 0]', 'y[:, 0]'], {}), '(tx[:, 0], y[:, 0])\n', (35054, 35073), False, 'from utils.Functions import sort_pair_lists\n'), ((35084, 35109), 'matplotlib.pyplot.plot', 'plt.plot', (['x_sort', 'tx_sort'], {}), '(x_sort, tx_sort)\n', (35092, 35109), True, 'from matplotlib import pyplot as plt\n'), ((35122, 35140), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""T(x)"""'], {}), "('T(x)')\n", (35132, 35140), True, 'from matplotlib import pyplot as plt\n'), ((35153, 35168), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (35163, 35168), True, 'from matplotlib import pyplot as plt\n'), ((35181, 35250), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{step_file_prefix}_transform.png"""'], {'bbox_inches': '"""tight"""'}), "(f'{step_file_prefix}_transform.png', 
bbox_inches='tight')\n", (35192, 35250), True, 'from matplotlib import pyplot as plt\n'), ((35300, 35311), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (35309, 35311), True, 'from matplotlib import pyplot as plt\n'), ((38374, 38384), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (38382, 38384), True, 'from matplotlib import pyplot as plt\n'), ((38570, 38582), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (38580, 38582), True, 'from matplotlib import pyplot as plt\n'), ((39361, 39373), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (39371, 39373), True, 'from matplotlib import pyplot as plt\n'), ((39386, 39404), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Step"""'], {}), "('Step')\n", (39396, 39404), True, 'from matplotlib import pyplot as plt\n'), ((39417, 39449), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Hypothesis weights"""'], {}), "('Hypothesis weights')\n", (39427, 39449), True, 'from matplotlib import pyplot as plt\n'), ((39462, 39521), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(run_dir + f'/step{i}_hypoweights.png')"], {'dpi': '(300)'}), "(run_dir + f'/step{i}_hypoweights.png', dpi=300)\n", (39473, 39521), True, 'from matplotlib import pyplot as plt\n'), ((39571, 39582), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (39580, 39582), True, 'from matplotlib import pyplot as plt\n'), ((8209, 8228), 'slam.Variables.Variable', 'Variable', (['name', 'dim'], {}), '(name, dim)\n', (8217, 8228), False, 'from slam.Variables import Variable, VariableType\n'), ((18479, 18490), 'time.time', 'time.time', ([], {}), '()\n', (18488, 18490), False, 'import time\n'), ((22094, 22160), 'sampler.NestedSampling.GlobalNestedSampler', 'GlobalNestedSampler', ([], {'nodes': 'variable_pattern', 'factors': 'graph.factors'}), '(nodes=variable_pattern, factors=graph.factors)\n', (22113, 22160), False, 'from sampler.NestedSampling import GlobalNestedSampler\n'), ((22315, 22327), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (22323, 22327), True, 'import numpy as np\n'), ((23447, 23477), 'numpy.tile', 'np.tile', (['obs', '(num_samples, 1)'], {}), '(obs, (num_samples, 1))\n', (23454, 23477), True, 'import numpy as np\n'), ((23557, 23605), 'numpy.hstack', 'np.hstack', (['(aug_separator_samples, samples[var])'], {}), '((aug_separator_samples, samples[var]))\n', (23566, 23605), True, 'import numpy as np\n'), ((25238, 25252), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (25246, 25252), True, 'from matplotlib import pyplot as plt\n'), ((25302, 25316), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (25310, 25316), True, 'from matplotlib import pyplot as plt\n'), ((26259, 26273), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (26267, 26273), True, 'from matplotlib import pyplot as plt\n'), ((26323, 26337), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (26331, 26337), True, 'from matplotlib import pyplot as plt\n'), ((27246, 27271), 'numpy.mean', 'np.mean', (['cur_sample[:, 0]'], {}), '(cur_sample[:, 0])\n', (27253, 27271), True, 'import numpy as np\n'), ((27292, 27317), 'numpy.mean', 'np.mean', (['cur_sample[:, 1]'], {}), '(cur_sample[:, 1])\n', (27299, 27317), True, 'import numpy as np\n'), ((27787, 27799), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (27797, 27799), True, 'from matplotlib import pyplot as plt\n'), ((27834, 27865), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'front_size'}), '(fontsize=front_size)\n', (27844, 27865), 
True, 'from matplotlib import pyplot as plt\n'), ((28172, 28209), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': 'front_size'}), '(title, fontsize=front_size)\n', (28181, 28209), True, 'from matplotlib import pyplot as plt\n'), ((28244, 28260), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (28253, 28260), True, 'from matplotlib import pyplot as plt\n'), ((29330, 29355), 'numpy.mean', 'np.mean', (['cur_sample[:, 0]'], {}), '(cur_sample[:, 0])\n', (29337, 29355), True, 'import numpy as np\n'), ((29376, 29401), 'numpy.mean', 'np.mean', (['cur_sample[:, 1]'], {}), '(cur_sample[:, 1])\n', (29383, 29401), True, 'import numpy as np\n'), ((29873, 29885), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (29883, 29885), True, 'from matplotlib import pyplot as plt\n'), ((29920, 29951), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'front_size'}), '(fontsize=front_size)\n', (29930, 29951), True, 'from matplotlib import pyplot as plt\n'), ((30258, 30295), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': 'front_size'}), '(title, fontsize=front_size)\n', (30267, 30295), True, 'from matplotlib import pyplot as plt\n'), ((30330, 30346), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (30339, 30346), True, 'from matplotlib import pyplot as plt\n'), ((31319, 31333), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (31327, 31333), True, 'from matplotlib import pyplot as plt\n'), ((31383, 31397), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (31391, 31397), True, 'from matplotlib import pyplot as plt\n'), ((35277, 35287), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (35285, 35287), True, 'from matplotlib import pyplot as plt\n'), ((35404, 35430), 'numpy.array', 'np.array', (['clique_dim_timer'], {}), '(clique_dim_timer)\n', (35412, 35430), True, 'import numpy as np\n'), ((39548, 39558), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (39556, 39558), True, 'from matplotlib import pyplot as plt\n'), ((27441, 27455), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (27449, 27455), True, 'from matplotlib import pyplot as plt\n'), ((27513, 27527), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (27521, 27527), True, 'from matplotlib import pyplot as plt\n'), ((29525, 29539), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (29533, 29539), True, 'from matplotlib import pyplot as plt\n'), ((29597, 29611), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (29605, 29611), True, 'from matplotlib import pyplot as plt\n'), ((37946, 37965), 'numpy.array', 'np.array', (['step_list'], {}), '(step_list)\n', (37954, 37965), True, 'import numpy as np\n'), ((38022, 38041), 'numpy.array', 'np.array', (['step_list'], {}), '(step_list)\n', (38030, 38041), True, 'import numpy as np\n'), ((38125, 38144), 'numpy.array', 'np.array', (['step_list'], {}), '(step_list)\n', (38133, 38144), True, 'import numpy as np\n'), ((39074, 39091), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (39082, 39091), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
import time
#https://stackoverflow.com/questions/714063/importing-modules-from-parent-folder
import sys
sys.path.insert(0,'../gym')
import numpy as np
from support import *
from model import *
def run_exper(model, steps, get_features, pre_proc_features):
from environment import SIMULATOR
# initializing our environment
my_sim = SIMULATOR()
# beginning of an episode
state_temp = my_sim.reset()
observation = my_sim.state_to_tensor(state_temp)
r_tup, e_tup, rover_poss = [], [], []
# main loop
prev_input = None
total_moves = 0
MAX_MOVES = 25
for i in range(steps):
total_moves += 1
start = time.perf_counter()
cur_input = observation
        x = cur_input.astype(float).ravel() if prev_input is not None else np.zeros(70)
x = x[10:80] if prev_input is not None else x
x = np.array([x[i] for i in range(len(x)) if not (i%10 == 0)])
x = np.array([x[i] for i in range(len(x)) if not ((i - 8 )% 9 == 0)])
x , rov_pos = get_rover_pos(x, r_tup, e_tup, rover_poss)
x = np.array(x)
rover_poss.append(rov_pos)
"""
x = x[x != 0]
if(len(x) == 1):
x = np.zeros(4)
x = x.tolist()
x.append(-7.)
x = np.array(x)
"""
#print_map(x)
x_t = pre_proc_features.fit_transform(x.reshape(-1, 1))
x_t = x_t.reshape(1, INPUT_SIZE)[0]
print("Shape = ", x_t.shape)
prev_input = cur_input
# forward the policy network and sample action according to the proba distribution
#print_map(x)
proba = model.predict(np.expand_dims(x_t, axis=1).T)
end = time.perf_counter()
action = proba[0].argmax()
print("Time taken = ", end - start)
#run one step
state_temp, reward, done, r_tup, e_tup = my_sim.step(action)
observation = my_sim.state_to_tensor(state_temp)
my_sim.render()
time.sleep(1)
if total_moves == MAX_MOVES:
total_moves = 0
done = True
# if episode is over, reset to beginning
if done:
state_temp = my_sim.reset()
observation = my_sim.state_to_tensor(state_temp)
my_sim.render()
rover_poss = []
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data_type', default = 'sparse',
type=str, help = 'Choose between encoded or sparse')
parser.add_argument('--n_steps', default = 30,
type=int, help = 'Choose a number.')
parser.add_argument('--demo_file', default = '',
type=str, help = 'File for demo.')
args = parser.parse_args()
data_type = args.data_type
steps = args.n_steps
latest_file = args.demo_file
model = get_model(data_type)
get_features, pre_proc_features = get_pre_proc_info(data_type)
if(len(latest_file) == 0):
latest_file = get_latest_file()
if latest_file != None and latest_file[0:13] == "rock_my_model":
print("===>", latest_file)
model.load_weights(latest_file)
else:
print("Model not found: Exiting...")
sys.exit(0)
run_exper(model, steps, get_features, pre_proc_features)
|
[
"sys.path.insert",
"argparse.ArgumentParser",
"time.perf_counter",
"time.sleep",
"environment.SIMULATOR",
"numpy.array",
"numpy.zeros",
"numpy.expand_dims",
"sys.exit"
] |
[((144, 172), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../gym"""'], {}), "(0, '../gym')\n", (159, 172), False, 'import sys\n'), ((384, 395), 'environment.SIMULATOR', 'SIMULATOR', ([], {}), '()\n', (393, 395), False, 'from environment import SIMULATOR\n'), ((2414, 2439), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2437, 2439), False, 'import argparse\n'), ((703, 722), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (720, 722), False, 'import time\n'), ((1127, 1138), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1135, 1138), True, 'import numpy as np\n'), ((1741, 1760), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1758, 1760), False, 'import time\n'), ((2021, 2034), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2031, 2034), False, 'import time\n'), ((3269, 3280), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3277, 3280), False, 'import sys\n'), ((833, 845), 'numpy.zeros', 'np.zeros', (['(70)'], {}), '(70)\n', (841, 845), True, 'import numpy as np\n'), ((1696, 1723), 'numpy.expand_dims', 'np.expand_dims', (['x_t'], {'axis': '(1)'}), '(x_t, axis=1)\n', (1710, 1723), True, 'import numpy as np\n')]
|
import numpy as np
one_d_array = [0, 1, 2, 3, 4, 5]
two_d_array = [
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
    [26, 27, 28, 29, 30],
[31, 32, 33, 34, 35]
]
t = one_d_array[3]
# x: coord(index)
e = two_d_array[2][1]
# y x y:row x:column
arr = np.array(two_d_array)  # convert the nested list to an ndarray
# print(arr[2, 2])
# slice the columns of a 2D array (row 1, columns 2 to 4)
print(arr[1, 2:5])
# slice the rows of a 2D array
print(arr[1:, 1:4])
# control the step size
print(arr[::4, ::4])
# pick out a single row (everything else is dropped)
print(arr[1, :])
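
# For reference, the slices above evaluate to the following values (worked out by
# hand from the 5x5 array defined at the top of the file):
#   arr[1, 2:5]   -> [18 19 20]
#   arr[1:, 1:4]  -> [[17 18 19]
#                     [22 23 24]
#                     [27 28 29]
#                     [32 33 34]]
#   arr[::4, ::4] -> [[11 15]
#                     [31 35]]
#   arr[1, :]     -> [16 17 18 19 20]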
|
[
"numpy.array"
] |
[((320, 341), 'numpy.array', 'np.array', (['two_d_array'], {}), '(two_d_array)\n', (328, 341), True, 'import numpy as np\n')]
|
'''
Created on 2009-08-11
@author: malem303
'''
import unittest
from imugrabber.algorithms import fong_accelero, utils, statistics
from imugrabber.algorithms import io
import os
import scipy as sp
from numpy import testing
class FongTests(unittest.TestCase):
def setUp(self):
self.misalignmentsAndScales = sp.array([[ 4.08269136e-03, -1.96002082e-05, 1.16692771e-04],
[ -6.73123099e-06, 3.86658837e-03, -2.77361987e-04],
[ -6.43895175e-05, 2.91260930e-04, 3.93614477e-03]])
self.biases = sp.array([[ 604.00283039],
[ 480.33539568],
[ 522.23054001]])
def test_fit_parameters(self):
dataSet = io.float_columns_from_CSV(csvFileName = "FongTests.csv", path = io._test_path)
x, y, z = dataSet['avgX'], dataSet['avgY'], dataSet['avgZ']
measures = utils.build_measures_matrix(x, y, z)
misalignmentsAndScales, biases = fong_accelero.fit(measures)
testing.assert_almost_equal(misalignmentsAndScales, self.misalignmentsAndScales)
testing.assert_almost_equal(biases, self.biases)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
[
"imugrabber.algorithms.fong_accelero.fit",
"scipy.array",
"imugrabber.algorithms.utils.build_measures_matrix",
"numpy.testing.assert_almost_equal",
"imugrabber.algorithms.io.float_columns_from_CSV",
"unittest.main"
] |
[((1366, 1381), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1379, 1381), False, 'import unittest\n'), ((332, 502), 'scipy.array', 'sp.array', (['[[0.00408269136, -1.96002082e-05, 0.000116692771], [-6.73123099e-06, \n 0.00386658837, -0.000277361987], [-6.43895175e-05, 0.00029126093, \n 0.00393614477]]'], {}), '([[0.00408269136, -1.96002082e-05, 0.000116692771], [-\n 6.73123099e-06, 0.00386658837, -0.000277361987], [-6.43895175e-05, \n 0.00029126093, 0.00393614477]])\n', (340, 502), True, 'import scipy as sp\n'), ((630, 688), 'scipy.array', 'sp.array', (['[[604.00283039], [480.33539568], [522.23054001]]'], {}), '([[604.00283039], [480.33539568], [522.23054001]])\n', (638, 688), True, 'import scipy as sp\n'), ((821, 895), 'imugrabber.algorithms.io.float_columns_from_CSV', 'io.float_columns_from_CSV', ([], {'csvFileName': '"""FongTests.csv"""', 'path': 'io._test_path'}), "(csvFileName='FongTests.csv', path=io._test_path)\n", (846, 895), False, 'from imugrabber.algorithms import io\n'), ((1005, 1041), 'imugrabber.algorithms.utils.build_measures_matrix', 'utils.build_measures_matrix', (['x', 'y', 'z'], {}), '(x, y, z)\n', (1032, 1041), False, 'from imugrabber.algorithms import fong_accelero, utils, statistics\n'), ((1093, 1120), 'imugrabber.algorithms.fong_accelero.fit', 'fong_accelero.fit', (['measures'], {}), '(measures)\n', (1110, 1120), False, 'from imugrabber.algorithms import fong_accelero, utils, statistics\n'), ((1138, 1223), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (['misalignmentsAndScales', 'self.misalignmentsAndScales'], {}), '(misalignmentsAndScales, self.misalignmentsAndScales\n )\n', (1165, 1223), False, 'from numpy import testing\n'), ((1236, 1284), 'numpy.testing.assert_almost_equal', 'testing.assert_almost_equal', (['biases', 'self.biases'], {}), '(biases, self.biases)\n', (1263, 1284), False, 'from numpy import testing\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 09:44:30 2021
@author: erri
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, BoundaryNorm
########################################################################################################################
# SETUP FOLDERS
########################################################################################################################
# setup working directory and DEM's name
w_dir = '/home/erri/Documents/morphological_approach/3_output_data/q2.0_2/2_prc_laser/surveys/'
DEM1_name = 'matrix_bed_norm_q20S5.txt'
DEM2_name = 'matrix_bed_norm_q20S6.txt'
# array mask for filtering data outside the channel domain
array_mask, array_mask_path = 'array_mask.txt', '/home/erri/Documents/morphological_approach/2_raw_data'
# TODO Update the mask based on the new laser outputs [threshold at 12 mm]
path_DEM1 = os.path.join(w_dir, DEM1_name)
path_DEM2 = os.path.join(w_dir, DEM2_name)
DoD_name = 'DoD_' + DEM2_name[19:21] + '-' + DEM1_name[19:21] + '_'
# Output folder
name_out = 'script_outputs_' + DEM2_name[19:21] + '-' + DEM1_name[19:21]
dir_out = '/home/erri/Documents/morphological_approach/3_output_data/q1.0_2/2_prc_laser/DoDs/'
path_out = os.path.join(dir_out, name_out)
if os.path.exists(path_out):
pass
else:
os.mkdir(path_out)
########################################################################################################################
# SETUP SCRIPT PARAMETERS
########################################################################################################################
# Thresholds values
thrs_1 = 2.0 # [mm]
thrs_2 = 15.0 # [mm]
neigh_thrs = 4 # [-]
# Pixel dimension
px_x = 50 # [mm]
px_y = 5 # [mm]
# Not a number raster value (NaN)
NaN = -999
##############################################################################
# DATA READING...
##############################################################################
# Header initialization and extraction
lines = []
header = []
with open(path_DEM1, 'r') as file:
for line in file:
lines.append(line) # lines is a list. Each item is a row of the input file
# Header extraction...
for i in range(0, 7):
header.append(lines[i])
# Print the header to a txt file called header.txt
with open(path_out + '/' + DoD_name + 'header.txt', 'w') as head:
head.writelines(header)
##############################################################################
# DATA LOADING...
##############################################################################
DEM1 = np.loadtxt(path_DEM1,
# delimiter=',',
skiprows=8
)
DEM2 = np.loadtxt(path_DEM2,
# delimiter=',',
skiprows=8)
# Shape control:
arr_shape=min(DEM1.shape, DEM2.shape)
if not(DEM1.shape == DEM2.shape):
    print('Attention: DEMs do not have the same shape.')
# reshaping:
rows = min(DEM1.shape[0], DEM2.shape[0])
cols = min(DEM1.shape[1], DEM2.shape[1])
arr_shape = [rows, cols]
DEM1=DEM1[0:arr_shape[0], 0:arr_shape[1]]
DEM2=DEM2[0:arr_shape[0], 0:arr_shape[1]]
##############################################################################
# PERFORM DEM OF DIFFERENCE - DEM2-DEM1
##############################################################################
# mask for filtering data outside the channel domain
array_mask = np.loadtxt(os.path.join(array_mask_path, array_mask))
if not(array_mask.shape == arr_shape):
array_mask=array_mask[0:arr_shape[0], 0:arr_shape[1]]
array_msk = np.where(np.isnan(array_mask), 0, 1)
array_msk_nan = np.where(np.logical_not(np.isnan(array_mask)), 1, np.nan)
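# array_msk is 1 where the mask file holds a value and 0 where it is NaN;
# array_msk_nan uses 1/NaN instead, so multiplying an array by it keeps the
# masked-in cells and turns everything else into NaN.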
# Raster dimension
dim_x, dim_y = DEM1.shape
# Creating DoD array with np.nan
DoD_raw = np.zeros(DEM1.shape)
DoD_raw = np.where(np.logical_or(DEM1 == NaN, DEM2 == NaN), np.nan, DEM2 - DEM1)
# Creating GIS readable DoD array (np.nan as -999)
DoD_raw_rst = np.zeros(DEM1.shape)
DoD_raw_rst = np.where(np.logical_or(DEM1 == NaN, DEM2 == NaN), NaN, DEM2 - DEM1)
# Count the number of pixels in the channel area
DoD_count = np.count_nonzero(np.where(np.isnan(DoD_raw), 0, 1))
print('Active pixels:', DoD_count)
# DoD statistics
print('The minimum DoD value is:\n', np.nanmin(DoD_raw))
print('The maximum DoD value is:\n', np.nanmax(DoD_raw))
print('The DoD shape is:\n', DoD_raw.shape)
##############################################################################
# DATA FILTERING...
##############################################################################
# Perform domain-wide average
domain_avg = np.pad(DoD_raw, 1, mode='edge') # 1-pixel pad of the domain with edge values
DoD_mean = np.zeros(DEM1.shape)
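# The loop below applies a cross-shaped kernel that involves only the cell itself and
# its two neighbours along the first array axis (weights 1-2-1, normalised to sum 1);
# np.nansum treats NaN neighbours as zero contributions.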
for i in range (0, dim_x):
for j in range (0, dim_y):
if np.isnan(DoD_raw[i, j]):
DoD_mean[i, j] = np.nan
else:
k = np.array([[domain_avg[i, j], domain_avg[i, j + 1], domain_avg[i, j + 2]],
[domain_avg[i + 1, j], domain_avg[i + 1, j + 1], domain_avg[i + 1, j + 2]],
[domain_avg[i + 2, j], domain_avg[i + 2, j + 1], domain_avg[i + 2, j + 2]]])
w = np.array([[0, 1, 0],
[0, 2, 0],
[0, 1, 0]])
            w_norm = w / (sum(sum(w))) # Normalizing kernel
DoD_mean[i, j] = np.nansum(k * w_norm)
# Filtered array weighted average by nan.array mask
DoD_mean_msk = DoD_mean * array_msk_nan
# Create a GIS readable DoD mean (np.nann as -999)
DoD_mean_rst = np.where(np.isnan(DoD_mean_msk), NaN, DoD_mean_msk)
# Filtering data less than thrs_1
mask_thrs_1 = abs(DoD_mean_msk) > thrs_1
DoD_mean_th1 = DoD_mean_msk * mask_thrs_1 # * array_msk_nan
DoD_mean_th1_rst = np.where(np.isnan(DoD_mean_th1), NaN, DoD_mean_th1)
# Neighbourhood coalition analysis
domain_neigh = np.pad(DoD_mean_th1, 1, mode='edge') # Analysis domain
coal_neigh = np.zeros(DEM1.shape) # Initialized output array
# TODO Check that no value is excluded from this analysis
for i in range(0, dim_x):
for j in range(0, dim_y):
if np.isnan(DoD_mean_th1[i, j]):
coal_neigh[i, j] = np.nan
elif thrs_1 <= abs(DoD_mean_th1[i, j]) <= thrs_2:
ker = np.array([[domain_neigh[i, j], domain_neigh[i, j + 1], domain_neigh[i, j + 2]],
[domain_neigh[i + 1, j], domain_neigh[i + 1, j + 1], domain_neigh[i + 1, j + 2]],
[domain_neigh[i + 2, j], domain_neigh[i + 2, j + 1], domain_neigh[i + 2, j + 2]]])
if DoD_mean_th1[i, j] < 0 and np.count_nonzero(ker < 0) > neigh_thrs:
coal_neigh[i, j] = DoD_mean_th1[i, j]
elif DoD_mean_th1[i, j] > 0 and np.count_nonzero(ker > 0) > neigh_thrs:
coal_neigh[i, j] = DoD_mean_th1[i, j]
else:
coal_neigh[i, j] = 0
else:
coal_neigh[i,j] = DoD_mean_th1[i,j]
...
# Avoiding zero-surrounded pixel
domain_neigh2 = np.pad(coal_neigh, 1, mode='edge') # Analysis domain
for i in range(0, dim_x):
for j in range(0,dim_y):
ker = np.array([[domain_neigh2[i, j], domain_neigh2[i, j + 1], domain_neigh2[i, j + 2]],
[domain_neigh2[i + 1, j], 0, domain_neigh2[i + 1, j + 2]],
[domain_neigh2[i + 2, j], domain_neigh2[i + 2, j + 1], domain_neigh2[i + 2, j + 2]]])
num = np.count_nonzero(ker == 0) + np.count_nonzero(~np.isnan(ker))
if num == 8:
coal_neigh[i,j]=0
...
DoD_out = coal_neigh # * array_msk_nan
# Create a GIS readable filtered DoD (np.nann as -999)
DoD_out_rst = np.where(np.isnan(DoD_out), NaN, DoD_out)
##############################################################################
# PLOT RAW DOD, MEAN DOD AND FILTERED DOD
##############################################################################
# Plot data using nicer colors
colors = ['linen', 'lightgreen', 'darkgreen', 'maroon']
class_bins = [-10.5, -1.5, 0, 1.5, 10.5]
cmap = ListedColormap(colors)
norm = BoundaryNorm(class_bins,
len(colors))
fig, (ax1, ax2, ax3) = plt.subplots(3,1)
raw= ax1.imshow(DoD_raw, cmap=cmap, norm=norm)
ax1.set_title('raw DoD')
mean = ax2.imshow(DoD_mean_th1, cmap=cmap, norm=norm)
ax2.set_title('mean DoD')
filt = ax3.imshow(DoD_out, cmap=cmap, norm=norm)
ax3.set_title('Filtered DoD')
#fig.colorbar()
fig.tight_layout()
plt.savefig(path_out + '/raster.pdf') # raster (png, jpg, rgb, tif), vector (pdf, eps), latex (pgf)
plt.show()  # save before showing, otherwise an empty figure may be written
#plt.imshow(DoD_out, cmap='RdYlGn')
##############################################################################
# VOLUMES
##############################################################################
# DoD filtered name: coal_neigh
# Create new raster where apply volume calculation
# DoD>0 --> Deposition, DoD<0 --> Scour
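# Each DoD cell holds an elevation change in mm, so summing the cells and multiplying
# by the pixel footprint px_x*px_y (mm^2) yields a volume in mm^3.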
DoD_vol = np.where(np.isnan(coal_neigh), 0, coal_neigh)
DEP = (DoD_vol>0)*DoD_vol
SCO = (DoD_vol<0)*DoD_vol
print('Total volume [mm^3]:', np.sum(DoD_vol)*px_x*px_y)
print('Deposition volume [mm^3]:', np.sum(DEP)*px_x*px_y)
print('Scour volume [mm^3]:', np.sum(SCO)*px_x*px_y)
#volume_filt1 = np.sum(np.abs(filtered1_raster_volume))*px_x*px_y
#print('DoD filt_1 volume:', volume_filt1, 'mm^3')
##############################################################################
# SAVE DATA
##############################################################################
#percorso = '/home/erri/Documents/morphological_approach/3_output_data/q1.5/2_prc_laser/script_outputs_s7-s6/verifica/'
#np.savetxt(percorso + 'DoD_raw.txt', DoD_raw, fmt='%0.1f', delimiter='\t')
#np.savetxt(percorso + 'DoD_mean.txt', DoD_mean, fmt='%0.1f', delimiter='\t')
# RAW DoD
# Print raw DoD in txt file (NaN as np.nan)
np.savetxt(path_out + '/' + DoD_name + 'raw.txt', DoD_raw, fmt='%0.1f', delimiter='\t')
# Printing raw DoD in txt file (NaN as -999)
np.savetxt(path_out + '/' + DoD_name + 'raw_rst.txt', DoD_raw_rst, fmt='%0.1f', delimiter='\t')
# MEAN DoD
# Print DoD mean in txt file (NaN as np.nan)
np.savetxt(path_out + '/' + DoD_name + 'DoD_mean.txt', DoD_mean_rst , fmt='%0.1f', delimiter='\t')
# MEAN + THRS1 DoD
# Print DoD mean, threshold 1 filtered in txt file (NaN as np.nan)
np.savetxt(path_out + '/' + DoD_name + 'DoD_mean_th1.txt', DoD_mean_th1, fmt='%0.1f', delimiter='\t')
# # Print filtered DoD (with NaN as -999)
np.savetxt(path_out + '/' + DoD_name + 'DoD_mean_th1_rst.txt', DoD_mean_th1_rst, fmt='%0.1f', delimiter='\t')
#MEAN + THRS_1 + NEIGH ANALYSIS DoD
# Print filtered DoD (with np.nan)...
np.savetxt(path_out + '/' + DoD_name + 'filt_.txt', DoD_out, fmt='%0.1f', delimiter='\t')
# Print filtered DoD (with NaN as -999)
np.savetxt(path_out + '/' + DoD_name + 'filt_raw_rst.txt', DoD_out_rst, fmt='%0.1f', delimiter='\t')
# # Print DoD and filtered DoD (with NaN as -999) in a GIS readable format (ASCII grid):
# with open(path_out + '/' + DoD_name + 'header.txt') as f_head:
# w_header = f_head.read() # Header
# with open(path_out + '/' + DoD_name + 'raw_rst.txt') as DoD:
# w_DoD_raw= DoD.read() # Raw DoD
# with open(path_out + 'DoD_name' + 'filt_raw_rst.txt') as DoD_filt:
# w_DoD_filt = DoD_filt.read() # Filtered DoD
# with open(path_out + '/' + DoD_name + 'DoD_mean_th1_rst.txt') as DoD_mn_th1:
# w_DoD_mean_th1 = DoD_mn_th1.read()
# with open(path_out + '/' + DoD_name + 'DoD_mean.txt') as DoD_mn:
# w_DoD_mean = DoD_mn.read() # DoD mean
# # Print GIS readable raster [raw DoD, mean DOD, filtered DoD]
# DoD = w_header + w_DoD_raw
# DoD_mean = w_header + w_DoD_mean
# DoD_mean_th1 = w_header + w_DoD_mean_th1
# DoD_filt = w_header + w_DoD_filt
# with open(path_out + '/' +'gis-'+ DoD_name + 'raw.txt', 'w') as fp:
# fp.write(DoD)
# with open(path_out + '/' + 'gis-' + DoD_name + 'mean.txt', 'w') as fp:
# fp.write(DoD_mean)
# with open(path_out + '/' + 'gis-' + DoD_name + 'mean_th1.txt', 'w') as fp:
# fp.write(DoD_mean_th1)
# with open(path_out + '/' + 'gis-' + DoD_name + 'filt.txt', 'w') as fp:
# fp.write(DoD_filt)
# Cross section analysis
#n_cross=1
#y_values = np.arange(0,144*5,5)
#cross_sct = DoD_out[:,n_cross]
#fig, ax = plt.subplots(figsize=(20,5))
#ax.plot(y_values, cross_sct)
#title = 'Section_'+str(n_cross)
#ax.set(xlabel='Cross section coordinates [mm]',
# ylabel='Elevation [mm]',
# title=title)
#ax.grid()
#fig.savefig(path_out+'/section'+n_cross+'.png')
#plt.show()
|
[
"numpy.count_nonzero",
"numpy.array",
"numpy.loadtxt",
"numpy.nanmin",
"os.path.exists",
"matplotlib.colors.ListedColormap",
"os.mkdir",
"numpy.nanmax",
"matplotlib.pyplot.savefig",
"numpy.isnan",
"numpy.savetxt",
"numpy.nansum",
"matplotlib.pyplot.show",
"os.path.join",
"numpy.logical_or",
"numpy.sum",
"numpy.zeros",
"numpy.pad",
"matplotlib.pyplot.subplots"
] |
[((959, 989), 'os.path.join', 'os.path.join', (['w_dir', 'DEM1_name'], {}), '(w_dir, DEM1_name)\n', (971, 989), False, 'import os\n'), ((1002, 1032), 'os.path.join', 'os.path.join', (['w_dir', 'DEM2_name'], {}), '(w_dir, DEM2_name)\n', (1014, 1032), False, 'import os\n'), ((1297, 1328), 'os.path.join', 'os.path.join', (['dir_out', 'name_out'], {}), '(dir_out, name_out)\n', (1309, 1328), False, 'import os\n'), ((1332, 1356), 'os.path.exists', 'os.path.exists', (['path_out'], {}), '(path_out)\n', (1346, 1356), False, 'import os\n'), ((2642, 2675), 'numpy.loadtxt', 'np.loadtxt', (['path_DEM1'], {'skiprows': '(8)'}), '(path_DEM1, skiprows=8)\n', (2652, 2675), True, 'import numpy as np\n'), ((2755, 2788), 'numpy.loadtxt', 'np.loadtxt', (['path_DEM2'], {'skiprows': '(8)'}), '(path_DEM2, skiprows=8)\n', (2765, 2788), True, 'import numpy as np\n'), ((3846, 3866), 'numpy.zeros', 'np.zeros', (['DEM1.shape'], {}), '(DEM1.shape)\n', (3854, 3866), True, 'import numpy as np\n'), ((4013, 4033), 'numpy.zeros', 'np.zeros', (['DEM1.shape'], {}), '(DEM1.shape)\n', (4021, 4033), True, 'import numpy as np\n'), ((4664, 4695), 'numpy.pad', 'np.pad', (['DoD_raw', '(1)'], {'mode': '"""edge"""'}), "(DoD_raw, 1, mode='edge')\n", (4670, 4695), True, 'import numpy as np\n'), ((4744, 4764), 'numpy.zeros', 'np.zeros', (['DEM1.shape'], {}), '(DEM1.shape)\n', (4752, 4764), True, 'import numpy as np\n'), ((5897, 5933), 'numpy.pad', 'np.pad', (['DoD_mean_th1', '(1)'], {'mode': '"""edge"""'}), "(DoD_mean_th1, 1, mode='edge')\n", (5903, 5933), True, 'import numpy as np\n'), ((5966, 5986), 'numpy.zeros', 'np.zeros', (['DEM1.shape'], {}), '(DEM1.shape)\n', (5974, 5986), True, 'import numpy as np\n'), ((7053, 7087), 'numpy.pad', 'np.pad', (['coal_neigh', '(1)'], {'mode': '"""edge"""'}), "(coal_neigh, 1, mode='edge')\n", (7059, 7087), True, 'import numpy as np\n'), ((8085, 8107), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['colors'], {}), '(colors)\n', (8099, 8107), False, 'from matplotlib.colors import ListedColormap, BoundaryNorm\n'), ((8197, 8215), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (8209, 8215), True, 'import matplotlib.pyplot as plt\n'), ((8485, 8495), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8493, 8495), True, 'import matplotlib.pyplot as plt\n'), ((8496, 8533), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_out + '/raster.pdf')"], {}), "(path_out + '/raster.pdf')\n", (8507, 8533), True, 'import matplotlib.pyplot as plt\n'), ((9820, 9911), 'numpy.savetxt', 'np.savetxt', (["(path_out + '/' + DoD_name + 'raw.txt')", 'DoD_raw'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + '/' + DoD_name + 'raw.txt', DoD_raw, fmt='%0.1f',\n delimiter='\\t')\n", (9830, 9911), True, 'import numpy as np\n'), ((9953, 10053), 'numpy.savetxt', 'np.savetxt', (["(path_out + '/' + DoD_name + 'raw_rst.txt')", 'DoD_raw_rst'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + '/' + DoD_name + 'raw_rst.txt', DoD_raw_rst, fmt=\n '%0.1f', delimiter='\\t')\n", (9963, 10053), True, 'import numpy as np\n'), ((10106, 10208), 'numpy.savetxt', 'np.savetxt', (["(path_out + '/' + DoD_name + 'DoD_mean.txt')", 'DoD_mean_rst'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + '/' + DoD_name + 'DoD_mean.txt', DoD_mean_rst, fmt=\n '%0.1f', delimiter='\\t')\n", (10116, 10208), True, 'import numpy as np\n'), ((10292, 10397), 'numpy.savetxt', 'np.savetxt', (["(path_out + '/' + DoD_name + 'DoD_mean_th1.txt')", 'DoD_mean_th1'], {'fmt': '"""%0.1f"""', 
'delimiter': '"""\t"""'}), "(path_out + '/' + DoD_name + 'DoD_mean_th1.txt', DoD_mean_th1,\n fmt='%0.1f', delimiter='\\t')\n", (10302, 10397), True, 'import numpy as np\n'), ((10436, 10549), 'numpy.savetxt', 'np.savetxt', (["(path_out + '/' + DoD_name + 'DoD_mean_th1_rst.txt')", 'DoD_mean_th1_rst'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + '/' + DoD_name + 'DoD_mean_th1_rst.txt',\n DoD_mean_th1_rst, fmt='%0.1f', delimiter='\\t')\n", (10446, 10549), True, 'import numpy as np\n'), ((10621, 10714), 'numpy.savetxt', 'np.savetxt', (["(path_out + '/' + DoD_name + 'filt_.txt')", 'DoD_out'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + '/' + DoD_name + 'filt_.txt', DoD_out, fmt='%0.1f',\n delimiter='\\t')\n", (10631, 10714), True, 'import numpy as np\n'), ((10751, 10852), 'numpy.savetxt', 'np.savetxt', (["(path_out + 'DoD_name' + 'filt_raw_rst.txt')", 'DoD_out_rst'], {'fmt': '"""%0.1f"""', 'delimiter': '"""\t"""'}), "(path_out + 'DoD_name' + 'filt_raw_rst.txt', DoD_out_rst, fmt=\n '%0.1f', delimiter='\\t')\n", (10761, 10852), True, 'import numpy as np\n'), ((1377, 1395), 'os.mkdir', 'os.mkdir', (['path_out'], {}), '(path_out)\n', (1385, 1395), False, 'import os\n'), ((3493, 3534), 'os.path.join', 'os.path.join', (['array_mask_path', 'array_mask'], {}), '(array_mask_path, array_mask)\n', (3505, 3534), False, 'import os\n'), ((3654, 3674), 'numpy.isnan', 'np.isnan', (['array_mask'], {}), '(array_mask)\n', (3662, 3674), True, 'import numpy as np\n'), ((3886, 3925), 'numpy.logical_or', 'np.logical_or', (['(DEM1 == NaN)', '(DEM2 == NaN)'], {}), '(DEM1 == NaN, DEM2 == NaN)\n', (3899, 3925), True, 'import numpy as np\n'), ((4057, 4096), 'numpy.logical_or', 'np.logical_or', (['(DEM1 == NaN)', '(DEM2 == NaN)'], {}), '(DEM1 == NaN, DEM2 == NaN)\n', (4070, 4096), True, 'import numpy as np\n'), ((4320, 4338), 'numpy.nanmin', 'np.nanmin', (['DoD_raw'], {}), '(DoD_raw)\n', (4329, 4338), True, 'import numpy as np\n'), ((4377, 4395), 'numpy.nanmax', 'np.nanmax', (['DoD_raw'], {}), '(DoD_raw)\n', (4386, 4395), True, 'import numpy as np\n'), ((5593, 5615), 'numpy.isnan', 'np.isnan', (['DoD_mean_msk'], {}), '(DoD_mean_msk)\n', (5601, 5615), True, 'import numpy as np\n'), ((5801, 5823), 'numpy.isnan', 'np.isnan', (['DoD_mean_th1'], {}), '(DoD_mean_th1)\n', (5809, 5823), True, 'import numpy as np\n'), ((7716, 7733), 'numpy.isnan', 'np.isnan', (['DoD_out'], {}), '(DoD_out)\n', (7724, 7733), True, 'import numpy as np\n'), ((8943, 8963), 'numpy.isnan', 'np.isnan', (['coal_neigh'], {}), '(coal_neigh)\n', (8951, 8963), True, 'import numpy as np\n'), ((3722, 3742), 'numpy.isnan', 'np.isnan', (['array_mask'], {}), '(array_mask)\n', (3730, 3742), True, 'import numpy as np\n'), ((4204, 4221), 'numpy.isnan', 'np.isnan', (['DoD_raw'], {}), '(DoD_raw)\n', (4212, 4221), True, 'import numpy as np\n'), ((4834, 4857), 'numpy.isnan', 'np.isnan', (['DoD_raw[i, j]'], {}), '(DoD_raw[i, j])\n', (4842, 4857), True, 'import numpy as np\n'), ((6151, 6179), 'numpy.isnan', 'np.isnan', (['DoD_mean_th1[i, j]'], {}), '(DoD_mean_th1[i, j])\n', (6159, 6179), True, 'import numpy as np\n'), ((7176, 7416), 'numpy.array', 'np.array', (['[[domain_neigh2[i, j], domain_neigh2[i, j + 1], domain_neigh2[i, j + 2]], [\n domain_neigh2[i + 1, j], 0, domain_neigh2[i + 1, j + 2]], [\n domain_neigh2[i + 2, j], domain_neigh2[i + 2, j + 1], domain_neigh2[i +\n 2, j + 2]]]'], {}), '([[domain_neigh2[i, j], domain_neigh2[i, j + 1], domain_neigh2[i, j +\n 2]], [domain_neigh2[i + 1, j], 0, domain_neigh2[i + 1, j + 2]], [\n 
domain_neigh2[i + 2, j], domain_neigh2[i + 2, j + 1], domain_neigh2[i +\n 2, j + 2]]])\n', (7184, 7416), True, 'import numpy as np\n'), ((4925, 5166), 'numpy.array', 'np.array', (['[[domain_avg[i, j], domain_avg[i, j + 1], domain_avg[i, j + 2]], [\n domain_avg[i + 1, j], domain_avg[i + 1, j + 1], domain_avg[i + 1, j + 2\n ]], [domain_avg[i + 2, j], domain_avg[i + 2, j + 1], domain_avg[i + 2, \n j + 2]]]'], {}), '([[domain_avg[i, j], domain_avg[i, j + 1], domain_avg[i, j + 2]], [\n domain_avg[i + 1, j], domain_avg[i + 1, j + 1], domain_avg[i + 1, j + 2\n ]], [domain_avg[i + 2, j], domain_avg[i + 2, j + 1], domain_avg[i + 2, \n j + 2]]])\n', (4933, 5166), True, 'import numpy as np\n'), ((5220, 5263), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 2, 0], [0, 1, 0]]'], {}), '([[0, 1, 0], [0, 2, 0], [0, 1, 0]])\n', (5228, 5263), True, 'import numpy as np\n'), ((5403, 5424), 'numpy.nansum', 'np.nansum', (['(k * w_norm)'], {}), '(k * w_norm)\n', (5412, 5424), True, 'import numpy as np\n'), ((7466, 7492), 'numpy.count_nonzero', 'np.count_nonzero', (['(ker == 0)'], {}), '(ker == 0)\n', (7482, 7492), True, 'import numpy as np\n'), ((9062, 9077), 'numpy.sum', 'np.sum', (['DoD_vol'], {}), '(DoD_vol)\n', (9068, 9077), True, 'import numpy as np\n'), ((9124, 9135), 'numpy.sum', 'np.sum', (['DEP'], {}), '(DEP)\n', (9130, 9135), True, 'import numpy as np\n'), ((9177, 9188), 'numpy.sum', 'np.sum', (['SCO'], {}), '(SCO)\n', (9183, 9188), True, 'import numpy as np\n'), ((6295, 6553), 'numpy.array', 'np.array', (['[[domain_neigh[i, j], domain_neigh[i, j + 1], domain_neigh[i, j + 2]], [\n domain_neigh[i + 1, j], domain_neigh[i + 1, j + 1], domain_neigh[i + 1,\n j + 2]], [domain_neigh[i + 2, j], domain_neigh[i + 2, j + 1],\n domain_neigh[i + 2, j + 2]]]'], {}), '([[domain_neigh[i, j], domain_neigh[i, j + 1], domain_neigh[i, j + \n 2]], [domain_neigh[i + 1, j], domain_neigh[i + 1, j + 1], domain_neigh[\n i + 1, j + 2]], [domain_neigh[i + 2, j], domain_neigh[i + 2, j + 1],\n domain_neigh[i + 2, j + 2]]])\n', (6303, 6553), True, 'import numpy as np\n'), ((7513, 7526), 'numpy.isnan', 'np.isnan', (['ker'], {}), '(ker)\n', (7521, 7526), True, 'import numpy as np\n'), ((6638, 6663), 'numpy.count_nonzero', 'np.count_nonzero', (['(ker < 0)'], {}), '(ker < 0)\n', (6654, 6663), True, 'import numpy as np\n'), ((6776, 6801), 'numpy.count_nonzero', 'np.count_nonzero', (['(ker > 0)'], {}), '(ker > 0)\n', (6792, 6801), True, 'import numpy as np\n')]
|
import numpy as np
def load_lda(path):
rows = []
with open(path, 'r') as f:
for line in f:
line = line.strip(" []\n")
if line:
rows.append(np.fromstring(line, dtype=np.float32, sep=' '))
matrix = np.array(rows).T
return matrix[:-1], matrix[-1]
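
# A minimal usage sketch (assumption: the file stores one output row per line in the
# form "[ w_1 ... w_n b ]", i.e. the last column is an offset term; the file name
# below is made up for the demo). With that layout, load_lda returns W of shape
# (n_in, n_out) and b of shape (n_out,), and the transform is applied as y = x @ W + b.
if __name__ == "__main__":
    with open("demo_lda.txt", "w") as f:
        f.write(" [1.0 0.0 2.0]\n [0.0 1.0 -1.0]\n")
    W, b = load_lda("demo_lda.txt")
    x = np.array([3.0, 4.0], dtype=np.float32)
    print(x @ W + b)  # -> [5. 3.]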
|
[
"numpy.array",
"numpy.fromstring"
] |
[((259, 273), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (267, 273), True, 'import numpy as np\n'), ((197, 243), 'numpy.fromstring', 'np.fromstring', (['line'], {'dtype': 'np.float32', 'sep': '""" """'}), "(line, dtype=np.float32, sep=' ')\n", (210, 243), True, 'import numpy as np\n')]
|
#!/usr/bin/python
from __future__ import absolute_import
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision.models import vgg16_bn
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import os
from statistics import stdev, mean
import csv
import re
def plot_imgs():
img_path = "pT1_dataset/dataset/img0/img0_11_normal/img0_11_normal-image.jpg"
# mask_path = "pT1_dataset/dataset/img0/img0_11_normal/img0_11_normal-gt.png"
img = Image.open(img_path)
# mask = Image.open(mask_path)
img = np.asarray(img)
# mask = np.asarray(mask)
# mask = np.repeat(mask[:,:, np.newaxis], 3, axis=2)
# img = np.where(mask, img, mask)
plt.imshow(img)
plt.show()
img_path = "pT1_dataset/dataset/img1/img1_11_abnormal/img1_11_abnormal-image.jpg"
# mask_path = "pT1_dataset/dataset/img1/img1_11_abnormal/img1_11_abnormal-gt.png"
img = Image.open(img_path)
# mask = Image.open(mask_path)
img = np.asarray(img)
# mask = np.asarray(mask)
# mask = np.repeat(mask[:,:, np.newaxis], 3, axis=2)
# img = np.where(mask, img, mask)
plt.imshow(img)
plt.show()
def image_sizes():
path = "pT1_dataset/dataset/"
smallest_width = 10000
smallest_hight = 10000
for patient in os.listdir(path):
if not patient.endswith(".csv"):
for img_folder in os.listdir(path + patient):
img = Image.open(path+patient+ "/" + img_folder + "/" + img_folder + "-image.jpg")
img = np.asarray(img)
if img.shape[0] < smallest_hight:
smallest_hight = img.shape[0]
pic_h = img_folder
if img.shape[1] < smallest_width:
smallest_width = img.shape[1]
pic_w = img_folder
print(smallest_hight, pic_h)
print(smallest_width, pic_w)
# for img in os.listdir(path + paient)
# if not f.startswith("."):
#
# if os.path.isfile(path + f) and k in f:
# with open(path + f, "r") as file:
def plot_all_images():
"""
plots all images of the dataset
:return:
"""
path = "pT1_dataset/dataset/"
counter = 1
for patient in os.listdir(path): # iterate over every patient
if not patient.endswith(".csv"): # only consider the img folders
            for img_folder in os.listdir(path + patient): # iterate over every img folder
img = Image.open(path+patient+ "/" + img_folder + "/" + img_folder + "-image.jpg") # open the image (PIL)
img = np.asarray(img) # convert from PILformat to numpy array
if counter <= 100:
plt.rc("font", size=5) # determine font size
plt.subplot(10,10, counter)
plt.imshow(img)
if "abnormal" in img_folder:
plt.title("dysplastic")
else:
plt.title("normal")
plt.axis("off")
counter+=1
else:
plt.show()
counter=1
plt.rc("font", size=5)
plt.subplot(10, 10, counter)
plt.imshow(img)
if "abnormal" in img_folder:
plt.title("dysplastic")
else:
plt.title("normal")
plt.axis("off")
counter+=1
############################################################################################
def get_opt_param(m, fold):
"""
reads a csv file to return the optimal parameters for a given data-split
"""
k = "fold" + str(fold)
path = "Hyperparameters/CNN/"
for f in os.listdir(path):
if not f.startswith("."):
if os.path.isfile(path + f) and k in f and f.endswith("it50.csv") and f.startswith(m): #TODO: add VGG16_bn
with open(path + f, "r") as file:
hp = list(csv.reader(file))
hp=hp[1]
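                    # Column layout assumed by the indexing below: hp[0]=model name,
                    # hp[1]=fold, hp[4]=learning rate, hp[5]=lr decay, hp[6]=epochs,
                    # hp[7]=step size, hp[8]=weight decay (columns 2-3 are unused here).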
m = hp[0]
fold = int(hp[1])
opt_lr = float(hp[4])
opt_lr_decay = float(hp[5])
opt_num_epochs = int(float(hp[6]))
opt_step_size = int(float(hp[7]))
opt_weight_decay = float(hp[8])
return m, fold, opt_lr, opt_lr_decay, opt_num_epochs, opt_step_size, opt_weight_decay
def train_val_test_split(fold):
"""
:param fold: determines which data split is used
:return: three (train, val, test) lists containing the IDs of the images,
    the ID is like the path to the image, ID looks like: img0/img0_0_normal/img0_0_normal-image.jpg
"""
# open the csv file
with open("pT1_dataset/dataset_split.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
line_count = 0
dic = {}
# iterate over every row of the csv file
for row in csv_reader:
if line_count == 0: # ignore the header
line_count += 1
else: # use a dictionary to save the information about how to split the data into train test and val
dic[row[0]] = [row[fold + 1]] # get a dictionary containing all the needed information to split the data
path = "pT1_dataset/dataset/"
train_IDs, val_IDs, test_IDs = [],[],[]
    for patient in os.listdir(path): # iterate over the directory (i.e. over every patient)
if not patient.endswith(".csv"): # ignore the csv file in this folder
if dic[patient][0]=="train": # check if the patient belongs to train
for img_folder in os.listdir(path + patient): # iterate over all images from this patient
train_IDs.append(patient + "/" + img_folder + "/" + img_folder + "-image.jpg") # append the ID
if dic[patient][0]=="val":
for img_folder in os.listdir(path + patient):
val_IDs.append(patient + "/" + img_folder + "/" + img_folder + "-image.jpg")
if dic[patient][0] == "test":
for img_folder in os.listdir(path + patient):
test_IDs.append(patient + "/" + img_folder + "/" + img_folder + "-image.jpg")
return train_IDs, val_IDs, test_IDs
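# The split in pT1_dataset/dataset_split.csv is defined per patient (column fold + 1
# assigns every patient folder to train/val/test), so all crops of one patient always
# land in the same subset and no patient-level leakage occurs between the splits.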
############################################################################################
class ImageDataset(Dataset):
def __init__(self, list_IDs, transform):
self.list_IDs = list_IDs
self.transform = transform
def __len__(self):
return len(self.list_IDs)
def __getitem__(self, index):
ID = self.list_IDs[index]
img = Image.open("pT1_dataset/dataset/" + ID)
# img = transforms.Resize([64,60])(img) #TODO resize images
if "abnormal" in ID:
label = [1,0]
cls = 1
else:
label = [0,1]
cls = 0
label = torch.tensor(label, dtype=torch.float)
img = self.transform(img)
_, _, filename = ID.split("/")
img_patch = [int(i) for i in re.findall(r"[0-9]+", filename)]
img_patch.append(cls)
name = torch.tensor([img_patch], dtype=torch.float)
# print(img.shape)
return img, label, name
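# Each sample returned by ImageDataset is a triple (img, label, name): img is the
# transformed image tensor, label is a one-hot float tensor ([1, 0] for
# dysplastic/"abnormal" crops, [0, 1] for normal ones), and name packs
# [image index, patch index, class] parsed from the filename, which evaluate()
# later uses to list the TP/TN/FP/FN samples.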
class CNN(nn.Module):
"""
feed forward conv net
"""
def __init__(self, img_size):
super(CNN, self).__init__()
self.final_img_size = int(img_size/8)
self.out_conv1= 16
self.out_conv2 = 32
self.out_conv3 = 64
self.cnn_layers = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=self.out_conv1, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(0.1),
nn.Conv2d(in_channels=self.out_conv1, out_channels=self.out_conv2, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(0.1),
nn.Conv2d(in_channels=self.out_conv2, out_channels=self.out_conv3, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(0.1),
)
self.linear_layers = nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(in_features=self.final_img_size*self.final_img_size*self.out_conv3, out_features=self.final_img_size*self.final_img_size*16),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(in_features=self.final_img_size*self.final_img_size*16, out_features=2),
nn.Softmax()
)
def forward(self, input):
output = self.cnn_layers(input)
output_flat = output.reshape(-1, self.final_img_size*self.final_img_size*self.out_conv3)
output = self.linear_layers(output_flat)
return output
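# Shape sketch (assuming the img_size of 128 used by the training code below): the
# three conv/ReLU/max-pool blocks halve the spatial resolution three times,
# 3x128x128 -> 16x64x64 -> 32x32x32 -> 64x16x16, so the flattened vector fed to the
# linear layers has 16*16*64 entries (final_img_size = img_size / 8).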
############################################################################################
def train_and_test_1Fold(fold, m, device):
# get the hyperparameters
_, _, lr, lr_decay, num_epochs, step_size, weight_decay = get_opt_param(m,fold)
bool=True
while bool:
# train and validate the CNN, save the model weights that have highest accuracy on validation set
val_res, bool, train_accs, val_accs, train_losses, val_losses = train_and_val_1Fold(fold=fold, m=m,
num_epochs=num_epochs,
lr=lr,
lr_decay=lr_decay,
weight_decay=weight_decay,
device=device,
step_size=step_size,
testing=True)
if bool:
print("FOLD" + str(fold) + "rerun due to dead neurons")
print("val acc:", val_res[2])
img_size = 128
train_IDs, val_IDs, test_IDs = train_val_test_split(fold) # split data
val_transform = transforms.Compose([transforms.Resize((img_size, img_size)),
transforms.ToTensor()])
test_data = ImageDataset(test_IDs, val_transform) # get data objects
# data loaders
batchsize = 64
test_loader = DataLoader(test_data, batchsize, shuffle=True) # create Dataloader
model = torch.load("Parameters/CNN/" + m + "_fold"+str(fold) + ".pt") # load model
crit = torch.nn.CrossEntropyLoss(reduction="sum") # define loss function
test_acc, test_loss, img_name, TP_TN_FP_FN = evaluate(model, test_loader, crit, device, testing=True) # evaluation on test_set
print("test_acc:", test_acc)
# print(TP_TN_FP_FN)
return test_acc, train_accs, val_accs, train_losses, val_losses, img_name, TP_TN_FP_FN
def test(runs, m, device):
"""
write train accs and val accs of all runs to one csv file per fold
:param runs: number of times train, val and test is repeated
:param device: "cuda" or "cpu"
:return:
"""
# m = "CNN"
folder_short="CNN/"
all_test_accs = []
test_accs_per_fold = [[],[],[],[]]
path_test = "out/" + folder_short + m + "_test_data.csv"
for fold in range(4):
print("Fold:", fold)
# paths of csv files to which will be written to
path_train_acc = "out/" + folder_short + m + "_train_acc_fold" + str(fold) + ".csv"
path_train_loss = "out/" + folder_short + m + "_train_loss_fold" + str(fold) + ".csv"
path_val_acc = "out/" + folder_short + m + "_val_acc_fold" + str(fold) + ".csv"
path_val_loss = "out/" + folder_short + m + "_val_loss_fold" + str(fold) + ".csv"
path_TP_TN_FP_FN = "out/" + folder_short + m + "_TP_TN_FP_FN_fold" + str(fold) + ".csv"
# write train acc, train loss, val acc and val loss to separate csv files
with open(path_train_acc, "w") as train_acc_file, \
open(path_train_loss, "w") as train_loss_file, \
open(path_val_acc, "w") as val_acc_file, \
open(path_val_loss, "w") as val_loss_file, \
open(path_TP_TN_FP_FN, "w") as cls_file:
train_acc_writer = csv.writer(train_acc_file)
train_loss_writer = csv.writer(train_loss_file)
val_acc_writer = csv.writer(val_acc_file)
val_loss_writer = csv.writer(val_loss_file)
cls_writer = csv.writer(cls_file)
for it in range(runs):
test_acc, train_accs, val_accs, train_losses, val_losses, img_name, TP_TN_FP_FN = train_and_test_1Fold(fold=fold, m=m, device=device)
all_test_accs.append(test_acc)
test_accs_per_fold[fold].append(test_acc)
train_acc_writer.writerow([i for i in train_accs])
train_loss_writer.writerow([i for i in train_losses])
val_acc_writer.writerow([i for i in val_accs])
val_loss_writer.writerow([i for i in val_losses])
final_TPTNFPFN = []
groups = ["TP", "TN", "FP", "FN"]
for group_idx in range(4):
for img in img_name[group_idx]:
img_str = str(img[0][0]) + "_" + str(img[0][1]) + "_" + str(img[0][2]) + "_" + groups[group_idx]
final_TPTNFPFN.append(img_str)
cls_writer.writerow(final_TPTNFPFN)
if it == runs-1:
avg = mean(test_accs_per_fold[fold])
sd = stdev(test_accs_per_fold[fold])
test_accs_per_fold[fold].append(avg)
test_accs_per_fold[fold].append(sd)
test_accs_per_fold[fold].reverse()
# write test results (mean and sd) of every fold and total to a csv file
with open(path_test, "w") as test_file:
test_writer = csv.writer(test_file)
test_writer.writerow([stdev(all_test_accs), mean(all_test_accs)])
for fold in range(4):
test_writer.writerow(test_accs_per_fold[fold])
print("Results on Testset:")
print("mean:", "\t", mean(all_test_accs)*100)
print("standard deviation:", "\t", stdev(all_test_accs)*100)
############################################################################################
def train_and_val_1Fold(fold, m, num_epochs, lr, lr_decay, step_size, weight_decay, device, plotting=False, testing=False):
img_size = 128
# img_size = 256
train_IDs, val_IDs, test_IDs = train_val_test_split(fold)
if "aug" in m:
print("Augment training data")
train_transform = transforms.Compose([transforms.Resize((img_size,img_size)),
# transforms.RandomHorizontalFlip(),
transforms.RandomRotation((0,360)),
# transforms.RandomVerticalFlip(),
transforms.ToTensor()])
else:
train_transform = transforms.Compose([transforms.Resize((img_size,img_size)),
# transforms.RandomHorizontalFlip(),
# transforms.RandomRotation((0,360)),
# transforms.RandomVerticalFlip(),
transforms.ToTensor()])
val_transform = transforms.Compose([transforms.Resize((img_size,img_size)),
transforms.ToTensor()])
train_data = ImageDataset(train_IDs, train_transform)
val_data = ImageDataset(val_IDs, val_transform)
test_data = ImageDataset(test_IDs, val_transform)
# print("train size: " + str(len(train_data)) + " val size: " + str(len(val_data)))
# data loaders
batchsize = 64
# batchsize = 16
train_loader = DataLoader(train_data, batchsize, shuffle=True, drop_last=True)
val_loader = DataLoader(val_data, batchsize, shuffle=True)
test_loader = DataLoader(test_data, batchsize, shuffle=True)
train_accs = [] # will contain the training accuracy of every epoch
val_accs = [] # will contain the validation accuracy of every epoch
train_losses = [] # will contain the training loss of every epoch
val_losses = [] # will contain the validation loss of every epoch
# initialize model
print("initialize", m)
if m == "CNN":
model = CNN(img_size).to(device)
elif m == "VGG16_bn":
model = vgg16_bn().to(device)
elif m == "VGG16_bn_aug":
model = vgg16_bn().to(device)
elif m == "VGG16_bn_aug_pretrained":
model = vgg16_bn(pretrained=True).to(device)
# model = torch.load("Parameters/CNN/VGG16_bn_pretrained_imagenet.pt").to(device)
elif m == "VGG16_bn_pretrained":
model = vgg16_bn(pretrained=True).to(device)
# model = torch.load("Parameters/CNN/VGG16_bn_pretrained_imagenet.pt").to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay) # define the optimizer, weight_decay corresponds to L2 regularization
# optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay, momentum=0.9)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=lr_decay) # learning rate decay
crit = torch.nn.CrossEntropyLoss(reduction="sum")
for epoch in range(num_epochs):
if epoch == 0: # get train and val accs before training
train_acc, train_loss, _, _ = evaluate(model, train_loader, crit, device)
val_acc, val_loss, img_name, TP_TN_FP_FN = evaluate(model, val_loader, crit, device)
train_accs.append(train_acc)
train_losses.append(train_loss)
val_accs.append(val_acc)
val_losses.append(val_loss)
running_val_acc = np.array([0,0,val_acc])
val_res = np.copy(running_val_acc)
if testing:
torch.save(model, "Parameters/CNN/" + m + "_fold"+str(fold) + ".pt")
# train the model
train(model, train_loader, optimizer, crit, device)
scheduler.step()
        # evaluate the model
train_acc, train_loss, _, _= evaluate(model, train_loader, crit, device)
val_acc, val_loss, img_name, TP_TN_FP_FN = evaluate(model, val_loader, crit, device)
train_accs.append(train_acc)
train_losses.append(train_loss)
val_accs.append(val_acc)
val_losses.append(val_loss)
running_val_acc[0] = running_val_acc[1]
running_val_acc[1] = running_val_acc[2]
running_val_acc[2] = val_acc
if np.mean(running_val_acc) > np.mean(val_res) and not testing:
val_res = np.copy(running_val_acc)
if running_val_acc[2] > val_res[2] and testing:
val_res = np.copy(running_val_acc)
torch.save(model,"Parameters/CNN/" + m + "_fold"+str(fold) + ".pt")
if plotting:
for param_group in optimizer.param_groups:
print("Epoch: {:03d}, lr: {:.5f}, train_loss: {:.5f}, val_loss: {:.5f}, train_acc: {:.5f}, val_acc: {:.5f}".format(epoch, param_group["lr"], train_loss, val_loss, train_acc, val_acc))
if stdev(train_losses[-20:]) < 0.05 and mean(train_accs[-20:]) < 0.55:
boolean = True
# print("Oops")
else:
boolean = False
# # plot learning curves
if plotting:
x = np.arange(0,len(train_accs))
plt.subplot(2,1,1)
plt.plot(x, train_accs, color="r")
plt.ylim(0.5, 1)
plt.plot(x, val_accs, color="g")
plt.subplot(2,1,2)
plt.plot(x, train_losses, color="r")
plt.plot(x, val_losses, color="g")
plt.show()
return(val_res, boolean, np.asarray(train_accs), np.asarray(val_accs), np.asarray(train_losses), np.asarray(val_losses))
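# Model selection above works on a running window of the last three validation
# accuracies (running_val_acc); when testing, the weights are saved whenever the latest
# validation accuracy beats the best seen so far. The returned boolean flags a likely
# dead run (last 20 training losses nearly constant while train accuracy stays around
# chance), which train_and_test_1Fold uses to restart training.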
def train(model, train_loader, optimizer, crit, device):
model.train()
for data, label, name in train_loader:
data = data.to(device) # transfer the data to the device
label = label.to(device) # transfer the labels to the device
optimizer.zero_grad() # set the gradient to 0
output = model(data) # pass the data through the model
loss = crit(output, torch.max(label,1)[1].long()) # compute the loss between output and label
loss.backward() # compute the gradient
optimizer.step()
def evaluate(model, val_loader, crit, device, testing=False):
model.eval()
loss_all =0
img_count=0
batch_count =0
correct_pred = 0
img_name = [[],[], [], []]
TP_TN_FP_FN = np.zeros((4))
with torch.no_grad(): # gradients don't need to be calculated in evaluation
# pass data through the model and get label and prediction
        for data, labelT, name in val_loader: # iterate over every batch of the evaluation set
            data = data.to(device) # transfer data to device
            predT = model(data)  # forward pass; converted to a numpy array on the next line
pred = predT.detach().cpu().numpy()
label=labelT.detach().cpu().numpy()
predicted_classes = (pred == pred.max(axis=1)[:,None]).astype(int)
# if testing:
# c=0
# for i in data:
# plt.imshow(np.transpose(i.detach().cpu().numpy(), (1,2,0)))
# plt.title("predicted:" +str(predicted_classes[c,0]) + " class:" +str(label[c,0]))
# plt.show()
# c+=1
correct_pred += np.sum(predicted_classes[:, 0] == labelT[:, 0].numpy())
loss = crit(predT, torch.max(labelT, 1)[1].long().to(device)) # compute the loss between output and label
loss_all += loss.item()
img_count += len(data)
batch_count +=1
# count the false negatives and false positives
false_idx = np.argwhere(predicted_classes[:,0]!=label[:,0]).reshape(-1)
truth = label[false_idx,:]
c=0
for t in truth:
if t[0] == 1:
TP_TN_FP_FN[3] +=1
img_name[3].append(name[false_idx][c].tolist())
if t[0] == 0:
TP_TN_FP_FN[2] +=1
img_name[2].append(name[false_idx][c].tolist())
c+=1
true_idx = np.argwhere(predicted_classes[:,0]==label[:,0]).reshape(-1)
truth = label[true_idx,:]
c=0
for t in truth:
if t[0] == 1:
TP_TN_FP_FN[0] +=1
img_name[0].append(name[true_idx][c].tolist())
if t[0] == 0:
TP_TN_FP_FN[1] += 1
img_name[1].append(name[true_idx][c].tolist())
c+=1
avg_acc = correct_pred / img_count
avg_loss = loss_all/img_count
return avg_acc, avg_loss, img_name, TP_TN_FP_FN
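# TP_TN_FP_FN is ordered [TP, TN, FP, FN] with the dysplastic class
# (label[:, 0] == 1) treated as positive; img_name collects the [image, patch, class]
# identifiers of the samples falling into each of the four groups.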
if __name__ == "__main__":
# plot_imgs()
# image_sizes()
# split_images(0)
# train_list_ID0, _, _ = train_val_test_split(0)
# print("num_train_samples:", len(train_list_ID0))
# transform = transforms.Compose([transforms.Resize((128, 128)),
# transforms.ToTensor()])
# train_split0 = ImageDataset(train_list_ID0, transform)
#
#
# train_loader = DataLoader(train_split0, batch_size=32)
# for batch, labels in train_loader:
# print("batchsize:", len(batch))
# for idx, img in enumerate(batch):
# plt.subplot(8,4, idx+1)
# print(img.size())
# tr = transforms.ToPILImage()
# image = tr(img)
# print(image.size)
# image = np.asarray(image)
# plt.imshow(np.asarray(image))
# if labels[idx].item() == 0:
# ttl = "normal"
# else:
# ttl = "dysplastic"
# plt.title(ttl)
# plt.show()
# plot_all_images()
# train_and_val_1Fold(fold=0, m="VGG16_bn", num_epochs=30, lr=0.01, lr_decay=0.8, step_size=3, weight_decay=0.01, device="cuda", plotting=True)
test(runs=2,device="cuda", m="VGG16_bn_aug_pretrained")
|
[
"torch.nn.ReLU",
"torch.nn.Dropout",
"statistics.stdev",
"torch.nn.CrossEntropyLoss",
"torch.max",
"numpy.array",
"matplotlib.pyplot.imshow",
"numpy.mean",
"os.listdir",
"matplotlib.pyplot.plot",
"numpy.asarray",
"torchvision.models.vgg16_bn",
"matplotlib.pyplot.ylim",
"torchvision.transforms.ToTensor",
"matplotlib.pyplot.axis",
"csv.reader",
"csv.writer",
"torch.nn.Dropout2d",
"os.path.isfile",
"torchvision.transforms.Resize",
"matplotlib.pyplot.title",
"re.findall",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show",
"statistics.mean",
"numpy.copy",
"PIL.Image.open",
"torch.nn.Softmax",
"torchvision.transforms.RandomRotation",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.Conv2d",
"torch.tensor",
"numpy.zeros",
"torch.nn.MaxPool2d",
"numpy.argwhere",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.no_grad",
"matplotlib.pyplot.subplot"
] |
[((549, 569), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (559, 569), False, 'from PIL import Image\n'), ((615, 630), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (625, 630), True, 'import numpy as np\n'), ((760, 775), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (770, 775), True, 'import matplotlib.pyplot as plt\n'), ((780, 790), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (788, 790), True, 'import matplotlib.pyplot as plt\n'), ((974, 994), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (984, 994), False, 'from PIL import Image\n'), ((1040, 1055), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1050, 1055), True, 'import numpy as np\n'), ((1185, 1200), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1195, 1200), True, 'import matplotlib.pyplot as plt\n'), ((1205, 1215), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1213, 1215), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1359), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1353, 1359), False, 'import os\n'), ((2299, 2315), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2309, 2315), False, 'import os\n'), ((3922, 3938), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3932, 3938), False, 'import os\n'), ((5602, 5618), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (5612, 5618), False, 'import os\n'), ((11015, 11061), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data', 'batchsize'], {'shuffle': '(True)'}), '(test_data, batchsize, shuffle=True)\n', (11025, 11061), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((11226, 11268), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (11251, 11268), False, 'import torch\n'), ((16596, 16659), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data', 'batchsize'], {'shuffle': '(True)', 'drop_last': '(True)'}), '(train_data, batchsize, shuffle=True, drop_last=True)\n', (16606, 16659), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((16677, 16722), 'torch.utils.data.DataLoader', 'DataLoader', (['val_data', 'batchsize'], {'shuffle': '(True)'}), '(val_data, batchsize, shuffle=True)\n', (16687, 16722), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((16741, 16787), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data', 'batchsize'], {'shuffle': '(True)'}), '(test_data, batchsize, shuffle=True)\n', (16751, 16787), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((17962, 18041), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': 'step_size', 'gamma': 'lr_decay'}), '(optimizer, step_size=step_size, gamma=lr_decay)\n', (17993, 18041), False, 'import torch\n'), ((18076, 18118), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (18101, 18118), False, 'import torch\n'), ((21341, 21352), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (21349, 21352), True, 'import numpy as np\n'), ((5025, 5060), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (5035, 5060), False, 'import csv\n'), ((6893, 6932), 'PIL.Image.open', 'Image.open', (["('pT1_dataset/dataset/' + ID)"], {}), "('pT1_dataset/dataset/' + ID)\n", (6903, 6932), False, 'from PIL import Image\n'), ((7154, 7192), 'torch.tensor', 'torch.tensor', (['label'], {'dtype': 'torch.float'}), 
'(label, dtype=torch.float)\n', (7166, 7192), False, 'import torch\n'), ((7383, 7427), 'torch.tensor', 'torch.tensor', (['[img_patch]'], {'dtype': 'torch.float'}), '([img_patch], dtype=torch.float)\n', (7395, 7427), False, 'import torch\n'), ((14671, 14692), 'csv.writer', 'csv.writer', (['test_file'], {}), '(test_file)\n', (14681, 14692), False, 'import csv\n'), ((20204, 20224), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (20215, 20224), True, 'import matplotlib.pyplot as plt\n'), ((20231, 20265), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'train_accs'], {'color': '"""r"""'}), "(x, train_accs, color='r')\n", (20239, 20265), True, 'import matplotlib.pyplot as plt\n'), ((20274, 20290), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.5)', '(1)'], {}), '(0.5, 1)\n', (20282, 20290), True, 'import matplotlib.pyplot as plt\n'), ((20299, 20331), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_accs'], {'color': '"""g"""'}), "(x, val_accs, color='g')\n", (20307, 20331), True, 'import matplotlib.pyplot as plt\n'), ((20340, 20360), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (20351, 20360), True, 'import matplotlib.pyplot as plt\n'), ((20367, 20403), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'train_losses'], {'color': '"""r"""'}), "(x, train_losses, color='r')\n", (20375, 20403), True, 'import matplotlib.pyplot as plt\n'), ((20412, 20446), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_losses'], {'color': '"""g"""'}), "(x, val_losses, color='g')\n", (20420, 20446), True, 'import matplotlib.pyplot as plt\n'), ((20455, 20465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20463, 20465), True, 'import matplotlib.pyplot as plt\n'), ((20495, 20517), 'numpy.asarray', 'np.asarray', (['train_accs'], {}), '(train_accs)\n', (20505, 20517), True, 'import numpy as np\n'), ((20519, 20539), 'numpy.asarray', 'np.asarray', (['val_accs'], {}), '(val_accs)\n', (20529, 20539), True, 'import numpy as np\n'), ((20541, 20565), 'numpy.asarray', 'np.asarray', (['train_losses'], {}), '(train_losses)\n', (20551, 20565), True, 'import numpy as np\n'), ((20567, 20589), 'numpy.asarray', 'np.asarray', (['val_losses'], {}), '(val_losses)\n', (20577, 20589), True, 'import numpy as np\n'), ((21364, 21379), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21377, 21379), False, 'import torch\n'), ((1432, 1458), 'os.listdir', 'os.listdir', (['(path + patient)'], {}), '(path + patient)\n', (1442, 1458), False, 'import os\n'), ((2491, 2517), 'os.listdir', 'os.listdir', (['(path + patient)'], {}), '(path + patient)\n', (2501, 2517), False, 'import os\n'), ((7805, 7899), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(3)', 'out_channels': 'self.out_conv1', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=3, out_channels=self.out_conv1, kernel_size=3, stride\n =1, padding=1)\n', (7814, 7899), True, 'import torch.nn as nn\n'), ((7908, 7917), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7915, 7917), True, 'import torch.nn as nn\n'), ((7931, 7968), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (7943, 7968), True, 'import torch.nn as nn\n'), ((7982, 7999), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (7994, 7999), True, 'import torch.nn as nn\n'), ((8013, 8119), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self.out_conv1', 'out_channels': 'self.out_conv2', 'kernel_size': '(3)', 'stride': '(1)', 'padding': 
'(1)'}), '(in_channels=self.out_conv1, out_channels=self.out_conv2,\n kernel_size=3, stride=1, padding=1)\n', (8022, 8119), True, 'import torch.nn as nn\n'), ((8129, 8138), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8136, 8138), True, 'import torch.nn as nn\n'), ((8152, 8189), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (8164, 8189), True, 'import torch.nn as nn\n'), ((8203, 8220), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (8215, 8220), True, 'import torch.nn as nn\n'), ((8234, 8340), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self.out_conv2', 'out_channels': 'self.out_conv3', 'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_channels=self.out_conv2, out_channels=self.out_conv3,\n kernel_size=3, stride=1, padding=1)\n', (8243, 8340), True, 'import torch.nn as nn\n'), ((8350, 8359), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8357, 8359), True, 'import torch.nn as nn\n'), ((8373, 8410), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (8385, 8410), True, 'import torch.nn as nn\n'), ((8424, 8441), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.1)'], {}), '(0.1)\n', (8436, 8441), True, 'import torch.nn as nn\n'), ((8509, 8526), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (8519, 8526), True, 'import torch.nn as nn\n'), ((8540, 8687), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(self.final_img_size * self.final_img_size * self.out_conv3)', 'out_features': '(self.final_img_size * self.final_img_size * 16)'}), '(in_features=self.final_img_size * self.final_img_size * self.\n out_conv3, out_features=self.final_img_size * self.final_img_size * 16)\n', (8549, 8687), True, 'import torch.nn as nn\n'), ((8688, 8697), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8695, 8697), True, 'import torch.nn as nn\n'), ((8711, 8728), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (8721, 8728), True, 'import torch.nn as nn\n'), ((8742, 8831), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(self.final_img_size * self.final_img_size * 16)', 'out_features': '(2)'}), '(in_features=self.final_img_size * self.final_img_size * 16,\n out_features=2)\n', (8751, 8831), True, 'import torch.nn as nn\n'), ((8837, 8849), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (8847, 8849), True, 'import torch.nn as nn\n'), ((10747, 10786), 'torchvision.transforms.Resize', 'transforms.Resize', (['(img_size, img_size)'], {}), '((img_size, img_size))\n', (10764, 10786), False, 'from torchvision import transforms\n'), ((10828, 10849), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (10847, 10849), False, 'from torchvision import transforms\n'), ((12996, 13022), 'csv.writer', 'csv.writer', (['train_acc_file'], {}), '(train_acc_file)\n', (13006, 13022), False, 'import csv\n'), ((13055, 13082), 'csv.writer', 'csv.writer', (['train_loss_file'], {}), '(train_loss_file)\n', (13065, 13082), False, 'import csv\n'), ((13113, 13137), 'csv.writer', 'csv.writer', (['val_acc_file'], {}), '(val_acc_file)\n', (13123, 13137), False, 'import csv\n'), ((13168, 13193), 'csv.writer', 'csv.writer', (['val_loss_file'], {}), '(val_loss_file)\n', (13178, 13193), False, 'import csv\n'), ((13220, 13240), 'csv.writer', 'csv.writer', (['cls_file'], {}), '(cls_file)\n', (13230, 13240), False, 'import csv\n'), ((14916, 14935), 'statistics.mean', 'mean', (['all_test_accs'], {}), 
'(all_test_accs)\n', (14920, 14935), False, 'from statistics import stdev, mean\n'), ((14980, 15000), 'statistics.stdev', 'stdev', (['all_test_accs'], {}), '(all_test_accs)\n', (14985, 15000), False, 'from statistics import stdev, mean\n'), ((16164, 16203), 'torchvision.transforms.Resize', 'transforms.Resize', (['(img_size, img_size)'], {}), '((img_size, img_size))\n', (16181, 16203), False, 'from torchvision import transforms\n'), ((16240, 16261), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (16259, 16261), False, 'from torchvision import transforms\n'), ((18596, 18621), 'numpy.array', 'np.array', (['[0, 0, val_acc]'], {}), '([0, 0, val_acc])\n', (18604, 18621), True, 'import numpy as np\n'), ((18642, 18666), 'numpy.copy', 'np.copy', (['running_val_acc'], {}), '(running_val_acc)\n', (18649, 18666), True, 'import numpy as np\n'), ((19467, 19491), 'numpy.copy', 'np.copy', (['running_val_acc'], {}), '(running_val_acc)\n', (19474, 19491), True, 'import numpy as np\n'), ((19570, 19594), 'numpy.copy', 'np.copy', (['running_val_acc'], {}), '(running_val_acc)\n', (19577, 19594), True, 'import numpy as np\n'), ((19959, 19984), 'statistics.stdev', 'stdev', (['train_losses[-20:]'], {}), '(train_losses[-20:])\n', (19964, 19984), False, 'from statistics import stdev, mean\n'), ((19996, 20018), 'statistics.mean', 'mean', (['train_accs[-20:]'], {}), '(train_accs[-20:])\n', (20000, 20018), False, 'from statistics import stdev, mean\n'), ((1482, 1561), 'PIL.Image.open', 'Image.open', (["(path + patient + '/' + img_folder + '/' + img_folder + '-image.jpg')"], {}), "(path + patient + '/' + img_folder + '/' + img_folder + '-image.jpg')\n", (1492, 1561), False, 'from PIL import Image\n'), ((1581, 1596), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1591, 1596), True, 'import numpy as np\n'), ((2576, 2655), 'PIL.Image.open', 'Image.open', (["(path + patient + '/' + img_folder + '/' + img_folder + '-image.jpg')"], {}), "(path + patient + '/' + img_folder + '/' + img_folder + '-image.jpg')\n", (2586, 2655), False, 'from PIL import Image\n'), ((2699, 2714), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (2709, 2714), True, 'import numpy as np\n'), ((3989, 4013), 'os.path.isfile', 'os.path.isfile', (['(path + f)'], {}), '(path + f)\n', (4003, 4013), False, 'import os\n'), ((5883, 5909), 'os.listdir', 'os.listdir', (['(path + patient)'], {}), '(path + patient)\n', (5893, 5909), False, 'import os\n'), ((6146, 6172), 'os.listdir', 'os.listdir', (['(path + patient)'], {}), '(path + patient)\n', (6156, 6172), False, 'import os\n'), ((6347, 6373), 'os.listdir', 'os.listdir', (['(path + patient)'], {}), '(path + patient)\n', (6357, 6373), False, 'import os\n'), ((7305, 7335), 're.findall', 're.findall', (['"""[0-9]+"""', 'filename'], {}), "('[0-9]+', filename)\n", (7315, 7335), False, 'import re\n'), ((14723, 14743), 'statistics.stdev', 'stdev', (['all_test_accs'], {}), '(all_test_accs)\n', (14728, 14743), False, 'from statistics import stdev, mean\n'), ((14745, 14764), 'statistics.mean', 'mean', (['all_test_accs'], {}), '(all_test_accs)\n', (14749, 14764), False, 'from statistics import stdev, mean\n'), ((15434, 15473), 'torchvision.transforms.Resize', 'transforms.Resize', (['(img_size, img_size)'], {}), '((img_size, img_size))\n', (15451, 15473), False, 'from torchvision import transforms\n'), ((15583, 15618), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(0, 360)'], {}), '((0, 360))\n', (15608, 15618), False, 'from torchvision import 
transforms\n'), ((15726, 15747), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (15745, 15747), False, 'from torchvision import transforms\n'), ((15806, 15845), 'torchvision.transforms.Resize', 'transforms.Resize', (['(img_size, img_size)'], {}), '((img_size, img_size))\n', (15823, 15845), False, 'from torchvision import transforms\n'), ((16100, 16121), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (16119, 16121), False, 'from torchvision import transforms\n'), ((19384, 19408), 'numpy.mean', 'np.mean', (['running_val_acc'], {}), '(running_val_acc)\n', (19391, 19408), True, 'import numpy as np\n'), ((19411, 19427), 'numpy.mean', 'np.mean', (['val_res'], {}), '(val_res)\n', (19418, 19427), True, 'import numpy as np\n'), ((2816, 2838), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(5)'}), "('font', size=5)\n", (2822, 2838), True, 'import matplotlib.pyplot as plt\n'), ((2886, 2914), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(10)', '(10)', 'counter'], {}), '(10, 10, counter)\n', (2897, 2914), True, 'import matplotlib.pyplot as plt\n'), ((2934, 2949), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2944, 2949), True, 'import matplotlib.pyplot as plt\n'), ((3137, 3152), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3145, 3152), True, 'import matplotlib.pyplot as plt\n'), ((3226, 3236), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3234, 3236), True, 'import matplotlib.pyplot as plt\n'), ((3287, 3309), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(5)'}), "('font', size=5)\n", (3293, 3309), True, 'import matplotlib.pyplot as plt\n'), ((3330, 3358), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(10)', '(10)', 'counter'], {}), '(10, 10, counter)\n', (3341, 3358), True, 'import matplotlib.pyplot as plt\n'), ((3379, 3394), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (3389, 3394), True, 'import matplotlib.pyplot as plt\n'), ((3582, 3597), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3590, 3597), True, 'import matplotlib.pyplot as plt\n'), ((14272, 14302), 'statistics.mean', 'mean', (['test_accs_per_fold[fold]'], {}), '(test_accs_per_fold[fold])\n', (14276, 14302), False, 'from statistics import stdev, mean\n'), ((14328, 14359), 'statistics.stdev', 'stdev', (['test_accs_per_fold[fold]'], {}), '(test_accs_per_fold[fold])\n', (14333, 14359), False, 'from statistics import stdev, mean\n'), ((17231, 17241), 'torchvision.models.vgg16_bn', 'vgg16_bn', ([], {}), '()\n', (17239, 17241), False, 'from torchvision.models import vgg16_bn\n'), ((22695, 22746), 'numpy.argwhere', 'np.argwhere', (['(predicted_classes[:, 0] != label[:, 0])'], {}), '(predicted_classes[:, 0] != label[:, 0])\n', (22706, 22746), True, 'import numpy as np\n'), ((23158, 23209), 'numpy.argwhere', 'np.argwhere', (['(predicted_classes[:, 0] == label[:, 0])'], {}), '(predicted_classes[:, 0] == label[:, 0])\n', (23169, 23209), True, 'import numpy as np\n'), ((3023, 3046), 'matplotlib.pyplot.title', 'plt.title', (['"""dysplastic"""'], {}), "('dysplastic')\n", (3032, 3046), True, 'import matplotlib.pyplot as plt\n'), ((3097, 3116), 'matplotlib.pyplot.title', 'plt.title', (['"""normal"""'], {}), "('normal')\n", (3106, 3116), True, 'import matplotlib.pyplot as plt\n'), ((3468, 3491), 'matplotlib.pyplot.title', 'plt.title', (['"""dysplastic"""'], {}), "('dysplastic')\n", (3477, 3491), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3561), 
'matplotlib.pyplot.title', 'plt.title', (['"""normal"""'], {}), "('normal')\n", (3551, 3561), True, 'import matplotlib.pyplot as plt\n'), ((4173, 4189), 'csv.reader', 'csv.reader', (['file'], {}), '(file)\n', (4183, 4189), False, 'import csv\n'), ((17299, 17309), 'torchvision.models.vgg16_bn', 'vgg16_bn', ([], {}), '()\n', (17307, 17309), False, 'from torchvision.models import vgg16_bn\n'), ((20994, 21013), 'torch.max', 'torch.max', (['label', '(1)'], {}), '(label, 1)\n', (21003, 21013), False, 'import torch\n'), ((17378, 17403), 'torchvision.models.vgg16_bn', 'vgg16_bn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (17386, 17403), False, 'from torchvision.models import vgg16_bn\n'), ((17558, 17583), 'torchvision.models.vgg16_bn', 'vgg16_bn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (17566, 17583), False, 'from torchvision.models import vgg16_bn\n'), ((22421, 22441), 'torch.max', 'torch.max', (['labelT', '(1)'], {}), '(labelT, 1)\n', (22430, 22441), False, 'import torch\n')]
|
"""ะะ 3.3, <NAME>, ะ8ะ-303ะ-18"""
import numpy as np
import fire # CLI
import matplotlib.pyplot as plt
from sem1.lab1_1.gauss import lu_decomposition, lu_solve
def f(coeffs, x):
"""ะััะธัะปะตะฝะธะต ะทะฝะฐัะตะฝะธั ะฟะพะปะธะฝะพะผะฐ ั ะบะพัััะธัะธะตะฝัะฐะผะธ coeffs"""
return sum([x ** i * c for i, c in enumerate(coeffs)])
def sum_squared_errors(f, y):
"""ะกัะผะผะฐ ะบะฒะฐะดัะฐัะพะฒ ะพัะธะฑะพะบ"""
return sum((f_i - y_i) ** 2 for f_i, y_i in zip(f, y))
def lsm(x, y, n):
"""ะะพะดะฑะพั ะบะพัััะธัะธะตะฝัะพะฒ ะฟะพะปะธะฝะพะผะฐ ััะตะฟะตะฝะธ n ั ะฟะพะผะพััั ะะะ"""
N = len(x)
mat = [[sum([x_j ** (i + j) for x_j in x]) for i in range(n + 1)] for j in range(n + 1)]
    mat[0][0] = N  # the (0, 0) element of the normal matrix is sum of x_k^0, i.e. the number of sample points
b = [sum([x_j ** i * y_j for x_j, y_j in zip(x, y)]) for i in range(n + 1)]
mat = np.array(mat)
b = np.array(b)
p, l, u = lu_decomposition(mat)
b = b @ p
coeffs = lu_solve(l, u, b)
return coeffs
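# Explanatory sketch: lsm() assembles the least-squares normal equations A c = b with
# A[j][i] = sum_k x_k**(i + j) and b[i] = sum_k x_k**i * y_k, and solves them with the
# course LU routines instead of np.linalg.solve. A minimal, hypothetical sanity check
# (not part of the original lab), fitting data that lie exactly on a line:
def _lsm_sanity_check():
    xs = [0.0, 1.0, 2.0, 3.0]
    ys = [1.0, 3.0, 5.0, 7.0]  # exactly y = 1 + 2x
    print(lsm(xs, ys, 1))  # expected to come out close to [1, 2] if the LU helpers behave as usual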
def main():
"""ะะฟะฟัะพะบัะธะผะฐัะธั ัะฐะฑะปะธัะฝะพ ะทะฐะดะฐะฝะฝะพะน ััะฝะบัะธะธ ะผะฝะพะณะพัะปะตะฝะฐะผะธ 1-ะน ะธ 2-ะน ััะตะฟะตะฝะตะน ั ะฟะพะผะพััั ะะะ"""
init_dict = {
"x": [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0],
"y": [0.04979, 0.13534, 0.36788, 1.0, 2.7183, 7.3891]
}
x, y = init_dict["x"], init_dict["y"]
xc = np.arange(min(x) - 1, max(x) + 1, .01)
c1 = lsm(x, y, 1)
y1 = f(c1, xc)
c2 = lsm(x, y, 2)
y2 = f(c2, xc)
plt.plot(x, y, "o", label="ะั
ะพะดะฝัะต ะดะฐะฝะฝัะต")
plt.plot(xc, y1, label="ะะพะปะธะฝะพะผ ะฟะตัะฒะพะน ััะตะฟะตะฝะธ")
plt.plot(xc, y2, label="ะะพะปะธะฝะพะผ ะฒัะพัะพะน ััะตะฟะตะฝะธ")
plt.title("ะะฟะฟัะพะบัะธะผะฐัะธั ะะะ")
plt.grid(True)
plt.legend()
plt.savefig("plot.jpg", dpi=300)
plt.show()
e1 = sum_squared_errors(lsm(x, y, 1), y)
e2 = sum_squared_errors(lsm(x, y, 2), y)
print("ะกัะผะผะฐ ะบะฒะฐะดัะฐัะพะฒ ะพัะธะฑะพะบ:")
print("\tn = 1:", e1)
print("\tn = 2:", e2)
print("\nะะฟะฟัะพะบัะธะผะธััััะธะต ััะฝะบัะธะธ:")
xs = ["", "x", "x^2"]
for c, n in zip([c1, c2], [1, 2]):
print(f"\tn = {n}:", " + ".join([f"{v:.6f}{x}" for v, x in zip(c, xs)]))
if __name__ == "__main__":
fire.Fire(main)
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"fire.Fire",
"sem1.lab1_1.gauss.lu_decomposition",
"matplotlib.pyplot.plot",
"sem1.lab1_1.gauss.lu_solve",
"numpy.array",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((735, 748), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (743, 748), True, 'import numpy as np\n'), ((757, 768), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (765, 768), True, 'import numpy as np\n'), ((783, 804), 'sem1.lab1_1.gauss.lu_decomposition', 'lu_decomposition', (['mat'], {}), '(mat)\n', (799, 804), False, 'from sem1.lab1_1.gauss import lu_decomposition, lu_solve\n'), ((832, 849), 'sem1.lab1_1.gauss.lu_solve', 'lu_solve', (['l', 'u', 'b'], {}), '(l, u, b)\n', (840, 849), False, 'from sem1.lab1_1.gauss import lu_decomposition, lu_solve\n'), ((1295, 1338), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {'label': '"""ะั
ะพะดะฝัะต ะดะฐะฝะฝัะต"""'}), "(x, y, 'o', label='ะั
ะพะดะฝัะต ะดะฐะฝะฝัะต')\n", (1303, 1338), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1391), 'matplotlib.pyplot.plot', 'plt.plot', (['xc', 'y1'], {'label': '"""ะะพะปะธะฝะพะผ ะฟะตัะฒะพะน ััะตะฟะตะฝะธ"""'}), "(xc, y1, label='ะะพะปะธะฝะพะผ ะฟะตัะฒะพะน ััะตะฟะตะฝะธ')\n", (1351, 1391), True, 'import matplotlib.pyplot as plt\n'), ((1396, 1444), 'matplotlib.pyplot.plot', 'plt.plot', (['xc', 'y2'], {'label': '"""ะะพะปะธะฝะพะผ ะฒัะพัะพะน ััะตะฟะตะฝะธ"""'}), "(xc, y2, label='ะะพะปะธะฝะพะผ ะฒัะพัะพะน ััะตะฟะตะฝะธ')\n", (1404, 1444), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1480), 'matplotlib.pyplot.title', 'plt.title', (['"""ะะฟะฟัะพะบัะธะผะฐัะธั ะะะ"""'], {}), "('ะะฟะฟัะพะบัะธะผะฐัะธั ะะะ')\n", (1459, 1480), True, 'import matplotlib.pyplot as plt\n'), ((1485, 1499), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1493, 1499), True, 'import matplotlib.pyplot as plt\n'), ((1504, 1516), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1514, 1516), True, 'import matplotlib.pyplot as plt\n'), ((1522, 1554), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot.jpg"""'], {'dpi': '(300)'}), "('plot.jpg', dpi=300)\n", (1533, 1554), True, 'import matplotlib.pyplot as plt\n'), ((1559, 1569), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1567, 1569), True, 'import matplotlib.pyplot as plt\n'), ((1973, 1988), 'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (1982, 1988), False, 'import fire\n')]
|
from argparse import ArgumentParser
from itertools import starmap
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from fyne import blackscholes, heston
from matplotlib.patches import Patch
from scipy.stats import gaussian_kde
import settings
from align_settings import STARTTIME, ENDTIME
from utils import resample
def safe_xs(*args, **kwargs):
try:
return pd.Series.xs(*args, **kwargs)
except KeyError:
return np.nan
def get_tick_size(quote):
diffs = (quote['Ask'] + quote['Bid']).diff()
diffs = diffs[diffs > 1e-6]
return np.round(diffs.min(), 2)
def filter_tick_size(data, quote, size):
tick_size = quote.groupby('Strike').apply(get_tick_size)
return data.reindex(tick_size[tick_size == size].index, level='Strike')
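# get_tick_size infers the tick size of an option series as the smallest non-zero
# change of (Ask + Bid), rounded to cents; filter_tick_size keeps only the strikes
# whose inferred tick size equals `size` (plot_intraday below passes 0.05 to restrict
# the analysis to five-cent books).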
def filter_trade_on_book(quote, trade):
max_expiry = np.max(quote.index.get_level_values('Expiry'))
trade = trade[trade.index.get_level_values('Expiry') <= max_expiry]
quote_aligned = trade.groupby(['Class', 'Expiry', 'Strike']
).apply(lambda o: resample(quote.xs(o.name),
o.xs(o.name).index))
valid_trades = ((trade['Price'] == quote_aligned['Bid']) |
(trade['Price'] == quote_aligned['Ask']))
filtered = trade[valid_trades]
quote_aligned = quote_aligned.loc[valid_trades]
filtered['Buy'] = filtered['Price'] == quote_aligned['Ask']
filtered['Half-spread'] = (quote_aligned['Ask'] - quote_aligned['Bid']).round(2)/2
return filtered
def compute_duration(quote):
quote = quote.copy()
quote['Half-spread'] = (quote['Ask'] - quote['Bid']).round(2)/2
time = quote.reset_index('Time'
).set_index('Half-spread', append=True)[['Time']]
time['Duration'] = time['Time'].groupby(['Class', 'Expiry', 'Strike']
).transform(lambda t: t.diff().shift(-1))
time['Time'] += time['Duration']/2
duration = time.set_index('Time', append=True)['Duration']
duration /= pd.to_timedelta('1s')
return duration
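# compute_duration attaches to every quote state its half-spread and the time until
# the next book update (Duration, in seconds), timestamped at the midpoint of that
# interval, so volumes and durations can later be aggregated per
# (strike, half-spread) bucket.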
def compute_volume_duration(quote, trade):
trade = filter_trade_on_book(quote, trade)
volume = trade.set_index(['Half-spread', 'Buy'], append=True)['Volume']
duration = compute_duration(quote)
return volume, duration
def plot_arrival_rates_bubbles(volume, duration):
volume = volume.groupby(['Class', 'Expiry', 'Strike', 'Half-spread', 'Buy']
).sum()
duration = duration.groupby(['Class', 'Expiry', 'Strike',
'Half-spread']).sum()
duration = duration[duration > 300]
arrival_rate = volume.groupby(['Class', 'Expiry', 'Strike', 'Half-spread']
).transform(lambda d: d.xs(d.name
)/safe_xs(duration, d.name))
arrival_rate.name = 'Arrival rate'
fig, axes = plt.subplots(3, 2, sharey=True, sharex=True, figsize=(8, 10))
patches = [Patch(color='b', alpha=.5, label='Call'),
Patch(color='r', alpha=.5, label='Put')]
axes[0, 1].legend(handles=patches)
for row, (e, r_ex) in zip(axes, arrival_rate.groupby('Expiry')):
for bs in ['Buy', 'Sell']:
ax = row[0] if bs == 'Buy' else row[1]
ax.set_title("Expiry: {}, {}".format(
pd.to_datetime(e).strftime('%Y-%m-%d'), bs))
for cp, cl in [('C', 'b'), ('P', 'r')]:
r = r_ex.xs((cp, bs == 'Buy'), level=('Class', 'Buy'))
r.reset_index(['Strike', 'Half-spread']).plot.scatter(
x='Strike', y='Half-spread', s=20*r/r_ex.mean(), ax=ax,
xlim=(325, 550), ylim=(0, None), alpha=.5, color=cl)
return fig
def plot_arrival_rates(arrival_rate):
depths = arrival_rate.index.get_level_values('Half-spread')
arrival_rate = arrival_rate[depths > 0].dropna()
bandwidth = 0.25
levels = ['Class', 'Expiry', 'Buy']
kernel = arrival_rate.groupby(levels).apply(
lambda r: gaussian_kde(np.stack(r.xs(r.name, level=levels).index, axis=-1),
bandwidth, r.values))
xlen, ylen = 200, 150
xmin, xmax, ymin, ymax = -0.2, 0.15, 0.0, 0.3
x = np.linspace(xmin, xmax, xlen)
y = np.linspace(ymin, ymax, ylen)
x_b, y_b = np.broadcast_arrays(x[:, None], y[None, :])
fig, axes = plt.subplots(3, 2, sharex=True, sharey=True, figsize=(8, 10))
patches = [Patch(color='tab:blue', label='Call'),
Patch(color='tab:red', label='Put')]
axes[0, 1].legend(handles=patches)
for row, (e, k) in zip(axes, kernel.groupby('Expiry')):
row[0].set_title("Expiry: {}, Buy".format(
pd.to_datetime(e).strftime('%Y-%m-%d')))
row[1].set_title("Expiry: {}, Sell".format(
pd.to_datetime(e).strftime('%Y-%m-%d')))
for cp, cm in [('C', plt.cm.Blues), ('P', plt.cm.Reds)]:
z = k.xs((cp, e, True))(np.array([x_b.ravel(), y_b.ravel()]))
z = np.rot90(np.reshape(z, x_b.shape))
row[0].imshow(z, extent=[xmin, xmax, ymin, ymax], cmap=cm,
aspect='auto', alpha=.5)
z = k.xs((cp, e, False))(np.array([x_b.ravel(), y_b.ravel()]))
z = np.rot90(np.reshape(z, x_b.shape))
row[1].imshow(z, extent=[xmin, xmax, ymin, ymax], cmap=cm,
aspect='auto', alpha=.5)
return fig
def quote_slice(quote, start, end):
quote = quote.copy()
quote.loc[start] = np.nan
quote.sort_index(inplace=True)
quote.ffill(inplace=True)
return quote.loc[start:end]
def plot_intraday(volume, duration):
filtered_volume = volume.groupby(['Class', 'Expiry']).apply(lambda e: filter_tick_size(e.xs(e.name), quote.xs(e.name), 0.05))
filtered_duration = duration.groupby(['Class', 'Expiry']).apply(lambda e: e.xs(e.name).reindex(np.unique(filtered_volume.xs(e.name).index.get_level_values('Strike')), level='Strike').dropna())
filtered_duration = filtered_duration.groupby(['Class', 'Expiry', 'Strike']).apply(lambda o: o.xs(o.name).reindex(np.unique(filtered_volume.xs(o.name).index.get_level_values('Half-spread')), level='Half-spread').dropna())
volume_by_depth = filtered_volume.groupby('Expiry').apply(lambda e: e.groupby('Half-spread').sum().loc[.025:.225])
duration_by_depth = filtered_duration.groupby('Expiry').apply(lambda e: e.groupby('Half-spread').sum().loc[.025:.225])
volume_kde = filtered_volume.groupby('Expiry').apply(lambda e: e.groupby(['Half-spread', 'Time']).sum().loc[.025:.225].groupby('Half-spread').apply(lambda d: gaussian_kde((d.xs(d.name).index - pd.to_datetime('2016-01-04'))/pd.to_timedelta('1s'), weights=d, bw_method=.25)))
duration_kde = filtered_duration.groupby('Expiry').apply(lambda e: e.groupby(['Half-spread', 'Time']).sum().loc[.025:.225].groupby('Half-spread').apply(lambda d: gaussian_kde((d.xs(d.name).index - pd.to_datetime('2016-01-04'))/pd.to_timedelta('1s'), weights=d, bw_method=.05)))
fig_volume, axes = plt.subplots(3, 1, sharex=True, **settings.PLOT)
tmin, tmax = 8*3600, 16.5*3600
t = np.linspace(tmin, tmax, 100)
for ax, (e, v) in zip(axes, volume_kde.groupby('Expiry')):
z = volume_by_depth.xs(e).values*np.array([k.xs(d)(t) for d, k in v.xs(e).groupby('Half-spread')]).T
ax.imshow(np.rot90(z), extent=[tmin/3600, tmax/3600, 0.025, 0.225], aspect='auto')
ax.set_ylabel('Half-spread (โฌ)')
ax.set_title('Expiry: {}'.format(pd.to_datetime(e).strftime('%Y-%m-%d')))
ax.set_xlabel('Time (hours)')
fig_volume.tight_layout()
fig_duration, axes = plt.subplots(3, 1, sharex=True, **settings.PLOT)
tmin, tmax = 8*3600, 16.5*3600
t = np.linspace(tmin, tmax, 100)
for ax, (e, du) in zip(axes, duration_kde.groupby('Expiry')):
z = duration_by_depth.xs(e).values*np.array([k.xs(d)(t) for d, k in du.xs(e).groupby('Half-spread')]).T
ax.imshow(np.rot90(z), extent=[tmin/3600, tmax/3600, 0.025, 0.225], aspect='auto')
ax.set_ylabel('Half-spread (โฌ)')
ax.set_title('Expiry: {}'.format(pd.to_datetime(e).strftime('%Y-%m-%d')))
ax.set_xlabel('Time (hours)')
fig_duration.tight_layout()
fig_arrival, axes = plt.subplots(3, 1, sharex=True, **settings.PLOT)
tmin, tmax = 8*3600, 16.5*3600
t = np.linspace(tmin, tmax, 100)
for ax, (e, du), (_, v) in zip(axes, duration_kde.groupby('Expiry'), volume_kde.groupby('Expiry')):
z_v = volume_by_depth.xs(e).values*np.array([k.xs(d)(t) for d, k in v.xs(e).groupby('Half-spread')]).T
z_d = duration_by_depth.xs(e).values*np.array([k.xs(d)(t) for d, k in du.xs(e).groupby('Half-spread')]).T
z = np.clip(z_v/z_d, 1e-3, 1e1)
ax.imshow(np.rot90(np.log(z)), extent=[tmin/3600, tmax/3600, 0.025, 0.225], aspect='auto')
ax.set_ylabel('Half-spread (โฌ)')
ax.set_title('Expiry: {}'.format(pd.to_datetime(e).strftime('%Y-%m-%d')))
ax.set_xlabel('Time (hours)')
fig_arrival.tight_layout()
return fig_volume, fig_duration, fig_arrival
if __name__ == '__main__':
cli = ArgumentParser()
cli.add_argument('quote_filename')
cli.add_argument('trade_filename')
cli.add_argument('dest_bubbles_filename')
cli.add_argument('dest_intraday_volume_filename')
cli.add_argument('dest_intraday_duration_filename')
cli.add_argument('dest_intraday_arrival_filename')
args = cli.parse_args()
quote = pd.read_parquet(args.quote_filename)
trade = pd.read_parquet(args.trade_filename).xs('AEX')
quote.sort_index(inplace=True)
volume, duration = compute_volume_duration(quote, trade)
fig = plot_arrival_rates_bubbles(volume, duration)
fig.savefig(args.dest_bubbles_filename)
figs = plot_intraday(volume, duration)
figs[0].savefig(args.dest_intraday_volume_filename)
figs[1].savefig(args.dest_intraday_duration_filename)
figs[2].savefig(args.dest_intraday_arrival_filename)
|
[
"numpy.clip",
"pandas.to_timedelta",
"pandas.read_parquet",
"argparse.ArgumentParser",
"numpy.reshape",
"numpy.log",
"pandas.Series.xs",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.patches.Patch",
"numpy.rot90",
"numpy.broadcast_arrays",
"pandas.to_datetime"
] |
[((2058, 2079), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""1s"""'], {}), "('1s')\n", (2073, 2079), True, 'import pandas as pd\n'), ((2921, 2982), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'sharey': '(True)', 'sharex': '(True)', 'figsize': '(8, 10)'}), '(3, 2, sharey=True, sharex=True, figsize=(8, 10))\n', (2933, 2982), True, 'import matplotlib.pyplot as plt\n'), ((4251, 4280), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'xlen'], {}), '(xmin, xmax, xlen)\n', (4262, 4280), True, 'import numpy as np\n'), ((4289, 4318), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'ylen'], {}), '(ymin, ymax, ylen)\n', (4300, 4318), True, 'import numpy as np\n'), ((4334, 4377), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['x[:, None]', 'y[None, :]'], {}), '(x[:, None], y[None, :])\n', (4353, 4377), True, 'import numpy as np\n'), ((4395, 4456), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'sharex': '(True)', 'sharey': '(True)', 'figsize': '(8, 10)'}), '(3, 2, sharex=True, sharey=True, figsize=(8, 10))\n', (4407, 4456), True, 'import matplotlib.pyplot as plt\n'), ((7056, 7104), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)'}), '(3, 1, sharex=True, **settings.PLOT)\n', (7068, 7104), True, 'import matplotlib.pyplot as plt\n'), ((7148, 7176), 'numpy.linspace', 'np.linspace', (['tmin', 'tmax', '(100)'], {}), '(tmin, tmax, 100)\n', (7159, 7176), True, 'import numpy as np\n'), ((7653, 7701), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)'}), '(3, 1, sharex=True, **settings.PLOT)\n', (7665, 7701), True, 'import matplotlib.pyplot as plt\n'), ((7745, 7773), 'numpy.linspace', 'np.linspace', (['tmin', 'tmax', '(100)'], {}), '(tmin, tmax, 100)\n', (7756, 7773), True, 'import numpy as np\n'), ((8257, 8305), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)'}), '(3, 1, sharex=True, **settings.PLOT)\n', (8269, 8305), True, 'import matplotlib.pyplot as plt\n'), ((8349, 8377), 'numpy.linspace', 'np.linspace', (['tmin', 'tmax', '(100)'], {}), '(tmin, tmax, 100)\n', (8360, 8377), True, 'import numpy as np\n'), ((9123, 9139), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (9137, 9139), False, 'from argparse import ArgumentParser\n'), ((9470, 9506), 'pandas.read_parquet', 'pd.read_parquet', (['args.quote_filename'], {}), '(args.quote_filename)\n', (9485, 9506), True, 'import pandas as pd\n'), ((396, 425), 'pandas.Series.xs', 'pd.Series.xs', (['*args'], {}), '(*args, **kwargs)\n', (408, 425), True, 'import pandas as pd\n'), ((2998, 3039), 'matplotlib.patches.Patch', 'Patch', ([], {'color': '"""b"""', 'alpha': '(0.5)', 'label': '"""Call"""'}), "(color='b', alpha=0.5, label='Call')\n", (3003, 3039), False, 'from matplotlib.patches import Patch\n'), ((3055, 3095), 'matplotlib.patches.Patch', 'Patch', ([], {'color': '"""r"""', 'alpha': '(0.5)', 'label': '"""Put"""'}), "(color='r', alpha=0.5, label='Put')\n", (3060, 3095), False, 'from matplotlib.patches import Patch\n'), ((4472, 4509), 'matplotlib.patches.Patch', 'Patch', ([], {'color': '"""tab:blue"""', 'label': '"""Call"""'}), "(color='tab:blue', label='Call')\n", (4477, 4509), False, 'from matplotlib.patches import Patch\n'), ((4526, 4561), 'matplotlib.patches.Patch', 'Patch', ([], {'color': '"""tab:red"""', 'label': '"""Put"""'}), "(color='tab:red', label='Put')\n", (4531, 4561), False, 'from matplotlib.patches import Patch\n'), ((8719, 8750), 'numpy.clip', 'np.clip', (['(z_v / z_d)', '(0.001)', 
'(10.0)'], {}), '(z_v / z_d, 0.001, 10.0)\n', (8726, 8750), True, 'import numpy as np\n'), ((7367, 7378), 'numpy.rot90', 'np.rot90', (['z'], {}), '(z)\n', (7375, 7378), True, 'import numpy as np\n'), ((7970, 7981), 'numpy.rot90', 'np.rot90', (['z'], {}), '(z)\n', (7978, 7981), True, 'import numpy as np\n'), ((9519, 9555), 'pandas.read_parquet', 'pd.read_parquet', (['args.trade_filename'], {}), '(args.trade_filename)\n', (9534, 9555), True, 'import pandas as pd\n'), ((5035, 5059), 'numpy.reshape', 'np.reshape', (['z', 'x_b.shape'], {}), '(z, x_b.shape)\n', (5045, 5059), True, 'import numpy as np\n'), ((5283, 5307), 'numpy.reshape', 'np.reshape', (['z', 'x_b.shape'], {}), '(z, x_b.shape)\n', (5293, 5307), True, 'import numpy as np\n'), ((8774, 8783), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (8780, 8783), True, 'import numpy as np\n'), ((4725, 4742), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (4739, 4742), True, 'import pandas as pd\n'), ((4830, 4847), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (4844, 4847), True, 'import pandas as pd\n'), ((7522, 7539), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (7536, 7539), True, 'import pandas as pd\n'), ((8125, 8142), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (8139, 8142), True, 'import pandas as pd\n'), ((8928, 8945), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (8942, 8945), True, 'import pandas as pd\n'), ((3357, 3374), 'pandas.to_datetime', 'pd.to_datetime', (['e'], {}), '(e)\n', (3371, 3374), True, 'import pandas as pd\n'), ((6699, 6720), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""1s"""'], {}), "('1s')\n", (6714, 6720), True, 'import pandas as pd\n'), ((6981, 7002), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""1s"""'], {}), "('1s')\n", (6996, 7002), True, 'import pandas as pd\n'), ((6669, 6697), 'pandas.to_datetime', 'pd.to_datetime', (['"""2016-01-04"""'], {}), "('2016-01-04')\n", (6683, 6697), True, 'import pandas as pd\n'), ((6951, 6979), 'pandas.to_datetime', 'pd.to_datetime', (['"""2016-01-04"""'], {}), "('2016-01-04')\n", (6965, 6979), True, 'import pandas as pd\n')]
|
import os
import random
import datetime
import argparse
import numpy as np
from tqdm import tqdm
from model.unetdsbn import Unet2D
from utils.loss import dice_loss1
from datasets.dataset import Dataset, ToTensor, CreateOnehotLabel
import torch
import torchvision.transforms as tfs
from torch import optim
from torch.optim import Adam
from torch.backends import cudnn
from torch.nn import DataParallel
from torch.utils.data import DataLoader
parser = argparse.ArgumentParser('Dual Normalization U-Net Training')
parser.add_argument('--data_dir', type=str, default='./data/brats/npz_data')
parser.add_argument('--train_domain_list_1', nargs='+')
parser.add_argument('--train_domain_list_2', nargs='+')
parser.add_argument('--result_dir', type=str, default='./results/unet_dn')
parser.add_argument('--n_classes', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--n_epochs', type=int, default=50)
parser.add_argument('--save_step', type=int, default=10)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--gpu_ids', type=str, default='0')
parser.add_argument('--deterministic', dest='deterministic', action='store_true')
args = parser.parse_args()
def repeat_dataloader(iterable):
""" repeat dataloader """
while True:
for x in iterable:
yield x
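# Wrapping a DataLoader in repeat_dataloader yields batches endlessly, so while the
# training loop runs one epoch over the first domain it can keep calling next() on the
# second domain's iterator without that loader ever being exhausted.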
def worker_init_fn(worker_id):
random.seed(args.seed+worker_id)
if __name__ == '__main__':
start_time = datetime.datetime.now()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_ids
base_dir = args.data_dir
batch_size = args.batch_size
save_step = args.save_step
lr = args.lr
train_domain_list_1 = args.train_domain_list_1
train_domain_list_2 = args.train_domain_list_2
max_epoch = args.n_epochs
result_dir = args.result_dir
n_classes = args.n_classes
log_dir = os.path.join(result_dir, 'log')
model_dir = os.path.join(result_dir, 'model')
if args.deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
dataloader_train = []
model = Unet2D(num_classes=n_classes, norm='dsbn', num_domains=2)
params_num = sum(p.numel() for p in model.parameters())
print("\nModle's Params: %.3fM" % (params_num / 1e6))
model = DataParallel(model).cuda()
optimizer = Adam(params=model.parameters(), lr=lr, betas=(0.9, 0.999))
exp_lr = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
dataset_1 = Dataset(base_dir=base_dir, split='train', domain_list=train_domain_list_1,
transforms=tfs.Compose([
CreateOnehotLabel(num_classes=n_classes),
ToTensor()
]))
dataloader_1 = DataLoader(dataset_1, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)
dataloader_train.append(dataloader_1)
dataset_2 = Dataset(base_dir=base_dir, split='train', domain_list=train_domain_list_2,
transforms=tfs.Compose([
CreateOnehotLabel(num_classes=n_classes),
ToTensor()
]))
dataloader_2 = DataLoader(dataset_2, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)
dataloader_train.append(dataloader_2)
for epoch_num in range(max_epoch):
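        # wrap each domain's dataloader in an endlessly repeating iterator so a batch from every domain can be drawn at each step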
data_iter = [repeat_dataloader(dataloader_train[i]) for i in range(2)]
print('Epoch: {}, LR: {}'.format(epoch_num, round(exp_lr.get_last_lr()[0], 6)))
tbar = tqdm(dataloader_train[0], ncols=150)
model.train()
for i, batch in enumerate(tbar):
### get all domains' sample_batch ###
sample_batches = [batch]
other_sample_batches = [next(data_iter[i]) for i in range(1, 2)]
sample_batches += other_sample_batches
total_loss = 0
count = 0
for train_idx in range(2):
count += 1
sample_data, sample_label = sample_batches[train_idx]['image'].cuda(), sample_batches[train_idx]['onehot_label'].cuda()
outputs_soft = model(sample_data, domain_label=train_idx*torch.ones(sample_data.shape[0], dtype=torch.long))
loss = dice_loss1(outputs_soft, sample_label)
total_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
tbar.set_description('Total Loss: {}'.format(round((total_loss / count), 6)))
exp_lr.step()
if (epoch_num + 1) % save_step == 0:
model_save_model_path = os.path.join(model_dir, 'epoch_{}.pth'.format(epoch_num))
torch.save(model.module.state_dict(), model_save_model_path)
print('save model to {}'.format(model_save_model_path))
model_save_model_path = os.path.join(model_dir, 'final_model.pth'.format(epoch_num))
torch.save(model.module.state_dict(), model_save_model_path)
print('save model to {}'.format(model_save_model_path))
end_time = datetime.datetime.now()
print('Finish running. Cost total time: {} hours'.format((end_time - start_time).seconds / 3600))
|
[
"torch.manual_seed",
"os.path.exists",
"utils.loss.dice_loss1",
"argparse.ArgumentParser",
"torch.optim.lr_scheduler.ExponentialLR",
"os.makedirs",
"datasets.dataset.CreateOnehotLabel",
"tqdm.tqdm",
"os.path.join",
"torch.nn.DataParallel",
"random.seed",
"datasets.dataset.ToTensor",
"datetime.datetime.now",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"torch.cuda.manual_seed",
"model.unetdsbn.Unet2D",
"torch.ones"
] |
[((454, 514), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Dual Normalization U-Net Training"""'], {}), "('Dual Normalization U-Net Training')\n", (477, 514), False, 'import argparse\n'), ((1464, 1498), 'random.seed', 'random.seed', (['(args.seed + worker_id)'], {}), '(args.seed + worker_id)\n', (1475, 1498), False, 'import random\n'), ((1541, 1564), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1562, 1564), False, 'import datetime\n'), ((1939, 1970), 'os.path.join', 'os.path.join', (['result_dir', '"""log"""'], {}), "(result_dir, 'log')\n", (1951, 1970), False, 'import os\n'), ((1987, 2020), 'os.path.join', 'os.path.join', (['result_dir', '"""model"""'], {}), "(result_dir, 'model')\n", (1999, 2020), False, 'import os\n'), ((2373, 2430), 'model.unetdsbn.Unet2D', 'Unet2D', ([], {'num_classes': 'n_classes', 'norm': '"""dsbn"""', 'num_domains': '(2)'}), "(num_classes=n_classes, norm='dsbn', num_domains=2)\n", (2379, 2430), False, 'from model.unetdsbn import Unet2D\n'), ((2678, 2733), 'torch.optim.lr_scheduler.ExponentialLR', 'optim.lr_scheduler.ExponentialLR', (['optimizer'], {'gamma': '(0.99)'}), '(optimizer, gamma=0.99)\n', (2710, 2733), False, 'from torch import optim\n'), ((3032, 3173), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_1'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(8)', 'pin_memory': '(True)', 'drop_last': '(True)', 'worker_init_fn': 'worker_init_fn'}), '(dataset_1, batch_size=batch_size, shuffle=True, num_workers=8,\n pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)\n', (3042, 3173), False, 'from torch.utils.data import DataLoader\n'), ((3509, 3650), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_2'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(8)', 'pin_memory': '(True)', 'drop_last': '(True)', 'worker_init_fn': 'worker_init_fn'}), '(dataset_2, batch_size=batch_size, shuffle=True, num_workers=8,\n pin_memory=True, drop_last=True, worker_init_fn=worker_init_fn)\n', (3519, 3650), False, 'from torch.utils.data import DataLoader\n'), ((5474, 5497), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5495, 5497), False, 'import datetime\n'), ((2124, 2146), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (2135, 2146), False, 'import random\n'), ((2155, 2180), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2169, 2180), True, 'import numpy as np\n'), ((2189, 2217), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2206, 2217), False, 'import torch\n'), ((2226, 2259), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2248, 2259), False, 'import torch\n'), ((2276, 2301), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (2290, 2301), False, 'import os\n'), ((2311, 2333), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), '(model_dir)\n', (2322, 2333), False, 'import os\n'), ((3915, 3951), 'tqdm.tqdm', 'tqdm', (['dataloader_train[0]'], {'ncols': '(150)'}), '(dataloader_train[0], ncols=150)\n', (3919, 3951), False, 'from tqdm import tqdm\n'), ((2561, 2580), 'torch.nn.DataParallel', 'DataParallel', (['model'], {}), '(model)\n', (2573, 2580), False, 'from torch.nn import DataParallel\n'), ((4632, 4670), 'utils.loss.dice_loss1', 'dice_loss1', (['outputs_soft', 'sample_label'], {}), '(outputs_soft, sample_label)\n', (4642, 4670), False, 'from utils.loss import dice_loss1\n'), ((2904, 2944), 
'datasets.dataset.CreateOnehotLabel', 'CreateOnehotLabel', ([], {'num_classes': 'n_classes'}), '(num_classes=n_classes)\n', (2921, 2944), False, 'from datasets.dataset import Dataset, ToTensor, CreateOnehotLabel\n'), ((2974, 2984), 'datasets.dataset.ToTensor', 'ToTensor', ([], {}), '()\n', (2982, 2984), False, 'from datasets.dataset import Dataset, ToTensor, CreateOnehotLabel\n'), ((3381, 3421), 'datasets.dataset.CreateOnehotLabel', 'CreateOnehotLabel', ([], {'num_classes': 'n_classes'}), '(num_classes=n_classes)\n', (3398, 3421), False, 'from datasets.dataset import Dataset, ToTensor, CreateOnehotLabel\n'), ((3451, 3461), 'datasets.dataset.ToTensor', 'ToTensor', ([], {}), '()\n', (3459, 3461), False, 'from datasets.dataset import Dataset, ToTensor, CreateOnehotLabel\n'), ((4557, 4607), 'torch.ones', 'torch.ones', (['sample_data.shape[0]'], {'dtype': 'torch.long'}), '(sample_data.shape[0], dtype=torch.long)\n', (4567, 4607), False, 'import torch\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.audio.transforms as audio
from mindspore import log as logger
from mindspore.dataset.audio.utils import Modulation, Interpolation
def count_unequal_element(data_expected, data_me, rtol, atol):
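    # element-wise closeness check: fail if the fraction of entries deviating by more than atol + rtol*|expected| exceeds rtol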
assert data_expected.shape == data_me.shape
total_count = len(data_expected.flatten())
error = np.abs(data_expected - data_me)
greater = np.greater(error, atol + np.abs(data_expected) * rtol)
loss_count = np.count_nonzero(greater)
assert (loss_count / total_count) < rtol, "\ndata_expected_std:{0}\ndata_me_error:{1}\nloss:{2}".format(
data_expected[greater], data_me[greater], error[greater])
def test_flanger_eager_sinusoidal_linear_float64():
""" mindspore eager mode normal testcase:flanger op"""
# Original waveform
waveform = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float64)
# Expect waveform
expect_waveform = np.array([[0.10000000000, 0.19999999536, 0.29999998145],
[0.23391812865, 0.29239766081, 0.35087719298]], dtype=np.float64)
flanger_op = audio.Flanger(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.SINUSOIDAL, Interpolation.LINEAR)
# Filtered waveform by flanger
output = flanger_op(waveform)
count_unequal_element(expect_waveform, output, 0.0001, 0.0001)
def test_flanger_eager_triangular_linear_float32():
""" mindspore eager mode normal testcase:flanger op"""
# Original waveform
waveform = np.array([[-1.2, 2, -3.6], [1, 2.4, 3.7]], dtype=np.float32)
# Expect waveform
expect_waveform = np.array([[-1.0000000000, 1.0000000000, -1.0000000000],
[0.58479529619, 1.0000000000, 1.0000000000]], dtype=np.float32)
flanger_op = audio.Flanger(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.TRIANGULAR, Interpolation.LINEAR)
# Filtered waveform by flanger
output = flanger_op(waveform)
count_unequal_element(expect_waveform, output, 0.0001, 0.0001)
def test_flanger_eager_triangular_linear_int():
""" mindspore eager mode normal testcase:flanger op"""
# Original waveform
waveform = np.array([[-2, -3, 0], [2, 2, 3]], dtype=np.int)
# Expect waveform
expect_waveform = np.array([[-1, -1, 0],
[1, 1, 1]], dtype=np.int)
flanger_op = audio.Flanger(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.TRIANGULAR, Interpolation.LINEAR)
# Filtered waveform by flanger
output = flanger_op(waveform)
count_unequal_element(expect_waveform, output, 0.0001, 0.0001)
def test_flanger_shape_221():
""" mindspore eager mode normal testcase:flanger op"""
# Original waveform
waveform = np.array([[[1], [1.1]], [[0.9], [0.6]]], dtype=np.float64)
# Expect waveform
expect_waveform = np.array([[[1.00000000],
[0.64327485]],
[[0.90000000],
[0.35087719]]], dtype=np.float64)
flanger_op = audio.Flanger(44100)
# Filtered waveform by flanger
output = flanger_op(waveform)
count_unequal_element(expect_waveform, output, 0.0001, 0.0001)
def test_flanger_shape_11211():
""" mindspore eager mode normal testcase:flanger op"""
# Original waveform
waveform = np.array([[[[[0.44]], [[0.55]]]]], dtype=np.float64)
# Expect waveform
expect_waveform = np.array([[[[[0.44000000]], [[0.55000000]]]]], dtype=np.float64)
flanger_op = audio.Flanger(44100)
# Filtered waveform by flanger
output = flanger_op(waveform)
count_unequal_element(expect_waveform, output, 0.0001, 0.0001)
def test_flanger_pipeline():
""" mindspore pipeline mode normal testcase:flanger op"""
# Original waveform
waveform = np.array([[[1.1, 1.2, 1.3], [1.4, 1.5, 1.6]]], dtype=np.float64)
# Expect waveform
expect_waveform = np.array([[[1.00000000000, 1.00000000000, 1.00000000000],
[0.81871345029, 0.87719298245, 0.93567251461]]], dtype=np.float64)
data = (waveform, np.random.sample((1, 2, 1)))
dataset = ds.NumpySlicesDataset(data, ["channel", "sample"], shuffle=False)
flanger_op = audio.Flanger(44100)
# Filtered waveform by flanger
dataset = dataset.map(
input_columns=["channel"], operations=flanger_op, num_parallel_workers=1)
i = 0
for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
count_unequal_element(expect_waveform[i, :],
item['channel'], 0.0001, 0.0001)
i += 1
def test_invalid_flanger_input():
def test_invalid_input(test_name, sample_rate, delay, depth, regen, width, speed, phase, modulation, interpolation,
error, error_msg):
logger.info("Test Flanger with bad input: {0}".format(test_name))
with pytest.raises(error) as error_info:
audio.Flanger(sample_rate, delay, depth, regen, width, speed, phase, modulation, interpolation)
assert error_msg in str(error_info.value)
test_invalid_input("invalid sample_rate parameter value", 0, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input sample_rate is not within the required interval of [-2147483648, 0) and (0, 2147483647].")
test_invalid_input("invalid sample_rate parameter type as a float", 44100.5, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument sample_rate with value 44100.5 is not of "
"type [<class 'int'>], but got <class 'float'>.")
test_invalid_input("invalid sample_rate parameter type as a String", "44100", 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument sample_rate with value 44100 is not of "
"type [<class 'int'>], but got <class 'str'>.")
test_invalid_input("invalid delay parameter type as a String", 44100, "0.0", 2.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument delay with value 0.0 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid delay parameter value", 44100, 50, 2.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input delay is not within the required interval of [0, 30].")
test_invalid_input("invalid depth parameter type as a String", 44100, 0.0, "2.0", 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument depth with value 2.0 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid depth parameter value", 44100, 0.0, 50.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input depth is not within the required interval of [0, 10].")
test_invalid_input("invalid regen parameter type as a String", 44100, 0.0, 2.0, "0.0", 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument regen with value 0.0 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid regen parameter value", 44100, 0.0, 2.0, 100.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input regen is not within the required interval of [-95, 95].")
test_invalid_input("invalid width parameter type as a String", 44100, 0.0, 2.0, 0.0, "71.0", 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument width with value 71.0 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid width parameter value", 44100, 0.0, 2.0, 0.0, 150.0, 0.5, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input width is not within the required interval of [0, 100].")
test_invalid_input("invalid speed parameter type as a String", 44100, 0.0, 2.0, 0.0, 71.0, "0.5", 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument speed with value 0.5 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid speed parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 50, 25.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input speed is not within the required interval of [0.1, 10].")
test_invalid_input("invalid phase parameter type as a String", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, "25.0",
Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
"Argument phase with value 25.0 is not of type [<class 'float'>, <class 'int'>],"
" but got <class 'str'>.")
test_invalid_input("invalid phase parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, 150.0,
Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
"Input phase is not within the required interval of [0, 100].")
test_invalid_input("invalid modulation parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, "test",
Interpolation.LINEAR, TypeError,
"Argument modulation with value test is not of type [<enum 'Modulation'>], "
"but got <class 'str'>.")
test_invalid_input("invalid modulation parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
Modulation.SINUSOIDAL, "test", TypeError,
"Argument interpolation with value test is not of type [<enum 'Interpolation'>], "
"but got <class 'str'>.")
if __name__ == '__main__':
test_flanger_eager_sinusoidal_linear_float64()
test_flanger_eager_triangular_linear_float32()
test_flanger_eager_triangular_linear_int()
test_flanger_shape_221()
test_flanger_shape_11211()
test_flanger_pipeline()
test_invalid_flanger_input()
|
[
"mindspore.dataset.audio.transforms.Flanger",
"numpy.abs",
"mindspore.dataset.NumpySlicesDataset",
"numpy.count_nonzero",
"numpy.array",
"numpy.random.sample",
"pytest.raises"
] |
[((1061, 1092), 'numpy.abs', 'np.abs', (['(data_expected - data_me)'], {}), '(data_expected - data_me)\n', (1067, 1092), True, 'import numpy as np\n'), ((1179, 1204), 'numpy.count_nonzero', 'np.count_nonzero', (['greater'], {}), '(greater)\n', (1195, 1204), True, 'import numpy as np\n'), ((1532, 1594), 'numpy.array', 'np.array', (['[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]'], {'dtype': 'np.float64'}), '([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float64)\n', (1540, 1594), True, 'import numpy as np\n'), ((1639, 1756), 'numpy.array', 'np.array', (['[[0.1, 0.19999999536, 0.29999998145], [0.23391812865, 0.29239766081, \n 0.35087719298]]'], {'dtype': 'np.float64'}), '([[0.1, 0.19999999536, 0.29999998145], [0.23391812865, \n 0.29239766081, 0.35087719298]], dtype=np.float64)\n', (1647, 1756), True, 'import numpy as np\n'), ((1811, 1912), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['(44100)', '(0.0)', '(2.0)', '(0.0)', '(71.0)', '(0.5)', '(25.0)', 'Modulation.SINUSOIDAL', 'Interpolation.LINEAR'], {}), '(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.SINUSOIDAL,\n Interpolation.LINEAR)\n', (1824, 1912), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((2197, 2257), 'numpy.array', 'np.array', (['[[-1.2, 2, -3.6], [1, 2.4, 3.7]]'], {'dtype': 'np.float32'}), '([[-1.2, 2, -3.6], [1, 2.4, 3.7]], dtype=np.float32)\n', (2205, 2257), True, 'import numpy as np\n'), ((2302, 2376), 'numpy.array', 'np.array', (['[[-1.0, 1.0, -1.0], [0.58479529619, 1.0, 1.0]]'], {'dtype': 'np.float32'}), '([[-1.0, 1.0, -1.0], [0.58479529619, 1.0, 1.0]], dtype=np.float32)\n', (2310, 2376), True, 'import numpy as np\n'), ((2471, 2572), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['(44100)', '(0.0)', '(2.0)', '(0.0)', '(71.0)', '(0.5)', '(25.0)', 'Modulation.TRIANGULAR', 'Interpolation.LINEAR'], {}), '(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.TRIANGULAR,\n Interpolation.LINEAR)\n', (2484, 2572), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((2853, 2901), 'numpy.array', 'np.array', (['[[-2, -3, 0], [2, 2, 3]]'], {'dtype': 'np.int'}), '([[-2, -3, 0], [2, 2, 3]], dtype=np.int)\n', (2861, 2901), True, 'import numpy as np\n'), ((2946, 2994), 'numpy.array', 'np.array', (['[[-1, -1, 0], [1, 1, 1]]'], {'dtype': 'np.int'}), '([[-1, -1, 0], [1, 1, 1]], dtype=np.int)\n', (2954, 2994), True, 'import numpy as np\n'), ((3044, 3145), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['(44100)', '(0.0)', '(2.0)', '(0.0)', '(71.0)', '(0.5)', '(25.0)', 'Modulation.TRIANGULAR', 'Interpolation.LINEAR'], {}), '(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0, Modulation.TRIANGULAR,\n Interpolation.LINEAR)\n', (3057, 3145), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((3408, 3466), 'numpy.array', 'np.array', (['[[[1], [1.1]], [[0.9], [0.6]]]'], {'dtype': 'np.float64'}), '([[[1], [1.1]], [[0.9], [0.6]]], dtype=np.float64)\n', (3416, 3466), True, 'import numpy as np\n'), ((3511, 3585), 'numpy.array', 'np.array', (['[[[1.0], [0.64327485]], [[0.9], [0.35087719]]]'], {'dtype': 'np.float64'}), '([[[1.0], [0.64327485]], [[0.9], [0.35087719]]], dtype=np.float64)\n', (3519, 3585), True, 'import numpy as np\n'), ((3717, 3737), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['(44100)'], {}), '(44100)\n', (3730, 3737), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((4006, 4058), 'numpy.array', 'np.array', (['[[[[[0.44]], [[0.55]]]]]'], {'dtype': 'np.float64'}), '([[[[[0.44]], [[0.55]]]]], dtype=np.float64)\n', (4014, 4058), 
True, 'import numpy as np\n'), ((4103, 4155), 'numpy.array', 'np.array', (['[[[[[0.44]], [[0.55]]]]]'], {'dtype': 'np.float64'}), '([[[[[0.44]], [[0.55]]]]], dtype=np.float64)\n', (4111, 4155), True, 'import numpy as np\n'), ((4186, 4206), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['(44100)'], {}), '(44100)\n', (4199, 4206), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((4475, 4539), 'numpy.array', 'np.array', (['[[[1.1, 1.2, 1.3], [1.4, 1.5, 1.6]]]'], {'dtype': 'np.float64'}), '([[[1.1, 1.2, 1.3], [1.4, 1.5, 1.6]]], dtype=np.float64)\n', (4483, 4539), True, 'import numpy as np\n'), ((4584, 4682), 'numpy.array', 'np.array', (['[[[1.0, 1.0, 1.0], [0.81871345029, 0.87719298245, 0.93567251461]]]'], {'dtype': 'np.float64'}), '([[[1.0, 1.0, 1.0], [0.81871345029, 0.87719298245, 0.93567251461]]],\n dtype=np.float64)\n', (4592, 4682), True, 'import numpy as np\n'), ((4807, 4872), 'mindspore.dataset.NumpySlicesDataset', 'ds.NumpySlicesDataset', (['data', "['channel', 'sample']"], {'shuffle': '(False)'}), "(data, ['channel', 'sample'], shuffle=False)\n", (4828, 4872), True, 'import mindspore.dataset as ds\n'), ((4890, 4910), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['(44100)'], {}), '(44100)\n', (4903, 4910), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((4764, 4791), 'numpy.random.sample', 'np.random.sample', (['(1, 2, 1)'], {}), '((1, 2, 1))\n', (4780, 4791), True, 'import numpy as np\n'), ((5564, 5584), 'pytest.raises', 'pytest.raises', (['error'], {}), '(error)\n', (5577, 5584), False, 'import pytest\n'), ((5612, 5711), 'mindspore.dataset.audio.transforms.Flanger', 'audio.Flanger', (['sample_rate', 'delay', 'depth', 'regen', 'width', 'speed', 'phase', 'modulation', 'interpolation'], {}), '(sample_rate, delay, depth, regen, width, speed, phase,\n modulation, interpolation)\n', (5625, 5711), True, 'import mindspore.dataset.audio.transforms as audio\n'), ((1132, 1153), 'numpy.abs', 'np.abs', (['data_expected'], {}), '(data_expected)\n', (1138, 1153), True, 'import numpy as np\n')]
|
'''
Solution for day 13 of the 2021 Advent of Code calendar.
Run it with the command `python -m adventofcode run_solution -y 2021 13` from the project root.
'''
import numpy as np
from adventofcode.types import Solution
def part1(data, exit_on_first_fold=False):
rows = [row for row in data.splitlines() if row and 'fold' not in row]
board = np.zeros(
(max([int(row.split(",")[1]) for row in rows]) + 1,
max([int(row.split(",")[0]) for row in rows]) + 1))
for row in data.splitlines():
if not row:
continue
if 'fold along x=' in row:
# reverse needed
x = int(row.split("=")[1])
base = board[:, 0:x]
fold = board[:, x + 1:]
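            # mirror the folded half back onto the base half; overlapping dots simply add up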
reversed = np.flip(fold[::-1])
print(base, "\n", reversed, "\n")
board = base + reversed
print(board)
if exit_on_first_fold:
return board
continue
if 'fold along y=' in row:
print("folding..")
y = int(row.split("=")[1])
base = board[0:y, :]
fold = board[y + 1:, :]
reversed = np.fliplr(np.flip(fold))
print(base, "\n", reversed, "\n", fold)
board = base + reversed
print(board)
if exit_on_first_fold:
return board
continue
y, x = [int(c) for c in row.split(",")]
board[x][y] = 1
return board
def run(data: str) -> Solution:
return np.count_nonzero(part1(data, exit_on_first_fold=True)), part1(data, exit_on_first_fold=False)
|
[
"numpy.flip"
] |
[((754, 773), 'numpy.flip', 'np.flip', (['fold[::-1]'], {}), '(fold[::-1])\n', (761, 773), True, 'import numpy as np\n'), ((1173, 1186), 'numpy.flip', 'np.flip', (['fold'], {}), '(fold)\n', (1180, 1186), True, 'import numpy as np\n')]
|
"""
Square
======
"""
import numpy as np
from ..topology_graph import Edge
from .cof import Cof
from .vertices import LinearVertex, NonLinearVertex
class Square(Cof):
"""
    Represents a square COF topology graph.
Unoptimized construction
.. moldoc::
import moldoc.molecule as molecule
import stk
cof = stk.ConstructedMolecule(
topology_graph=stk.cof.PeriodicSquare(
building_blocks=(
stk.BuildingBlock(
smiles='BrCC(Br)',
functional_groups=[stk.BromoFactory()],
),
stk.BuildingBlock(
smiles='BrC1=C(Br)C(Br)=C1Br',
functional_groups=[stk.BromoFactory()],
),
),
lattice_size=(3, 3, 1),
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
cof.get_atoms(),
cof.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in cof.get_bonds()
if all(p == 0 for p in bond.get_periodicity())
),
)
``Collapser(scale_steps=False)`` optimized construction
.. moldoc::
import moldoc.molecule as molecule
import stk
cof = stk.ConstructedMolecule(
topology_graph=stk.cof.PeriodicSquare(
building_blocks=(
stk.BuildingBlock(
smiles='BrCC(Br)',
functional_groups=[stk.BromoFactory()],
),
stk.BuildingBlock(
smiles='BrC1=C(Br)C(Br)=C1Br',
functional_groups=[stk.BromoFactory()],
),
),
lattice_size=(3, 3, 1),
optimizer=stk.Collapser(scale_steps=False),
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
cof.get_atoms(),
cof.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in cof.get_bonds()
if all(p == 0 for p in bond.get_periodicity())
),
)
Building blocks with four and two functional groups are required
for this topology graph.
When using a :class:`dict` for the `building_blocks` parameter,
as in :ref:`cof-topology-graph-examples`:
*Multi-Building Block COF Construction*, a
:class:`.BuildingBlock`, with the following number of functional
groups, needs to be assigned to each of the following vertex ids:
| 4-functional groups: 0
| 2-functional groups: 1 to 2
See :class:`.Cof` for more details and examples.
"""
_lattice_constants = _a, _b, _c = (
np.array([1., 0., 0.]),
np.array([0., 1., 0.]),
np.array([0., 0., 1.])
)
_non_linears = (
NonLinearVertex(0, (0.5)*_a + (0.5)*_b + (0.5)*_c),
)
_vertex_prototypes = (
*_non_linears,
LinearVertex.init_at_shifted_center(
id=1,
vertices=(_non_linears[0], _non_linears[0]),
cell_shifts=((0, 0, 0), (1, 0, 0)),
lattice_constants=_lattice_constants,
),
LinearVertex.init_at_shifted_center(
id=2,
vertices=(_non_linears[0], _non_linears[0]),
cell_shifts=((0, 0, 0), (0, 1, 0)),
lattice_constants=_lattice_constants,
),
)
_edge_prototypes = (
Edge(0, _vertex_prototypes[1], _vertex_prototypes[0]),
Edge(
id=1,
vertex1=_vertex_prototypes[1],
vertex2=_vertex_prototypes[0],
periodicity=(1, 0, 0),
),
Edge(2, _vertex_prototypes[2], _vertex_prototypes[0]),
Edge(
id=3,
vertex1=_vertex_prototypes[2],
vertex2=_vertex_prototypes[0],
periodicity=(0, 1, 0),
),
)
|
[
"numpy.array"
] |
[((3649, 3674), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (3657, 3674), True, 'import numpy as np\n'), ((3681, 3706), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (3689, 3706), True, 'import numpy as np\n'), ((3713, 3738), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (3721, 3738), True, 'import numpy as np\n')]
|
import pandas as pd
import matplotlib.cm as cm
import numpy as np
import matplotlib.pyplot as plt
def plot(problemVariants, *, zero, outfile, numThreads):
columns = ['Problem', 'NotTriedYet', 'Scheduled', 'Success', 'Timeout', 'Stopped', 'Ended']
colors = ['w', 'tab:purple', 'tab:green', 'tab:orange', 'tab:red', 'w']
problems = {}
for problemVariant in problemVariants:
problem = problemVariant.problem
if not (problem.filePattern in problems):
problems[problem.filePattern] = problem
variants = {}
for problemVariant in problemVariants:
v = problemVariant.variant
if not (v in variants):
variants[v] = []
'''
Overall time used
'''
t_max = 0
for problemVariant in problemVariants:
t_max = max(t_max, problemVariant.process.timer.getEnd(zero))
for k, problem in sorted(problems.items(), reverse=True):
for v in sorted(variants.keys(), reverse=True):
if not (v in problem.variants):
variants[v].append([
problem.filePattern,
t_max, # time waiting
0, # time scheduled
0, # time success
0, # time timeout
0, # time error
0, # time ended
])
else:
problemVariant = problem.variants[v]
scheduled = problemVariant.process.timer.getScheduled(zero)
started = problemVariant.process.timer.getStart(zero)
ended = problemVariant.process.timer.getEnd(zero)
if problemVariant.isSuccessful():
state = 'Success'
elif problemVariant.szsStatus == 'Timeout':
state = 'Timeout'
else:
state = 'Stopped'
variants[v].append([
problem.filePattern,
scheduled, # time waiting
started - scheduled, # time scheduled
ended - started if state == 'Success' else 0, # time success
ended - started if state == 'Timeout' else 0, # time timeout
ended - started if state == 'Stopped' else 0, # time error
t_max - ended,
])
dfs = []
labels = []
for v, vd in variants.items():
df = pd.DataFrame(vd,
columns=columns,
).set_index('Problem')
dfs.append(df)
labels.append("v"+v)
ax = plot_grouped_stacked_bar(dfs, labels, title='LTB Scheduler - Problem Timings using {} Threads'.format(numThreads), color=colors)
ax.set_ylabel("Problems")
ax.set_xlabel("Time in s")
fig = ax.get_figure()
fig.set_size_inches(15, 1*len(problems))
fig.savefig(outfile)
def plot_grouped_stacked_bar(dfall, labels, *, title, H="/", **kwargs):
'''
Given a list of dataframes, with identical columns and index, create a clustered stacked bar plot.
Args:
    * labels: a list of the names of the dataframes, used for the legend
    * title: a string for the title of the plot
    * H: the hatch pattern used to distinguish the different dataframes
Shamelessly taken and modified version of https://stackoverflow.com/a/22845857 thank you jrjc
'''
n_df = len(dfall)
n_col = len(dfall[0].columns)
n_ind = len(dfall[0].index)
axe = plt.subplot(111)
for df in dfall:
axe = df.plot(
kind="barh",
linewidth=0,
stacked=True,
ax=axe,
legend=False,
grid=False,
**kwargs
) # single bar plots
h,l = axe.get_legend_handles_labels()
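    # offset and shrink each dataframe's bars so the dataframes sit side by side within each index group, distinguished by hatch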
for i in range(0, n_df * n_col, n_col): # len(h) = n_col * n_df
for j, pa in enumerate(h[i:i+n_col]):
for rect in pa.patches: # for each index
rect.set_y(rect.get_y() + 1 / float(n_df + 1) * i / float(n_col))
rect.set_hatch(H * int(i / n_col)) #edited part
rect.set_height(1 / float(n_df + 1))
axe.set_yticks((np.arange(0, 2 * n_ind, 2) + 1 / float(n_df + 1)) / 2.)
axe.set_yticklabels(df.index, rotation = 0)
axe.set_title(title)
# Add invisible data to add another legend
n=[]
for i in range(n_df):
n.append(axe.bar(0, 0, color="gray", hatch=H * i))
l1 = axe.legend(h[:n_col], l[:n_col], loc=[1.01, 0.5])
if labels is not None:
l2 = plt.legend(n, labels, loc=[1.01, 0.1])
axe.add_artist(l1)
return axe
|
[
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend",
"numpy.arange"
] |
[((3598, 3614), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3609, 3614), True, 'import matplotlib.pyplot as plt\n'), ((4671, 4709), 'matplotlib.pyplot.legend', 'plt.legend', (['n', 'labels'], {'loc': '[1.01, 0.1]'}), '(n, labels, loc=[1.01, 0.1])\n', (4681, 4709), True, 'import matplotlib.pyplot as plt\n'), ((2572, 2605), 'pandas.DataFrame', 'pd.DataFrame', (['vd'], {'columns': 'columns'}), '(vd, columns=columns)\n', (2584, 2605), True, 'import pandas as pd\n'), ((4292, 4318), 'numpy.arange', 'np.arange', (['(0)', '(2 * n_ind)', '(2)'], {}), '(0, 2 * n_ind, 2)\n', (4301, 4318), True, 'import numpy as np\n')]
|
import os
from .vendored import colorconv
import numpy as np
import vispy.color
_matplotlib_list_file = os.path.join(os.path.dirname(__file__),
'matplotlib_cmaps.txt')
with open(_matplotlib_list_file) as fin:
matplotlib_colormaps = [line.rstrip() for line in fin]
def _all_rgb():
"""Return all 256**3 valid rgb tuples."""
base = np.arange(256, dtype=np.uint8)
r, g, b = np.meshgrid(base, base, base, indexing='ij')
return np.stack((r, g, b), axis=-1).reshape((-1, 3))
# obtained with colorconv.rgb2luv(_all_rgb().reshape((-1, 256, 3)))
LUVMIN = np.array([0., -83.07790815, -134.09790293])
LUVMAX = np.array([100., 175.01447356, 107.39905336])
LUVRNG = LUVMAX - LUVMIN
# obtained with colorconv.rgb2lab(_all_rgb().reshape((-1, 256, 3)))
LABMIN = np.array([0., -86.18302974, -107.85730021])
LABMAX = np.array([100., 98.23305386, 94.47812228])
LABRNG = LABMAX - LABMIN
def _validate_rgb(colors, *, tolerance=0.):
"""Return the subset of colors that is in [0, 1] for all channels.
Parameters
----------
colors : array of float, shape (N, 3)
Input colors in RGB space.
Other Parameters
----------------
tolerance : float, optional
Values outside of the range by less than ``tolerance`` are allowed and
clipped to be within the range.
Returns
-------
filtered_colors : array of float, shape (M, 3), M <= N
The subset of colors that are in valid RGB space.
Examples
--------
>>> colors = np.array([[ 0. , 1., 1. ],
... [ 1.1, 0., -0.03],
... [ 1.2, 1., 0.5 ]])
>>> _validate_rgb(colors)
array([[0., 1., 1.]])
>>> _validate_rgb(colors, tolerance=0.15)
array([[0., 1., 1.],
[1., 0., 0.]])
"""
lo = 0 - tolerance
hi = 1 + tolerance
valid = np.all((colors > lo) & (colors < hi), axis=1)
filtered_colors = np.clip(colors[valid], 0, 1)
return filtered_colors
def _low_discrepancy(dim, n, seed=0.5):
"""Generate a 1d, 2d, or 3d low discrepancy sequence of coordinates.
Parameters
----------
dim : one of {1, 2, 3}
The dimensionality of the sequence.
n : int
How many points to generate.
seed : float or array of float, shape (dim,)
The seed from which to start the quasirandom sequence.
Returns
-------
pts : array of float, shape (n, dim)
The sampled points.
References
----------
..[1]: http://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences/
"""
phi1 = 1.6180339887498948482
phi2 = 1.32471795724474602596
phi3 = 1.22074408460575947536
seed = np.broadcast_to(seed, (1, dim))
phi = np.array([phi1, phi2, phi3])
g = 1 / phi
n = np.reshape(np.arange(n), (n, 1))
pts = (seed + (n * g[:dim])) % 1
return pts
def _color_random(n, *, colorspace='lab', tolerance=0.0, seed=0.5):
"""Generate n random RGB colors uniformly from LAB or LUV space.
Parameters
----------
n : int
Number of colors to generate.
colorspace : str, one of {'lab', 'luv', 'rgb'}
The colorspace from which to get random colors.
tolerance : float
How much margin to allow for out-of-range RGB values (these are
clipped to be in-range).
seed : float or array of float, shape (3,)
Value from which to start the quasirandom sequence.
Returns
-------
rgb : array of float, shape (n, 3)
RGB colors chosen uniformly at random from given colorspace.
"""
factor = 6 # about 1/5 of random LUV tuples are inside the space
expand_factor = 2
rgb = np.zeros((0, 3))
while len(rgb) < n:
random = _low_discrepancy(3, n * factor, seed=seed)
if colorspace == 'luv':
raw_rgb = colorconv.luv2rgb(random * LUVRNG + LUVMIN)
elif colorspace == 'rgb':
raw_rgb = random
else: # 'lab' by default
raw_rgb = colorconv.lab2rgb(random * LABRNG + LABMIN)
rgb = _validate_rgb(raw_rgb, tolerance=tolerance)
factor *= expand_factor
return rgb[:n]
def label_colormap(labels, seed=0.5, max_label=None):
"""Produce a colormap suitable for use with a given label set.
Parameters
----------
labels : array of int
A set of labels or label image.
seed : float or array of float, length 3
The seed for the low discrepancy sequence generator.
max_label : int, optional
The maximum label in `labels`. Computed if not given.
Returns
-------
cmap : vispy.color.Colormap
A colormap for use with ``labels``. The labels are remapped so that
the maximum label falls on 1.0, since vispy requires colormaps to map
within [0, 1].
Notes
-----
0 always maps to fully transparent.
"""
unique_labels = np.unique(labels)
if unique_labels[0] != 0:
unique_labels = np.concatenate([[0], unique_labels])
n = len(unique_labels)
max_label = max_label or np.max(unique_labels)
unique_labels_float = unique_labels / max_label
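    # control points sit halfway between consecutive normalized labels so that, with 'zero' interpolation, each label maps to one constant color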
midpoints = np.convolve(unique_labels_float, [0.5, 0.5], mode='valid')
control_points = np.concatenate(([0.], midpoints, [1.]))
# make sure to add an alpha channel to the colors
colors = np.concatenate((_color_random(n, seed=seed),
np.full((n, 1), 0.7)), axis=1)
colors[0, :] = 0 # ensure alpha is 0 for label 0
cmap = vispy.color.Colormap(colors=colors, controls=control_points,
interpolation='zero')
return cmap
|
[
"numpy.clip",
"numpy.convolve",
"numpy.unique",
"numpy.full",
"numpy.max",
"os.path.dirname",
"numpy.array",
"numpy.zeros",
"numpy.stack",
"numpy.concatenate",
"numpy.meshgrid",
"numpy.all",
"numpy.broadcast_to",
"numpy.arange"
] |
[((609, 653), 'numpy.array', 'np.array', (['[0.0, -83.07790815, -134.09790293]'], {}), '([0.0, -83.07790815, -134.09790293])\n', (617, 653), True, 'import numpy as np\n'), ((662, 707), 'numpy.array', 'np.array', (['[100.0, 175.01447356, 107.39905336]'], {}), '([100.0, 175.01447356, 107.39905336])\n', (670, 707), True, 'import numpy as np\n'), ((810, 854), 'numpy.array', 'np.array', (['[0.0, -86.18302974, -107.85730021]'], {}), '([0.0, -86.18302974, -107.85730021])\n', (818, 854), True, 'import numpy as np\n'), ((863, 906), 'numpy.array', 'np.array', (['[100.0, 98.23305386, 94.47812228]'], {}), '([100.0, 98.23305386, 94.47812228])\n', (871, 906), True, 'import numpy as np\n'), ((120, 145), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (135, 145), False, 'import os\n'), ((383, 413), 'numpy.arange', 'np.arange', (['(256)'], {'dtype': 'np.uint8'}), '(256, dtype=np.uint8)\n', (392, 413), True, 'import numpy as np\n'), ((428, 472), 'numpy.meshgrid', 'np.meshgrid', (['base', 'base', 'base'], {'indexing': '"""ij"""'}), "(base, base, base, indexing='ij')\n", (439, 472), True, 'import numpy as np\n'), ((1880, 1925), 'numpy.all', 'np.all', (['((colors > lo) & (colors < hi))'], {'axis': '(1)'}), '((colors > lo) & (colors < hi), axis=1)\n', (1886, 1925), True, 'import numpy as np\n'), ((1948, 1976), 'numpy.clip', 'np.clip', (['colors[valid]', '(0)', '(1)'], {}), '(colors[valid], 0, 1)\n', (1955, 1976), True, 'import numpy as np\n'), ((2721, 2752), 'numpy.broadcast_to', 'np.broadcast_to', (['seed', '(1, dim)'], {}), '(seed, (1, dim))\n', (2736, 2752), True, 'import numpy as np\n'), ((2763, 2791), 'numpy.array', 'np.array', (['[phi1, phi2, phi3]'], {}), '([phi1, phi2, phi3])\n', (2771, 2791), True, 'import numpy as np\n'), ((3705, 3721), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {}), '((0, 3))\n', (3713, 3721), True, 'import numpy as np\n'), ((4917, 4934), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (4926, 4934), True, 'import numpy as np\n'), ((5172, 5230), 'numpy.convolve', 'np.convolve', (['unique_labels_float', '[0.5, 0.5]'], {'mode': '"""valid"""'}), "(unique_labels_float, [0.5, 0.5], mode='valid')\n", (5183, 5230), True, 'import numpy as np\n'), ((5252, 5293), 'numpy.concatenate', 'np.concatenate', (['([0.0], midpoints, [1.0])'], {}), '(([0.0], midpoints, [1.0]))\n', (5266, 5293), True, 'import numpy as np\n'), ((2827, 2839), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2836, 2839), True, 'import numpy as np\n'), ((4989, 5025), 'numpy.concatenate', 'np.concatenate', (['[[0], unique_labels]'], {}), '([[0], unique_labels])\n', (5003, 5025), True, 'import numpy as np\n'), ((5082, 5103), 'numpy.max', 'np.max', (['unique_labels'], {}), '(unique_labels)\n', (5088, 5103), True, 'import numpy as np\n'), ((484, 512), 'numpy.stack', 'np.stack', (['(r, g, b)'], {'axis': '(-1)'}), '((r, g, b), axis=-1)\n', (492, 512), True, 'import numpy as np\n'), ((5433, 5453), 'numpy.full', 'np.full', (['(n, 1)', '(0.7)'], {}), '((n, 1), 0.7)\n', (5440, 5453), True, 'import numpy as np\n')]
|
from dipy.denoise.nlmeans import nlmeans_3d, nlmeans
from dipy.denoise.noise_estimate import estimate_sigma
import cv2 as cv
import numpy as np
import nibabel as nib
def preprocess(nifti, name):
"""Preprocess the 3D MRI image before image segmentation"""
image = nifti.get_fdata()
sigma = estimate_sigma(image, N=16) # N: number of coils in the receiver of the MRI scanner
denoised = nlmeans(image, sigma)
denoised_nifti = nib.Nifti1Image(denoised, nifti.affine)
nib.save(denoised_nifti, f'lab4/data/clean_{name}.nii.gz')
def cluster(nifti, name):
"""Segment the 3D image slice by slice, then merge all slices and save as nifti"""
n_cluster = 7 # number of clusters
image = nifti.get_fdata(dtype=np.float32)
for i, slice in enumerate(image):
data = slice.reshape((-1, 1))
vessel, vessel_id = max(data), np.argmax(data) # vessel is the brightest pixel
if vessel < 10: # slice has no vessels (perhaps outside the brain)
image[i, ...] = 0 # enforce binary property so as to view polygon model in imeka
continue
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 1) # (type, max_iter, epsilon)
_, labels, _ = cv.kmeans(data, n_cluster, None, criteria, 10, cv.KMEANS_RANDOM_CENTERS)
cluster_id = labels[vessel_id] # cluster id of all vessels
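        # binarize the slice: pixels in the vessel cluster become 255, everything else 0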
data[labels == cluster_id] = 255
data[labels != cluster_id] = 0
image[i, ...] = data.reshape(slice.shape)
output = nib.Nifti1Image(image, nifti.affine)
nib.save(output, f'lab4/data/out_{name}.nii.gz')
def run():
swi = nib.load('lab4/data/invert_swi.nii.gz')
tof = nib.load('lab4/data/bet_tof.nii.gz')
preprocess(swi, 'swi')
preprocess(tof, 'tof')
cluster(nib.load('lab4/data/clean_swi.nii.gz'), "swi")
cluster(nib.load('lab4/data/clean_tof.nii.gz'), "tof")
|
[
"dipy.denoise.noise_estimate.estimate_sigma",
"dipy.denoise.nlmeans.nlmeans",
"nibabel.save",
"nibabel.load",
"cv2.kmeans",
"numpy.argmax",
"nibabel.Nifti1Image"
] |
[((303, 330), 'dipy.denoise.noise_estimate.estimate_sigma', 'estimate_sigma', (['image'], {'N': '(16)'}), '(image, N=16)\n', (317, 330), False, 'from dipy.denoise.noise_estimate import estimate_sigma\n'), ((403, 424), 'dipy.denoise.nlmeans.nlmeans', 'nlmeans', (['image', 'sigma'], {}), '(image, sigma)\n', (410, 424), False, 'from dipy.denoise.nlmeans import nlmeans_3d, nlmeans\n'), ((446, 485), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['denoised', 'nifti.affine'], {}), '(denoised, nifti.affine)\n', (461, 485), True, 'import nibabel as nib\n'), ((490, 548), 'nibabel.save', 'nib.save', (['denoised_nifti', 'f"""lab4/data/clean_{name}.nii.gz"""'], {}), "(denoised_nifti, f'lab4/data/clean_{name}.nii.gz')\n", (498, 548), True, 'import nibabel as nib\n'), ((1522, 1558), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['image', 'nifti.affine'], {}), '(image, nifti.affine)\n', (1537, 1558), True, 'import nibabel as nib\n'), ((1563, 1611), 'nibabel.save', 'nib.save', (['output', 'f"""lab4/data/out_{name}.nii.gz"""'], {}), "(output, f'lab4/data/out_{name}.nii.gz')\n", (1571, 1611), True, 'import nibabel as nib\n'), ((1635, 1674), 'nibabel.load', 'nib.load', (['"""lab4/data/invert_swi.nii.gz"""'], {}), "('lab4/data/invert_swi.nii.gz')\n", (1643, 1674), True, 'import nibabel as nib\n'), ((1685, 1721), 'nibabel.load', 'nib.load', (['"""lab4/data/bet_tof.nii.gz"""'], {}), "('lab4/data/bet_tof.nii.gz')\n", (1693, 1721), True, 'import nibabel as nib\n'), ((1236, 1308), 'cv2.kmeans', 'cv.kmeans', (['data', 'n_cluster', 'None', 'criteria', '(10)', 'cv.KMEANS_RANDOM_CENTERS'], {}), '(data, n_cluster, None, criteria, 10, cv.KMEANS_RANDOM_CENTERS)\n', (1245, 1308), True, 'import cv2 as cv\n'), ((1789, 1827), 'nibabel.load', 'nib.load', (['"""lab4/data/clean_swi.nii.gz"""'], {}), "('lab4/data/clean_swi.nii.gz')\n", (1797, 1827), True, 'import nibabel as nib\n'), ((1848, 1886), 'nibabel.load', 'nib.load', (['"""lab4/data/clean_tof.nii.gz"""'], {}), "('lab4/data/clean_tof.nii.gz')\n", (1856, 1886), True, 'import nibabel as nib\n'), ((866, 881), 'numpy.argmax', 'np.argmax', (['data'], {}), '(data)\n', (875, 881), True, 'import numpy as np\n')]
|
"""
@author: <NAME>
@contact: <EMAIL>
"""
import random
import copy
import numpy as np
from torch.utils.data.dataset import ConcatDataset
from torch.utils.data.sampler import Sampler
class DefaultSampler(Sampler):
r"""Traverse all :math:`N` domains, randomly select :math:`K` samples in each domain to form a mini-batch of size
:math:`N\times K`.
Args:
data_source (ConcatDataset): dataset that contains data from multiple domains
batch_size (int): mini-batch size (:math:`N\times K` here)
"""
def __init__(self, data_source: ConcatDataset, batch_size: int):
super(Sampler, self).__init__()
self.num_all_domains = len(data_source.cumulative_sizes)
self.sample_idxes_per_domain = []
start = 0
for end in data_source.cumulative_sizes:
idxes = [idx for idx in range(start, end)]
self.sample_idxes_per_domain.append(idxes)
start = end
assert batch_size % self.num_all_domains == 0
self.batch_size_per_domain = batch_size // self.num_all_domains
self.length = len(list(self.__iter__()))
def __iter__(self):
sample_idxes_per_domain = copy.deepcopy(self.sample_idxes_per_domain)
final_idxes = []
stop_flag = False
while not stop_flag:
for domain in range(self.num_all_domains):
sample_idxes = sample_idxes_per_domain[domain]
selected_idxes = random.sample(sample_idxes, self.batch_size_per_domain)
final_idxes.extend(selected_idxes)
for idx in selected_idxes:
sample_idxes_per_domain[domain].remove(idx)
remaining_size = len(sample_idxes_per_domain[domain])
if remaining_size < self.batch_size_per_domain:
stop_flag = True
return iter(final_idxes)
def __len__(self):
return self.length
class RandomDomainSampler(Sampler):
r"""Randomly sample :math:`N` domains, then randomly select :math:`K` samples in each domain to form a mini-batch of
size :math:`N\times K`.
Args:
data_source (ConcatDataset): dataset that contains data from multiple domains
batch_size (int): mini-batch size (:math:`N\times K` here)
n_domains_per_batch (int): number of domains to select in a single mini-batch (:math:`N` here)
"""
def __init__(self, data_source: ConcatDataset, batch_size: int, n_domains_per_batch: int):
super(Sampler, self).__init__()
self.n_domains_in_dataset = len(data_source.cumulative_sizes)
self.n_domains_per_batch = n_domains_per_batch
assert self.n_domains_in_dataset >= self.n_domains_per_batch
self.sample_idxes_per_domain = []
start = 0
for end in data_source.cumulative_sizes:
idxes = [idx for idx in range(start, end)]
self.sample_idxes_per_domain.append(idxes)
start = end
assert batch_size % n_domains_per_batch == 0
self.batch_size_per_domain = batch_size // n_domains_per_batch
self.length = len(list(self.__iter__()))
def __iter__(self):
sample_idxes_per_domain = copy.deepcopy(self.sample_idxes_per_domain)
domain_idxes = [idx for idx in range(self.n_domains_in_dataset)]
final_idxes = []
stop_flag = False
while not stop_flag:
selected_domains = random.sample(domain_idxes, self.n_domains_per_batch)
for domain in selected_domains:
sample_idxes = sample_idxes_per_domain[domain]
if len(sample_idxes) < self.batch_size_per_domain:
selected_idxes = np.random.choice(sample_idxes, self.batch_size_per_domain, replace=True)
else:
selected_idxes = random.sample(sample_idxes, self.batch_size_per_domain)
final_idxes.extend(selected_idxes)
for idx in selected_idxes:
if idx in sample_idxes_per_domain[domain]:
sample_idxes_per_domain[domain].remove(idx)
remaining_size = len(sample_idxes_per_domain[domain])
if remaining_size < self.batch_size_per_domain:
stop_flag = True
return iter(final_idxes)
def __len__(self):
return self.length
|
[
"numpy.random.choice",
"random.sample",
"copy.deepcopy"
] |
[((1183, 1226), 'copy.deepcopy', 'copy.deepcopy', (['self.sample_idxes_per_domain'], {}), '(self.sample_idxes_per_domain)\n', (1196, 1226), False, 'import copy\n'), ((3199, 3242), 'copy.deepcopy', 'copy.deepcopy', (['self.sample_idxes_per_domain'], {}), '(self.sample_idxes_per_domain)\n', (3212, 3242), False, 'import copy\n'), ((3427, 3480), 'random.sample', 'random.sample', (['domain_idxes', 'self.n_domains_per_batch'], {}), '(domain_idxes, self.n_domains_per_batch)\n', (3440, 3480), False, 'import random\n'), ((1458, 1513), 'random.sample', 'random.sample', (['sample_idxes', 'self.batch_size_per_domain'], {}), '(sample_idxes, self.batch_size_per_domain)\n', (1471, 1513), False, 'import random\n'), ((3693, 3765), 'numpy.random.choice', 'np.random.choice', (['sample_idxes', 'self.batch_size_per_domain'], {'replace': '(True)'}), '(sample_idxes, self.batch_size_per_domain, replace=True)\n', (3709, 3765), True, 'import numpy as np\n'), ((3825, 3880), 'random.sample', 'random.sample', (['sample_idxes', 'self.batch_size_per_domain'], {}), '(sample_idxes, self.batch_size_per_domain)\n', (3838, 3880), False, 'import random\n')]
|
from stuff import *
# Get weekday pattern from case data in order to identify exact date on SGTF graph
# 0 mod 7 is Thursday in daytodate notation (being 1970-01-01)
nc={}
with open('SAcases','r') as fp:
for x in fp:
y=x.split()
nc[datetoday(y[0])]=int(y[1])
minday=min(nc)
maxday=max(nc)
c0=[0]*7
c1=[0]*7
for d in range(minday+3,maxday-3):
ex=[nc[r] for r in range(d-3,d+4)]
if min(ex)>=50:
i=d%7
c0[i]+=1
c1[i]+=nc[d]*7/sum(ex)
#for i in range(7):
# print(i,c1[i]/c0[i])
# Thur 1.184
# Fri 1.170
# Sat 1.122
# Sun 0.913
# Mon 0.655
# Tue 0.766
# Wed 1.158
if 0:
infile='OmicronSGTF.png'
dateorigin=datetoday('2021-10-01')-564
row0,row1=23,359
col0,col1=81,614
y0=(0,358);y1=(50,43)
z0=(0,357);z1=(1600,126)
if 1:
infile='OmicronSGTF_frompdf.png'
dateorigin=datetoday('2021-10-01')-564
row0,row1=11,345
col0,col1=81,614
y0=(0,344.5);y1=(50,32)
z0=(0,344.5);z1=(2000,57.5)
# SGTF image from slide 12 of https://sacoronavirus.co.za/2021/11/25/sars-cov-2-sequencing-new-variant-update-25-november-2021/
# resized down by a factor of 2/3 in order to get 1 horizontal pixel = 1 day.
from PIL import Image
import numpy as np
im_frame = Image.open(infile)
cc = np.array(im_frame,dtype=int)
im_frame.close()
# Top-leftian, row before column
r=cc.shape[0]
c=cc.shape[1]
# Get blueness
bb=cc[:,:,2]*2-(cc[:,:,0]+cc[:,:,1])
def process(bb,name):
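  # per image column (one day): find the row where the filtered signal is strongest, mark it, and map that row to an SGTF percentage via the y0/y1 calibration points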
bb1=bb[row0:row1,:]
mm=row0+np.argmax(bb1,axis=0)
im=Image.fromarray(((bb-bb.min())/(bb.max()-bb.min())*255.999+0.0005).astype(np.dtype('uint8')))
im.save(name+'_filtered.png')
oo=cc.astype(np.dtype('uint8'))
for x in range(col0,col1): oo[mm[x],x]=[255,0,0]
im=Image.fromarray(oo)
im.save(name+'_sgtf.png')
sgtf={}
for x in range(col0,col1):
sgtf[daytodate(dateorigin+x)]=(mm[x]-y1[1])/(y0[1]-y1[1])*(y0[0]-y1[0])+y1[0]
with open(name+'_sgtf','w') as fp:
for date in sorted(list(sgtf)):
print(date,"%6.2f"%sgtf[date],file=fp)
return mm,sgtf
process(bb,'simple')
lrantialias=bb-np.maximum(np.roll(bb,1,1),np.roll(bb,-1,1))
process(lrantialias,'LRantialias')
# Hybrid because deantialiasing method is likely to work well for the vertical spike, but not when derivative is low.
spike=605
hybrid=np.concatenate([bb[:,:spike],lrantialias[:,spike:]],axis=1)
mm,sgtf=process(hybrid,'hybrid')
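# redness: how far the red channel exceeds the stronger of the other two channels (used below to locate the red count bars)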
dd=cc[:,:,0]-np.maximum(cc[:,:,1],cc[:,:,2])
oo=(dd>3).astype(np.dtype('uint8'))*255
im=Image.fromarray(oo)
im.save('temp.png')
ee=(dd>3)*1000+np.tile(np.arange(r-1,-1,-1)[:,None],(1,c))
process(ee,'simplered')
oo=cc.astype(np.dtype('uint8'))
nn=np.zeros(c)
for x in range(col0,col1):
s0=1
s1=10
f=0.5
mx=0
for y in range(row1-1,row0-1,-1):
if abs(y-mm[x])>1:
s0=(1-f)*s0+f*1
s1=(1-f)*s1+f*dd[y,x]
#print(y,dd[y,x],s1/s0)
if s1/s0>5: mx=y
nn[x]=mx
oo[mx,x]=[0,255,0]
oo[mm[x],x]=[255,0,0]
im=Image.fromarray(oo)
im.save('sgtf+counts.png')
with open('SA_sgtf','w') as fp:
print("# Date %SGTF Tests num(S-) num(S+)",file=fp)
for x in range(col0,col1):
if nn[x]>0:
date=daytodate(dateorigin+x)
n=max((nn[x]-z1[1])/(z0[1]-z1[1])*(z0[0]-z1[0])+z1[0],0)
s=sgtf[date]
print(date,"%6.2f %6.1f %6.1f %6.1f"%(s,n,s/100*n,(1-s/100)*n),file=fp)
|
[
"numpy.dtype",
"PIL.Image.fromarray",
"PIL.Image.open",
"numpy.roll",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"numpy.maximum",
"numpy.arange"
] |
[((1195, 1213), 'PIL.Image.open', 'Image.open', (['infile'], {}), '(infile)\n', (1205, 1213), False, 'from PIL import Image\n'), ((1219, 1248), 'numpy.array', 'np.array', (['im_frame'], {'dtype': 'int'}), '(im_frame, dtype=int)\n', (1227, 1248), True, 'import numpy as np\n'), ((2239, 2302), 'numpy.concatenate', 'np.concatenate', (['[bb[:, :spike], lrantialias[:, spike:]]'], {'axis': '(1)'}), '([bb[:, :spike], lrantialias[:, spike:]], axis=1)\n', (2253, 2302), True, 'import numpy as np\n'), ((2421, 2440), 'PIL.Image.fromarray', 'Image.fromarray', (['oo'], {}), '(oo)\n', (2436, 2440), False, 'from PIL import Image\n'), ((2581, 2592), 'numpy.zeros', 'np.zeros', (['c'], {}), '(c)\n', (2589, 2592), True, 'import numpy as np\n'), ((2872, 2891), 'PIL.Image.fromarray', 'Image.fromarray', (['oo'], {}), '(oo)\n', (2887, 2891), False, 'from PIL import Image\n'), ((1679, 1698), 'PIL.Image.fromarray', 'Image.fromarray', (['oo'], {}), '(oo)\n', (1694, 1698), False, 'from PIL import Image\n'), ((2346, 2382), 'numpy.maximum', 'np.maximum', (['cc[:, :, 1]', 'cc[:, :, 2]'], {}), '(cc[:, :, 1], cc[:, :, 2])\n', (2356, 2382), True, 'import numpy as np\n'), ((2559, 2576), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (2567, 2576), True, 'import numpy as np\n'), ((1435, 1457), 'numpy.argmax', 'np.argmax', (['bb1'], {'axis': '(0)'}), '(bb1, axis=0)\n', (1444, 1457), True, 'import numpy as np\n'), ((1604, 1621), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (1612, 1621), True, 'import numpy as np\n'), ((2034, 2051), 'numpy.roll', 'np.roll', (['bb', '(1)', '(1)'], {}), '(bb, 1, 1)\n', (2041, 2051), True, 'import numpy as np\n'), ((2050, 2068), 'numpy.roll', 'np.roll', (['bb', '(-1)', '(1)'], {}), '(bb, -1, 1)\n', (2057, 2068), True, 'import numpy as np\n'), ((2395, 2412), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (2403, 2412), True, 'import numpy as np\n'), ((1536, 1553), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (1544, 1553), True, 'import numpy as np\n'), ((2485, 2509), 'numpy.arange', 'np.arange', (['(r - 1)', '(-1)', '(-1)'], {}), '(r - 1, -1, -1)\n', (2494, 2509), True, 'import numpy as np\n')]
|
import numpy as np
from sklearn import multioutput
import xgboost as xgb
class Regressor():
    def __init__(self):
        super().__init__()
self.model = None
def fit(self, X, y):
        # Create an empty model bag
self.model_bag = dict()
# Data bag
self.data_bag = dict()
max_cascade_size = 9
for i in range(1, max_cascade_size):
self.data_bag[i] = dict()
self.model_bag[i] = dict()
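        # group training pairs by cascade length and by the exact module-configuration string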
for inp, out in zip(X, y):
metadata = inp[0]
signal = inp[1]
number_modules_in_cascade = len(metadata)
metadata_str = ""
for i in range(number_modules_in_cascade):
metadata_str += (metadata[i][0] + "_" + str(metadata[i][1][0]) + "_" + str(metadata[i][1][1]) + "-")
metadata_str = metadata_str[:-1]
try:
self.data_bag[number_modules_in_cascade][metadata_str].append([signal, out])
except:
self.data_bag[number_modules_in_cascade][metadata_str] = [[signal, out]]
all_train_input_EDFA = []
all_train_output_EDFA = []
all_train_input_SMF = []
all_train_output_SMF = []
# Train one model per size 1 cascade
for metadata_str in list(self.data_bag[1].keys()):
# Train only with size 1 cascades
train_data = np.asarray((self.data_bag[1][metadata_str]))
train_input, train_output = train_data[:, 0], train_data[:, 1]
if 'EDFA' in metadata_str:
all_train_input_EDFA += list(train_input)
all_train_output_EDFA += list(train_output)
else:
all_train_input_SMF += list(train_input)
all_train_output_SMF += list(train_output)
self.model_bag[1][metadata_str] = multioutput.MultiOutputRegressor(xgb.XGBRegressor()).fit(train_input, train_output)
self.model_bag[1]['joker_EDFA'] = multioutput.MultiOutputRegressor(xgb.XGBRegressor()).fit(np.asarray(all_train_input_EDFA), np.asarray(all_train_output_EDFA))
self.model_bag[1]['joker_SMF'] = multioutput.MultiOutputRegressor(xgb.XGBRegressor()).fit(np.asarray(all_train_input_SMF), np.asarray(all_train_output_SMF))
# Now, let's train also with the size 2 cascades
for metadata_str in list(self.data_bag[2].keys()):
metadata_split_str = metadata_str.split('-')
first_individual_module = metadata_split_str[0]
second_individual_module = metadata_split_str[1]
try:
model = self.model_bag[1][first_individual_module]
except:
if 'EDFA' in first_individual_module:
model = self.model_bag[1]['joker_EDFA']
else:
model = self.model_bag[1]['joker_SMF']
data = np.asarray(self.data_bag[2][metadata_str])
train_inp, train_out = data[:, 0], data[:, 1]
pred = model.predict(train_inp)
pred = pred * (pred > 0)
if second_individual_module in self.model_bag[1]:
self.model_bag[1][second_individual_module].fit(pred, train_out)
else:
if 'EDFA' in second_individual_module:
self.model_bag[1]['joker_EDFA'].fit(pred, train_out)
else:
self.model_bag[1]['joker_SMF'].fit(pred, train_out)
def predict(self, X):
preds = []
for inp in X:
metadata = inp[0]
signal = inp[1]
for module in metadata:
metadata_str = module[0] + "_" + str(module[1][0]) + "_" + str(module[1][1])
try:
model = self.model_bag[1][metadata_str]
except:
if 'EDFA' in metadata_str:
model = self.model_bag[1]['joker_EDFA']
else:
model = self.model_bag[1]['joker_SMF']
pred = model.predict(np.asarray(signal).reshape(1, -1))
pred = pred * (pred > 0)
# Use previous pred as the new input
signal = pred
preds.append(pred[0])
return np.asarray(preds)
|
[
"xgboost.XGBRegressor",
"numpy.asarray"
] |
[((4365, 4382), 'numpy.asarray', 'np.asarray', (['preds'], {}), '(preds)\n', (4375, 4382), True, 'import numpy as np\n'), ((1460, 1502), 'numpy.asarray', 'np.asarray', (['self.data_bag[1][metadata_str]'], {}), '(self.data_bag[1][metadata_str])\n', (1470, 1502), True, 'import numpy as np\n'), ((2103, 2135), 'numpy.asarray', 'np.asarray', (['all_train_input_EDFA'], {}), '(all_train_input_EDFA)\n', (2113, 2135), True, 'import numpy as np\n'), ((2137, 2170), 'numpy.asarray', 'np.asarray', (['all_train_output_EDFA'], {}), '(all_train_output_EDFA)\n', (2147, 2170), True, 'import numpy as np\n'), ((2270, 2301), 'numpy.asarray', 'np.asarray', (['all_train_input_SMF'], {}), '(all_train_input_SMF)\n', (2280, 2301), True, 'import numpy as np\n'), ((2303, 2335), 'numpy.asarray', 'np.asarray', (['all_train_output_SMF'], {}), '(all_train_output_SMF)\n', (2313, 2335), True, 'import numpy as np\n'), ((2953, 2995), 'numpy.asarray', 'np.asarray', (['self.data_bag[2][metadata_str]'], {}), '(self.data_bag[2][metadata_str])\n', (2963, 2995), True, 'import numpy as np\n'), ((2079, 2097), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {}), '()\n', (2095, 2097), True, 'import xgboost as xgb\n'), ((2246, 2264), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {}), '()\n', (2262, 2264), True, 'import xgboost as xgb\n'), ((1952, 1970), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {}), '()\n', (1968, 1970), True, 'import xgboost as xgb\n'), ((4142, 4160), 'numpy.asarray', 'np.asarray', (['signal'], {}), '(signal)\n', (4152, 4160), True, 'import numpy as np\n')]
|
import os
import cv2
import gc
import random
import time
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import argparse
from glob import glob
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from PIL import Image, ImageFilter
from models.OEFT import OEFT
parser = argparse.ArgumentParser(description='Code to optimize')
parser.add_argument('--device', help='cuda | cuda:0 | cpu', default="cuda", type=str)
parser.add_argument('--device_num', help='which GPUs to use', default="0", type=str)
parser.add_argument('--sample_freq', help="sampling frequency of saving results", default=500, type=float)
""" optimizer.py setting """
parser.add_argument('--content_root', help='folder of content images', default="../OEFT/example_210905/input/", type=str)
parser.add_argument('--style_root', help='folder of style images', default="../OEFT/example_210905/style/", type=str)
parser.add_argument('--save_root', help='folder of saving results', default="../OEFT/example_210929/experiment", type=str)
parser.add_argument('--fileType', help='png|jpg', default="png", type=str)
parser.add_argument('--keys', help='vgg layer names', default=['r12', 'r22', 'r34', 'r44', 'r54'], nargs="+")
parser.add_argument('--iter', help="number of iteration for optimization", default=1000, type=int)
parser.add_argument('--img_size', help="size of input image", default=256, type=int)
parser.add_argument('--pretrained', help="use pre-trained network or not", action="store_false")
parser.add_argument('--denorm', help="size of input image", action="store_false")
parser.add_argument('--lr', help="learning rate", default=1e-4, type=float)
parser.add_argument('--beta1', help="optimizer parameter", default=0.5, type=float)
parser.add_argument('--beta2', help="optimizer parameter", default=0.999, type=float)
parser.add_argument('--weight_decay', help="weight_decay", default=1e-4, type=float)
""" OEPT.py setting """
parser.add_argument('--warpFeat', help="use warped feature as decoder input", action="store_true")
parser.add_argument('--warpMv', help="use warped feature as moving averaged feature with content feature", action="store_true")
parser.add_argument('--warpRes', help="use warped image as residual", action="store_true")
parser.add_argument('--cycle', help="use cycle consistency regularization", action="store_true")
parser.add_argument('--res_wt', help="weight between decoder output and residual warped img", default=8/9, type=float)
parser.add_argument('--cycle_wt', help="weight of cycle consistency regularization", default=1., type=float)
# 256, 128, 64, 32, 16
parser.add_argument('--nce_wt', help='nce loss weights from each layer[256-16]', default=[1/8*1/4, 1/4*1/4, 1/2*1/4, 1.*1/4, 1.*1/4], nargs="+")
parser.add_argument('--nns_wt', help='NN style loss weights from each layer[256-16]', default=[1/16*1/4, 1/8*1/4, 1/4*1/4, 1/2*1/4, 1.*1/4], nargs="+")
parser.add_argument('--nce_temp', help="temperature for nce", default=0.07, type=float)
parser.add_argument('--nns_temp', help="temperature for nns", default=0.05, type=float)
parser.add_argument('--content_style_wt', help="weight of between content and style", default=4/5, type=float)
""" corrnet.py setting """
parser.add_argument('--corr_temp', help="temperature of correlation module", default=0.01, type=float)
parser.add_argument('--mv_wt', help="weight of moving average", default=0.6, type=float)
mean = np.array([0.485, 0.456, 0.406]).reshape(1,1,3)
std = np.array([0.229, 0.224, 0.225]).reshape(1,1,3)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
GPU_list = args.device_num
len_GPU = len( GPU_list.split(","))
print("@@@@@@@@@@@@ len_GPU: ", len_GPU)
os.environ["CUDA_VISIBLE_DEVICES"] = GPU_list
# Load content and style
print(os.listdir('./'))
def img_load(path):
img = cv2.imread(path)[::,::,::-1] # BGR to RGB, [0-255]
return img
def toPIL(img):
# image range should be [0-255] for converting.
img_type = str(type(img))
if 'numpy' in img_type:
img = Image.fromarray(img)
elif 'torch' in img_type:
img = transforms.ToPILImage()(img).convert("RGB")
return img
if __name__ == "__main__":
# parse options
keys = args.keys
content_root = args.content_root
style_root = args.style_root
if not os.path.exists(args.content_root):
print("!!! args.content_root does not exist !!!")
exit()
if not os.path.exists(args.style_root):
print("!!! args.style_root does not exist !!!")
exit()
content_list = glob( os.path.join(content_root, "*.{}".format(args.fileType)) )
style_list = glob( os.path.join(style_root, "*.{}".format(args.fileType) ) )
if len(content_list) < 1 or len(style_list) < 1:
print("!!! The number of content and style images should be more than 1 !!!")
exit()
content_list.sort()
style_list.sort()
print("@@@@@@@@@ len(content_list): ", len(content_list))
for z in range(len(content_list)):
random_seed = 1006
os.environ['PYTHONHASHSEED'] = str(random_seed)
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.cuda.manual_seed_all(random_seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
random.seed(random_seed)
""" start iteration """
torch.cuda.empty_cache() # remove all caches
try:
""" content, style image path indexing """
content_path = content_list[z] # './examples/input/in11.png'
style_path = style_list[z] # './examples/style/tar11.png'
""" img load """
content = img_load(content_path)
style = img_load(style_path)
content_256 = content.copy()
style_256 = style.copy()
except Exception as e:
print("image loading error : ", e)
continue
""" Convert numpy array to PIL.Image format """
""" and modify the range (0-255) to [0-1] """
content = toPIL(content)
style = toPIL(style)
""" Make transform """
transform_list = []
img_size = (args.img_size, args.img_size)
transform_list.append(transforms.Resize(img_size, interpolation=2)) # @@@@ args.interpol-method = 2
transform_list += [transforms.ToTensor()]
transform_list += [transforms.Normalize( (0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))]
transform = transforms.Compose(transform_list)
""" do transform """
content = transform(content)
style = transform(style)
content = torch.unsqueeze( content, dim=0 )
style = torch.unsqueeze( style, dim=0 )
""" Load model """
model = OEFT(args=args, pretrained=args.pretrained)
model = model.to(args.device)
""" Define optimizer """
e_optimizer = torch.optim.Adam(model.corrNet.parameters(), lr=args.lr,
betas=(args.beta1, args.beta2)
)
g_optimizer = torch.optim.Adam(model.decoder.parameters(), lr=args.lr,
betas=(args.beta1, args.beta2)
)
for param_group in e_optimizer.param_groups:
param_group['lr'] = 1e-4 #hparams.initial_learning_rate
for param_group in g_optimizer.param_groups:
param_group['lr'] = 1e-4 #hparams.initial_learning_rate
if "cuda" in args.device:
content = content.type(torch.cuda.FloatTensor).to(args.device).detach()
style = style.type(torch.cuda.FloatTensor).to(args.device).detach()
new_input = content.clone().type(torch.cuda.FloatTensor).to(args.device)
else:
content = content.type(torch.FloatTensor).to(args.device).detach()
style = style.type(torch.FloatTensor).to(args.device).detach()
new_input = content.clone().type(torch.FloatTensor).to(args.device)
warp_result = None
dec_result = None
cycle_result = None
count = 0
model.train()
start = time.time()
prog_bar = tqdm(range(args.iter))
for i in prog_bar:
if count == 0:
warped_s2c_feat, warped_s2c_imgs, tr_s2c_img_decs, loss, loss_dict, nce_dict, nns_dict = model(style, content, step=i)
else:
if args.warpMv:
warped_s2c_feat = warped_s2c_feat
for key in args.keys:
warped_s2c_feat[key] = warped_s2c_feat[key].detach()
warped_s2c_feat, warped_s2c_imgs, tr_s2c_img_decs, loss, loss_dict, nce_dict, nns_dict = model(style, content, warped_s2c_feat, step=i)
else:
warped_s2c_feat, warped_s2c_imgs, tr_s2c_img_decs, loss, loss_dict, nce_dict, nns_dict = model(style, content, step=i)
# summary_writer.add_scalars( "Total NCE and NNS" , loss_dict, i)
# summary_writer.add_scalars( "NCEs" , nce_dict, i)
# summary_writer.add_scalars( "NNSs" , nns_dict, i)
# nce_dict.update(nns_dict)
# summary_writer.add_scalars( "NCEs and NNSs" , nce_dict, i)
prog_bar.set_description("Pair:{}, iter:{}, loss_style:{}, loss_cont:{}, loss_cycle:{}".format(
z+1,
i+1,
loss_dict["L_style"],
loss_dict["L_content"],
loss_dict["L_cycle"])
)
e_optimizer.zero_grad()
g_optimizer.zero_grad()
loss.backward()
e_optimizer.step()
g_optimizer.step()
count += 1
""" generation result """
dec_result = tr_s2c_img_decs.clone().detach()
""" save the results """
if (i + 1) % args.sample_freq == 0 or i == 0 or i == args.iter-1:
c_img = os.path.basename(content_path) # 'in11.png'
s_img = os.path.basename(style_path) # 'tar11.png'
c_name = c_img.split('.')[0] # in11
s_name = s_img.split('.')[0] # tar11
pair_dir = '{}_'.format(z) + c_name + '_and_' + s_name
pair_iter_dir = '{}_'.format(z) + "iter" + str(i) + "_" +c_name + '_and_' + s_name
""" making folder to save results """
root_iter_path = os.path.join(args.save_root, pair_dir)
save_dir_path = os.path.join(root_iter_path, pair_iter_dir)
if not os.path.isdir(args.save_root):
os.makedirs(args.save_root)
if not os.path.isdir(save_dir_path):
os.makedirs(save_dir_path)
""" denormalization """
if args.denorm == True:
result = ( np.clip(( dec_result[0].permute(1,2,0).clone().detach().cpu().numpy()) *std + mean, 0.0, 1.0)*255.0).astype('uint8')[::,::,::-1]
else:
result = ( np.clip(( dec_result[0].permute(1,2,0).clone().detach().cpu().numpy()), 0.0, 1.0)*255.0).astype('uint8')[::,::,::-1]
""" change the form """
content_save = (cv2.resize(content_256, (256,256))).astype('uint8')[::,::,::-1]
style_save = (cv2.resize(style_256, (256,256))).astype('uint8')[::,::,::-1]
bundle_result = np.stack( (content_save, style_save, result), axis=1 )
bundle_result = bundle_result.reshape((256, 256*3, 3))
""" save the result """
cv2.imwrite( os.path.join(save_dir_path, c_name+'.png'), content_save)
cv2.imwrite( os.path.join(save_dir_path, s_name+'.png'), style_save)
cv2.imwrite( os.path.join(save_dir_path, 'result.png'), result)
cv2.imwrite( os.path.join(save_dir_path, c_name + '_' + s_name + '_' + 'result_bundle.png'), bundle_result)
# if args.denorm == True:
# warped_s2c_imgs= (np.clip( ( warped_s2c_imgs[0].clone().permute(1,2,0).detach().cpu().numpy()) *std + mean, 0.0, 1.0)*255.0).astype('uint8')[::,::,::-1]
# else:
# warped_s2c_imgs = (np.clip( ( warped_s2c_imgs[0].clone().permute(1,2,0).detach().cpu().numpy()), 0.0, 1.0)*255.0).astype('uint8')[::,::,::-1]
# resolution = ['256', '128', '64', '32', '16']
# cv2.imwrite( os.path.join(save_dir_path, 'warp_{0}.png'.format(resolution[2])) , warped_s2c_imgs ) # k=2 (64)
print("time :", time.time() - start)
print("root path: ", root_iter_path)
del model
gc.collect()
|
[
"models.OEFT.OEFT",
"torchvision.transforms.ToPILImage",
"numpy.array",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"torch.unsqueeze",
"numpy.stack",
"os.path.isdir",
"numpy.random.seed",
"torchvision.transforms.ToTensor",
"gc.collect",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"cv2.resize",
"time.time",
"torchvision.transforms.Compose",
"cv2.imread",
"torch.cuda.empty_cache",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"PIL.Image.fromarray",
"os.makedirs",
"os.path.join",
"random.seed",
"os.path.basename",
"torch.cuda.manual_seed"
] |
[((324, 379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Code to optimize"""'}), "(description='Code to optimize')\n", (347, 379), False, 'import argparse\n'), ((4596, 4612), 'os.listdir', 'os.listdir', (['"""./"""'], {}), "('./')\n", (4606, 4612), False, 'import os\n'), ((4238, 4269), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (4246, 4269), True, 'import numpy as np\n'), ((4292, 4323), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (4300, 4323), True, 'import numpy as np\n'), ((4644, 4660), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (4654, 4660), False, 'import cv2\n'), ((4851, 4871), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (4866, 4871), False, 'from PIL import Image, ImageFilter\n'), ((5132, 5165), 'os.path.exists', 'os.path.exists', (['args.content_root'], {}), '(args.content_root)\n', (5146, 5165), False, 'import os\n'), ((5251, 5282), 'os.path.exists', 'os.path.exists', (['args.style_root'], {}), '(args.style_root)\n', (5265, 5282), False, 'import os\n'), ((5916, 5946), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (5933, 5946), False, 'import torch\n'), ((5955, 5990), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['random_seed'], {}), '(random_seed)\n', (5977, 5990), False, 'import torch\n'), ((5999, 6038), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['random_seed'], {}), '(random_seed)\n', (6025, 6038), False, 'import torch\n'), ((6164, 6191), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (6178, 6191), True, 'import numpy as np\n'), ((6200, 6224), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (6211, 6224), False, 'import random\n'), ((6266, 6290), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (6288, 6290), False, 'import torch\n'), ((7453, 7487), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (7471, 7487), True, 'import torchvision.transforms as transforms\n'), ((7614, 7645), 'torch.unsqueeze', 'torch.unsqueeze', (['content'], {'dim': '(0)'}), '(content, dim=0)\n', (7629, 7645), False, 'import torch\n'), ((7666, 7695), 'torch.unsqueeze', 'torch.unsqueeze', (['style'], {'dim': '(0)'}), '(style, dim=0)\n', (7681, 7695), False, 'import torch\n'), ((7742, 7785), 'models.OEFT.OEFT', 'OEFT', ([], {'args': 'args', 'pretrained': 'args.pretrained'}), '(args=args, pretrained=args.pretrained)\n', (7746, 7785), False, 'from models.OEFT import OEFT\n'), ((9156, 9167), 'time.time', 'time.time', ([], {}), '()\n', (9165, 9167), False, 'import time\n'), ((14087, 14099), 'gc.collect', 'gc.collect', ([], {}), '()\n', (14097, 14099), False, 'import gc\n'), ((7161, 7205), 'torchvision.transforms.Resize', 'transforms.Resize', (['img_size'], {'interpolation': '(2)'}), '(img_size, interpolation=2)\n', (7178, 7205), True, 'import torchvision.transforms as transforms\n'), ((7266, 7287), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7285, 7287), True, 'import torchvision.transforms as transforms\n'), ((7316, 7382), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (7336, 7382), True, 'import torchvision.transforms as transforms\n'), ((11349, 11379), 'os.path.basename', 'os.path.basename', 
(['content_path'], {}), '(content_path)\n', (11365, 11379), False, 'import os\n'), ((11417, 11445), 'os.path.basename', 'os.path.basename', (['style_path'], {}), '(style_path)\n', (11433, 11445), False, 'import os\n'), ((11826, 11864), 'os.path.join', 'os.path.join', (['args.save_root', 'pair_dir'], {}), '(args.save_root, pair_dir)\n', (11838, 11864), False, 'import os\n'), ((11897, 11940), 'os.path.join', 'os.path.join', (['root_iter_path', 'pair_iter_dir'], {}), '(root_iter_path, pair_iter_dir)\n', (11909, 11940), False, 'import os\n'), ((12823, 12875), 'numpy.stack', 'np.stack', (['(content_save, style_save, result)'], {'axis': '(1)'}), '((content_save, style_save, result), axis=1)\n', (12831, 12875), True, 'import numpy as np\n'), ((13993, 14004), 'time.time', 'time.time', ([], {}), '()\n', (14002, 14004), False, 'import time\n'), ((11965, 11994), 'os.path.isdir', 'os.path.isdir', (['args.save_root'], {}), '(args.save_root)\n', (11978, 11994), False, 'import os\n'), ((12016, 12043), 'os.makedirs', 'os.makedirs', (['args.save_root'], {}), '(args.save_root)\n', (12027, 12043), False, 'import os\n'), ((12067, 12095), 'os.path.isdir', 'os.path.isdir', (['save_dir_path'], {}), '(save_dir_path)\n', (12080, 12095), False, 'import os\n'), ((12117, 12143), 'os.makedirs', 'os.makedirs', (['save_dir_path'], {}), '(save_dir_path)\n', (12128, 12143), False, 'import os\n'), ((13020, 13064), 'os.path.join', 'os.path.join', (['save_dir_path', "(c_name + '.png')"], {}), "(save_dir_path, c_name + '.png')\n", (13032, 13064), False, 'import os\n'), ((13107, 13151), 'os.path.join', 'os.path.join', (['save_dir_path', "(s_name + '.png')"], {}), "(save_dir_path, s_name + '.png')\n", (13119, 13151), False, 'import os\n'), ((13192, 13233), 'os.path.join', 'os.path.join', (['save_dir_path', '"""result.png"""'], {}), "(save_dir_path, 'result.png')\n", (13204, 13233), False, 'import os\n'), ((13272, 13350), 'os.path.join', 'os.path.join', (['save_dir_path', "(c_name + '_' + s_name + '_' + 'result_bundle.png')"], {}), "(save_dir_path, c_name + '_' + s_name + '_' + 'result_bundle.png')\n", (13284, 13350), False, 'import os\n'), ((4916, 4939), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (4937, 4939), True, 'import torchvision.transforms as transforms\n'), ((12630, 12665), 'cv2.resize', 'cv2.resize', (['content_256', '(256, 256)'], {}), '(content_256, (256, 256))\n', (12640, 12665), False, 'import cv2\n'), ((12728, 12761), 'cv2.resize', 'cv2.resize', (['style_256', '(256, 256)'], {}), '(style_256, (256, 256))\n', (12738, 12761), False, 'import cv2\n')]
|
import tensorflow as tf
import numpy as np
import random
import time
from math import exp
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers import Merge
from keras.optimizers import RMSprop, Adam
start_time = time.time()
class UAV_fire_extinguish(object):
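    # Scenario parameters for a grid world in which multiple UAVs cooperate to put out fires.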
n_w = 4 # width of grid world
n_uav = 2 # number of agents
n_fire = 3 # number of fires
u_loca = [0, 15] # initial location of agents
t_fail = [0.02, 0.04] # probability for automatical failure
t_emit = [0.5, 0.5] # probability for getting observation
l_fire = [2, 7, 12] # location of fires
r_fire = [5.0, 5.0, 50.0] # reward of putting down each fire
e_fire = [[0.9,0.9],
[0.9,0.9],
[0.0,0.9]] # fire extinguish probability for each fire [down by 1 agent, down by 2 agent]
l_bigf = [12] # location of big fire
l_smlf = [2,7] # location of small fire
s_init = u_loca + [1]*n_fire + [1]*n_uav # initial state of the system
n_visi = 3 # length of local vision
##### Sampling method #####
def sampling_events(event,prob):
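    # Sample one outcome from `event` according to the probabilities in `prob`
    # (inverse-CDF sampling with a single uniform draw).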
n_length = len(event)
x_rand = np.random.random()
for i in range(n_length):
x_rand = x_rand - prob[i]
if x_rand <= 0:
return event[i]
def mix_distribution(event1,prob1,event2,prob2):
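    # Combine two discrete distributions: each outcome of `event1` is extended with each
    # outcome of `event2`, and the corresponding probabilities are multiplied.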
n_length_1 = len(event1)
n_length_2 = len(event2)
new_event = []
new_prob = []
for e1 in range(n_length_1):
for e2 in range(n_length_2):
e_new = event1[e1] + [event2[e2]]
new_event.append(e_new)
p_new = prob1[e1] * prob2[e2]
new_prob.append(p_new)
return (new_event,new_prob)
##### check boundary #####
def check_boundary(x,w):
if x < 0:
return 0
elif x > w-1:
return w-1
else:
return x
##################################
##### Mapping between states #####
##################################
def two_dim_to_one(l_cor,n_w):
x = l_cor[0]
y = l_cor[1]
l = n_w * y + x
return l
def one_dim_to_two(l,n_w):
x = l%n_w
y = (l-x)/n_w
return [x,y]
############################
##### TRANSITION MODEL #####
############################
### simple movement of one agent due to action
def move_location_single(l_1d,a,n_w):
if l_1d == n_w * n_w:
return l_1d
l = one_dim_to_two(l_1d,n_w)
x_next = l[0]
y_next = l[1]
if a == 0: # up
y_next = y_next + 1
elif a == 1: # down
y_next = y_next - 1
elif a == 2: # left
x_next = x_next - 1
elif a == 3:
x_next = x_next + 1
else:
pass
x_next = check_boundary(x_next,n_w)
y_next = check_boundary(y_next,n_w)
l_next = two_dim_to_one((x_next,y_next),n_w)
return l_next
######################################################
##### number of uavs at the location of the fire #####
######################################################
def fire_has_uavs(lf,l_tuple,n_cut):
num = 0
for i in range(len(l_tuple)):
if lf == l_tuple[i]:
num += 1
if num > n_cut:
num = n_cut
return num
######################################################################
##### Obtain all possible sets and the corresponding probability #####
######################################################################
def transition_model(sys_cart_product_and_time_delay,a_joint,UAV_fire_extinguish):
s_fail = UAV_fire_extinguish.n_w * UAV_fire_extinguish.n_w
cart_product = sys_cart_product_and_time_delay[0: UAV_fire_extinguish.n_uav + UAV_fire_extinguish.n_fire]
time_delay = sys_cart_product_and_time_delay[UAV_fire_extinguish.n_uav + UAV_fire_extinguish.n_fire :]
##### Terminal states #####
die_product = 1
### if all agents are broken ###
for i_uav in range(UAV_fire_extinguish.n_uav):
if cart_product[i_uav] == s_fail:
die_product = die_product * 1
else:
die_product = die_product * 0
if die_product == 1:
return ([UAV_fire_extinguish.u_loca + [1]*UAV_fire_extinguish.n_fire], [1.0], [1]*UAV_fire_extinguish.n_uav)
### if all fires are extinguished ###
if sum(cart_product[UAV_fire_extinguish.n_uav:UAV_fire_extinguish.n_uav + UAV_fire_extinguish.n_fire]) == 0:
return ([UAV_fire_extinguish.u_loca + [1]*UAV_fire_extinguish.n_fire], [1.0], [1]*UAV_fire_extinguish.n_uav)
##### Transition of the first UAV #####
if cart_product[0] == s_fail:
event_product = [[s_fail]]
prob_product = [1.0]
else:
l0_next = move_location_single(cart_product[0],a_joint[0],UAV_fire_extinguish.n_w)
event_product = [[l0_next],[s_fail]]
prob_product = [1.0 - UAV_fire_extinguish.t_fail[0], UAV_fire_extinguish.t_fail[0]]
##### Transition of the second UAV #####
for i_uav in range(1,UAV_fire_extinguish.n_uav):
if cart_product[i_uav] == s_fail:
event_set_1 = [s_fail]
prob_set_1 = [1.0]
else:
l1_next = move_location_single(cart_product[i_uav],a_joint[i_uav],UAV_fire_extinguish.n_w)
event_set_1 = [l1_next,s_fail]
prob_set_1 = [1.0 - UAV_fire_extinguish.t_fail[i_uav], UAV_fire_extinguish.t_fail[i_uav]]
(event_product,prob_product) = mix_distribution(event_product,prob_product,event_set_1,prob_set_1)
##### Transition of the fire states #####
for i_fire in range(UAV_fire_extinguish.n_fire):
the_fire_state = cart_product[UAV_fire_extinguish.n_uav + i_fire]
if the_fire_state == 0: # no fire
(event_product,prob_product) = mix_distribution(event_product,prob_product,[0],[1.0])
else:
l_f = UAV_fire_extinguish.l_fire[i_fire]
l_0 = cart_product[0]
l_1 = cart_product[1]
if fire_has_uavs(l_f,cart_product[0:UAV_fire_extinguish.n_uav],2) == 1:
rate_put_down = UAV_fire_extinguish.e_fire[i_fire][0]
(event_product,prob_product) = mix_distribution(event_product,prob_product,[0,1],[rate_put_down,1.0-rate_put_down])
elif fire_has_uavs(l_f,cart_product[0:UAV_fire_extinguish.n_uav],2) == 2:
rate_put_down = UAV_fire_extinguish.e_fire[i_fire][1]
(event_product,prob_product) = mix_distribution(event_product,prob_product,[0,1],[rate_put_down,1.0-rate_put_down])
else:
(event_product,prob_product) = mix_distribution(event_product,prob_product,[1],[1.0])
##### Consider the transition of time delay (Poisson Process) #####
for i_uav in range(UAV_fire_extinguish.n_uav):
random_p = random.random()
if random_p < UAV_fire_extinguish.t_emit[i_uav]:
time_delay[i_uav] = 1
else:
time_delay[i_uav] = time_delay[i_uav] + 1
return (event_product,prob_product,time_delay)
def global_observation(agent,sys_state,UAV_fire_extinguish):
s_fail = UAV_fire_extinguish.n_w * UAV_fire_extinguish.n_w
o_length = 2 * UAV_fire_extinguish.n_uav + UAV_fire_extinguish.n_fire + 1 + 1
# (x,y) coordinate of each agent + fire status of each fire + agent ID + time_delay
obs = ([agent] +
[0] * ( 2 * UAV_fire_extinguish.n_uav) +
sys_state[UAV_fire_extinguish.n_uav: UAV_fire_extinguish.n_uav + UAV_fire_extinguish.n_fire] +
[sys_state[UAV_fire_extinguish.n_uav + UAV_fire_extinguish.n_fire + agent]])
for j_agent in range(UAV_fire_extinguish.n_uav):
[x,y] = one_dim_to_two(sys_state[j_agent],UAV_fire_extinguish.n_w)
obs[1 + 2*j_agent] = x
obs[2 + 2*j_agent] = y
return obs
def local_observation(agent,sys_state,UAV_fire_extinguish):
s_fail = UAV_fire_extinguish.n_w * UAV_fire_extinguish.n_w
# agent = which agent is going to make the observation
vision_depth = UAV_fire_extinguish.n_visi
vision_area = (vision_depth * 2 + 1) ** 2
self_location_xy = one_dim_to_two(sys_state[agent],UAV_fire_extinguish.n_w)
# vision 1: other agents
vision_1 = [0]*vision_area
for other_agent in range(UAV_fire_extinguish.n_uav):
if other_agent != agent :
location_other_agent = sys_state[other_agent]
location_other_xy = one_dim_to_two(location_other_agent,UAV_fire_extinguish.n_w)
dx = location_other_xy[0] - self_location_xy[0]
dy = location_other_xy[1] - self_location_xy[1]
if (-1)*vision_depth <= dx <= vision_depth and (-1)*vision_depth <= dy <= vision_depth and sys_state[other_agent] != s_fail:
relative_location = two_dim_to_one((dx + vision_depth,dy + vision_depth), vision_depth * 2 + 1)
vision_1[relative_location] += 1
# vision 2: big fires
vision_2 = [0]*vision_area
# vision 3: small fires
vision_3 = [0]*vision_area
for i_fire in range(UAV_fire_extinguish.n_fire):
if sys_state[UAV_fire_extinguish.n_uav + i_fire] == 1:
if UAV_fire_extinguish.l_fire[i_fire] in UAV_fire_extinguish.l_bigf: # it is a big fire
big_location = one_dim_to_two(UAV_fire_extinguish.l_fire[i_fire],UAV_fire_extinguish.n_w)
dx = big_location[0] - self_location_xy[0]
dy = big_location[1] - self_location_xy[1]
if (-1)*vision_depth <= dx <= vision_depth and (-1)*vision_depth <= dy <= vision_depth:
relative_location = two_dim_to_one((dx + vision_depth,dy + vision_depth), vision_depth * 2 + 1)
vision_2[relative_location] += 1
else: # it is a small fire
sml_location = one_dim_to_two(UAV_fire_extinguish.l_fire[i_fire],UAV_fire_extinguish.n_w)
dx = sml_location[0] - self_location_xy[0]
dy = sml_location[1] - self_location_xy[1]
if (-1)*vision_depth <= dx <= vision_depth and (-1)*vision_depth <= dy <= vision_depth:
relative_location = two_dim_to_one((dx + vision_depth,dy + vision_depth), vision_depth * 2 + 1)
vision_3[relative_location] += 1
time_delay = sys_state[UAV_fire_extinguish.n_uav + UAV_fire_extinguish.n_fire + agent]
return (([agent] + self_location_xy + [time_delay]),(vision_1),(vision_2),(vision_3))
def transition_sample(
current_state,
a_joint, # tuple
info_list, # [info_1,info_2,....]
UAV_fire_extinguish):
n_w = UAV_fire_extinguish.n_w
reward = 0.0
(event,prob,time_delay) = transition_model(current_state,a_joint,UAV_fire_extinguish)
next_state = sampling_events(event,prob) + time_delay
# Collect rewards
for i_fire in range(UAV_fire_extinguish.n_fire):
if current_state[UAV_fire_extinguish.n_uav + i_fire] == 1 and next_state[UAV_fire_extinguish.n_uav + i_fire] == 0:
reward += UAV_fire_extinguish.r_fire[i_fire]
# Update information if time delay is 1.0
updated_info_list = info_list[:]
for i_agent in range(UAV_fire_extinguish.n_uav):
if next_state[UAV_fire_extinguish.n_uav + UAV_fire_extinguish.n_fire + i_agent] == 1:
updated_info_list[i_agent] = global_observation(i_agent,next_state,UAV_fire_extinguish)
else:
#updated_info_list[i_agent][3] = updated_info_list[i_agent][3] + 1
updated_info_list[i_agent][-1] = updated_info_list[i_agent][-1] + 1
return [next_state,updated_info_list,reward]
######## CODE FOR SIMULATOR IS FINISHED #########
######## CODE FOR SIMULATOR IS FINISHED #########
######## CODE FOR SIMULATOR IS FINISHED #########
######## CODE FOR SIMULATOR IS FINISHED #########
######## CODE FOR SIMULATOR IS FINISHED #########
######## CODE FOR SIMULATOR IS FINISHED #########
def samples_by_random_action(n_init_pool,UAV_fire_extinguish):
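    # Roll out n_init_pool steps with uniformly random joint actions and record, per agent,
    # the (observation, action, reward, next observation) tuples for the initial pool.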
size = UAV_fire_extinguish.n_w
input_number = 4 + 3 *(2 * UAV_fire_extinguish.n_visi + 1)**2
o_pool = np.zeros((UAV_fire_extinguish.n_uav,n_init_pool,input_number),float)
a_pool = np.zeros((UAV_fire_extinguish.n_uav,n_init_pool,5),float)
r_pool = np.zeros((UAV_fire_extinguish.n_uav,n_init_pool,1),float)
op_pool = np.zeros((UAV_fire_extinguish.n_uav,n_init_pool,input_number),float)
s_current = UAV_fire_extinguish.s_init
last_info_list = []
for i_uav in range(UAV_fire_extinguish.n_uav):
last_info_list.append(local_observation(i_uav,s_current,UAV_fire_extinguish))
#print(last_info_list[i_uav])
next_info_list = last_info_list[:]
for i_event in range(n_init_pool):
a_joint = [0] * UAV_fire_extinguish.n_uav
for i_uav in range(UAV_fire_extinguish.n_uav):
a_joint[i_uav] = random.randint(0,4)
#print(s_current,a_joint)
outcome = transition_sample(s_current,a_joint,last_info_list,UAV_fire_extinguish)
next_state = outcome[0]
next_info_list = outcome[1]
reward = outcome[2]
for i_uav in range(UAV_fire_extinguish.n_uav):
o_pool[i_uav,i_event,:] = last_info_list[i_uav][:]
op_pool[i_uav,i_event,:] = next_info_list[i_uav][:]
a_pool[i_uav,i_event,a_joint[i_uav]] = 1.0
r_pool[i_uav,i_event,0] = reward
last_info_list = next_info_list[:]
s_current = next_state
return (o_pool,a_pool,r_pool,op_pool)
def samples_by_one_agent_random_action(n_init_pool,free_agent,UAV_fire_extinguish):
size = UAV_fire_extinguish.n_w
input_number = 4 + 3 *(2 * UAV_fire_extinguish.n_visi + 1)**2
o_pool = np.zeros((UAV_fire_extinguish.n_uav,n_init_pool,input_number),float)
a_pool = np.zeros((UAV_fire_extinguish.n_uav,n_init_pool,5),float)
r_pool = np.zeros((UAV_fire_extinguish.n_uav,n_init_pool,1),float)
op_pool = np.zeros((UAV_fire_extinguish.n_uav,n_init_pool,input_number),float)
s_current = UAV_fire_extinguish.s_init
last_info_list = []
for i_uav in range(UAV_fire_extinguish.n_uav):
last_info_list.append(local_observation(i_uav,s_current,UAV_fire_extinguish))
next_info_list = last_info_list[:]
for i_event in range(n_init_pool):
a_joint = [0] * UAV_fire_extinguish.n_uav
for i_uav in range(UAV_fire_extinguish.n_uav):
if i_uav == free_agent:
a_joint[i_uav] = random.randint(0,4)
else:
a_joint[i_uav] = es_greedy(sess.run(Q, feed_dict={last_info: [last_info_list[i_uav]]}),0.0)
outcome = transition_sample(s_current,a_joint,last_info_list,UAV_fire_extinguish)
next_state = outcome[0]
next_info_list = outcome[1]
reward = outcome[2]
for i_uav in range(UAV_fire_extinguish.n_uav):
o_pool[i_uav,i_event,:] = last_info_list[i_uav][:]
op_pool[i_uav,i_event,:] = next_info_list[i_uav][:]
a_pool[i_uav,i_event,a_joint[i_uav]] = 1.0
r_pool[i_uav,i_event,0] = reward
last_info_list = next_info_list[:]
s_current = next_state
return (o_pool,a_pool,r_pool,op_pool)
def truncate_dataset_multiagent(data_array,n_keep_size):
n_size = len(data_array[0])
if n_size <= n_keep_size:
return data_array
else:
return data_array[:,(n_size-n_keep_size):,:]
def batch_select_multiagent(inputs,n_uav,n_batch,seeds):
batch_set = np.zeros((n_uav,n_batch,len(inputs[0][1])))
for i in range(n_batch):
for i_uav in range(n_uav):
batch_set[i_uav,i,:] = inputs[i_uav,seeds[i],:]
return batch_set
def visualize_scenario_indp(current_state,h_print,r_explore,UAV_fire_extinguish):
last_info_list = []
for i_uav in range(UAV_fire_extinguish.n_uav):
last_info_list.append(local_observation(i_uav,current_state,UAV_fire_extinguish))
next_info_list = last_info_list[:]
for h in range(h_print):
a_joint = [0] * UAV_fire_extinguish.n_uav
for i_uav in range(UAV_fire_extinguish.n_uav):
(obs_0,obs_1,obs_2,obs_3) = last_info_list[i_uav][:]
old_qval = final_model.predict([np.array(obs_0).reshape(1,input_size_nn_sfinfo),
np.array(obs_1).reshape(1,input_size_nn_vision),
np.array(obs_2).reshape(1,input_size_nn_vision),
np.array(obs_3).reshape(1,input_size_nn_vision)], batch_size=1)
a_joint[i_uav] = es_greedy(old_qval,r_explore)
outcome = transition_sample(current_state,a_joint,last_info_list,UAV_fire_extinguish)
(next_state,next_info_list,reward_immed) = outcome
next_state = outcome[0]
next_info_list = outcome[1]
reward = outcome[2]
print(current_state,a_joint,reward)
current_state = next_state
last_info_list = next_info_list
##########################################
############ Neural Network ##############
##########################################
##### functions for nerual network #####
def es_greedy(inputs, epsi):
    # Epsilon-greedy action selection: explore with probability epsi, otherwise act greedily.
    x_rand = np.random.random()
    if x_rand < epsi:
        # Five actions (0-4); np.random.randint's upper bound is exclusive.
        return np.random.randint(0, 5)
    else:
        return np.argmax(inputs)
def softmax(inputs, T):
    # Sample an action index from a Boltzmann (softmax) distribution with temperature T.
    x = np.random.random()
    e_input = np.ones(len(inputs))
    for i in range(len(inputs)):
        e_input[i] = exp(inputs[i] / float(T))
    e_input = e_input / sum(e_input)
    e_input[-1] += 0.01  # small pad so the accumulated probabilities always cover x
    for i in range(len(inputs)):
        if x < e_input[i]:
            return i
        else:
            x = x - e_input[i]
##### target value #####
#####################################################################
#####################################################################
input_size_nn_vision = (2 * UAV_fire_extinguish.n_visi + 1)**2
input_size_nn_sfinfo = 4
self_info_branch = Sequential()
self_info_branch.add(Dense(10, init='lecun_uniform', input_shape = (input_size_nn_sfinfo,)))
self_info_branch.add(Activation('relu'))
other_vision_branch = Sequential()
other_vision_branch.add(Dense(50, init='lecun_uniform', input_shape = (input_size_nn_vision,)))
other_vision_branch.add(Activation('relu'))
other_vision_branch.add(Dense(50, init='lecun_uniform'))
other_vision_branch.add(Activation('relu'))
smalf_vision_branch = Sequential()
smalf_vision_branch.add(Dense(50, init='lecun_uniform', input_shape = (input_size_nn_vision,)))
smalf_vision_branch.add(Activation('relu'))
smalf_vision_branch.add(Dense(50, init='lecun_uniform'))
smalf_vision_branch.add(Activation('relu'))
bigf_vision_branch = Sequential()
bigf_vision_branch.add(Dense(50, init='lecun_uniform', input_shape = (input_size_nn_vision,)))
bigf_vision_branch.add(Activation('relu'))
bigf_vision_branch.add(Dense(50, init='lecun_uniform'))
bigf_vision_branch.add(Activation('relu'))
merged = Merge([self_info_branch, other_vision_branch, smalf_vision_branch, bigf_vision_branch], mode='concat')
final_model = Sequential()
final_model.add(merged)
final_model.add(Activation('relu'))
final_model.add(Dense(5,init='lecun_uniform'))
final_model.add(Activation('linear'))
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
final_model.compile(loss='mse', optimizer=adam)
##############################
epochs = 60000
gamma = 0.9
epsilon = 0.2
random_action_thresold = 1000
epsi = 0.2
max_pool_size = 20000
n_batch_size = 5000
current_state = UAV_fire_extinguish.s_init
last_info_list = []
for i_uav in range(UAV_fire_extinguish.n_uav):
last_info_list.append(local_observation(i_uav,current_state,UAV_fire_extinguish))
for iteration_times in range(0): # 3 not 0
obs_sfinfo = []
obs_otheru = []
obs_smallf = []
obs_bigf = []
reward_list = []
target_list = []
if iteration_times == 0:
pass
elif iteration_times == 1:
UAV_fire_extinguish.e_fire[2][0] = 0.2
elif iteration_times == 2:
UAV_fire_extinguish.e_fire[2][0] = 0.1
else:
UAV_fire_extinguish.e_fire[2][0] = 0.0
print(UAV_fire_extinguish.e_fire[2][0])
for ep in range(epochs):
epsi = 0.1# - 0.2 * (ep / epochs)
if ep % 100 == 0:
print("iteration times = ",ep,"===============================")
###################################
########## Choose action ##########
###################################
a_joint = [0] * UAV_fire_extinguish.n_uav
if ep < random_action_thresold:
for i_uav in range(UAV_fire_extinguish.n_uav):
a_joint[i_uav] = random.randint(0,4)
else:
for i_uav in range(UAV_fire_extinguish.n_uav):
(obs_0,obs_1,obs_2,obs_3) = last_info_list[i_uav][:]
old_qval = final_model.predict([np.array(obs_0).reshape(1,input_size_nn_sfinfo),
np.array(obs_1).reshape(1,input_size_nn_vision),
np.array(obs_2).reshape(1,input_size_nn_vision),
np.array(obs_3).reshape(1,input_size_nn_vision)], batch_size=1)
a_joint[i_uav] = es_greedy(old_qval,epsi)
#####################################
########## Make transition ##########
#####################################
outcome_transition = transition_sample(current_state,a_joint,last_info_list,UAV_fire_extinguish)
next_state = outcome_transition[0]
#############################################################
### Add observations and rewards into pool for all agents ###
#############################################################
for i_uav in range(UAV_fire_extinguish.n_uav):
# add observations
(obs_0,obs_1,obs_2,obs_3) = last_info_list[i_uav][:]
obs_sfinfo.append(np.array(obs_0).reshape(1,input_size_nn_sfinfo))
obs_otheru.append(np.array(obs_1).reshape(1,input_size_nn_vision))
obs_smallf.append(np.array(obs_2).reshape(1,input_size_nn_vision))
obs_bigf.append(np.array(obs_3).reshape(1,input_size_nn_vision))
reward_list.append(outcome_transition[2])
# add target value
(obsp_0,obsp_1,obsp_2,obsp_3) = outcome_transition[1][i_uav][:]
old_qval = final_model.predict([np.array(obs_0).reshape(1,input_size_nn_sfinfo),
np.array(obs_1).reshape(1,input_size_nn_vision),
np.array(obs_2).reshape(1,input_size_nn_vision),
np.array(obs_3).reshape(1,input_size_nn_vision)], batch_size=1)
new_qval = final_model.predict([np.array(obsp_0).reshape(1,input_size_nn_sfinfo),
np.array(obsp_1).reshape(1,input_size_nn_vision),
np.array(obsp_2).reshape(1,input_size_nn_vision),
np.array(obsp_3).reshape(1,input_size_nn_vision)], batch_size=1)
max_q_new = np.max(new_qval)
y = np.zeros((1,5))
y[:] = old_qval[:]
y[0][a_joint[i_uav]] = outcome_transition[2] + gamma * max_q_new
target_list.append(y)
#########################################
### update next state and information ###
#########################################
current_state = next_state
last_info_list = outcome_transition[1][:]
###########################################
### if we have too many samples in pool ###
###########################################
if len(obs_sfinfo) > max_pool_size:
obs_sfinfo.pop(0)
obs_otheru.pop(0)
obs_smallf.pop(0)
obs_bigf.pop(0)
reward_list.pop(0)
############################
### train neural network ###
############################
if ep % 500 == 0 and ep > random_action_thresold:
# create batch
obs_0_array = np.zeros((n_batch_size,input_size_nn_sfinfo))
obs_1_array = np.zeros((n_batch_size,input_size_nn_vision))
obs_2_array = np.zeros((n_batch_size,input_size_nn_vision))
obs_3_array = np.zeros((n_batch_size,input_size_nn_vision))
targt_array = np.zeros((n_batch_size,5))
if len(obs_sfinfo) > n_batch_size+1:
seeds = random.sample(xrange(0,len(obs_sfinfo)),n_batch_size)
for i_batch_sample in range(n_batch_size):
b_number = seeds[i_batch_sample]
obs_0_array[i_batch_sample,:] = obs_sfinfo[b_number][0][:]
obs_1_array[i_batch_sample,:] = obs_otheru[b_number][0][:]
obs_2_array[i_batch_sample,:] = obs_smallf[b_number][0][:]
obs_3_array[i_batch_sample,:] = obs_bigf[b_number][0][:]
targt_array[i_batch_sample,:] = target_list[b_number][0][:]
# train
final_model.fit([obs_0_array,obs_1_array,obs_2_array,obs_3_array],
targt_array,
batch_size = n_batch_size,
nb_epoch = 50,
verbose = 1)
visualize_scenario_indp(UAV_fire_extinguish.s_init,30,0.2,UAV_fire_extinguish)
print("=====================")
visualize_scenario_indp(UAV_fire_extinguish.s_init,30,0.2,UAV_fire_extinguish)
print("=====================")
visualize_scenario_indp(UAV_fire_extinguish.s_init,30,0.2,UAV_fire_extinguish)
print("=====================")
visualize_scenario_indp(UAV_fire_extinguish.s_init,30,0.2,UAV_fire_extinguish)
print("=====================")
visualize_scenario_indp(UAV_fire_extinguish.s_init,30,0.2,UAV_fire_extinguish)
print("=====================")
|
[
"keras.optimizers.Adam",
"keras.layers.Merge",
"keras.layers.core.Activation",
"numpy.random.random",
"numpy.argmax",
"keras.models.Sequential",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"random.random",
"time.time",
"random.randint",
"keras.layers.core.Dense"
] |
[((271, 282), 'time.time', 'time.time', ([], {}), '()\n', (280, 282), False, 'import time\n'), ((16855, 16867), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (16865, 16867), False, 'from keras.models import Sequential\n'), ((17025, 17037), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (17035, 17037), False, 'from keras.models import Sequential\n'), ((17302, 17314), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (17312, 17314), False, 'from keras.models import Sequential\n'), ((17578, 17590), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (17588, 17590), False, 'from keras.models import Sequential\n'), ((17838, 17944), 'keras.layers.Merge', 'Merge', (['[self_info_branch, other_vision_branch, smalf_vision_branch, bigf_vision_branch\n ]'], {'mode': '"""concat"""'}), "([self_info_branch, other_vision_branch, smalf_vision_branch,\n bigf_vision_branch], mode='concat')\n", (17843, 17944), False, 'from keras.layers import Merge\n'), ((17956, 17968), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (17966, 17968), False, 'from keras.models import Sequential\n'), ((18122, 18188), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)', 'decay': '(0.0)'}), '(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n', (18126, 18188), False, 'from keras.optimizers import RMSprop, Adam\n'), ((1144, 1162), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1160, 1162), True, 'import numpy as np\n'), ((11346, 11417), 'numpy.zeros', 'np.zeros', (['(UAV_fire_extinguish.n_uav, n_init_pool, input_number)', 'float'], {}), '((UAV_fire_extinguish.n_uav, n_init_pool, input_number), float)\n', (11354, 11417), True, 'import numpy as np\n'), ((11426, 11486), 'numpy.zeros', 'np.zeros', (['(UAV_fire_extinguish.n_uav, n_init_pool, 5)', 'float'], {}), '((UAV_fire_extinguish.n_uav, n_init_pool, 5), float)\n', (11434, 11486), True, 'import numpy as np\n'), ((11495, 11555), 'numpy.zeros', 'np.zeros', (['(UAV_fire_extinguish.n_uav, n_init_pool, 1)', 'float'], {}), '((UAV_fire_extinguish.n_uav, n_init_pool, 1), float)\n', (11503, 11555), True, 'import numpy as np\n'), ((11565, 11636), 'numpy.zeros', 'np.zeros', (['(UAV_fire_extinguish.n_uav, n_init_pool, input_number)', 'float'], {}), '((UAV_fire_extinguish.n_uav, n_init_pool, input_number), float)\n', (11573, 11636), True, 'import numpy as np\n'), ((12853, 12924), 'numpy.zeros', 'np.zeros', (['(UAV_fire_extinguish.n_uav, n_init_pool, input_number)', 'float'], {}), '((UAV_fire_extinguish.n_uav, n_init_pool, input_number), float)\n', (12861, 12924), True, 'import numpy as np\n'), ((12933, 12993), 'numpy.zeros', 'np.zeros', (['(UAV_fire_extinguish.n_uav, n_init_pool, 5)', 'float'], {}), '((UAV_fire_extinguish.n_uav, n_init_pool, 5), float)\n', (12941, 12993), True, 'import numpy as np\n'), ((13002, 13062), 'numpy.zeros', 'np.zeros', (['(UAV_fire_extinguish.n_uav, n_init_pool, 1)', 'float'], {}), '((UAV_fire_extinguish.n_uav, n_init_pool, 1), float)\n', (13010, 13062), True, 'import numpy as np\n'), ((13072, 13143), 'numpy.zeros', 'np.zeros', (['(UAV_fire_extinguish.n_uav, n_init_pool, input_number)', 'float'], {}), '((UAV_fire_extinguish.n_uav, n_init_pool, input_number), float)\n', (13080, 13143), True, 'import numpy as np\n'), ((16138, 16156), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (16154, 16156), True, 'import numpy as np\n'), ((16285, 16303), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (16301, 
16303), True, 'import numpy as np\n'), ((16889, 16957), 'keras.layers.core.Dense', 'Dense', (['(10)'], {'init': '"""lecun_uniform"""', 'input_shape': '(input_size_nn_sfinfo,)'}), "(10, init='lecun_uniform', input_shape=(input_size_nn_sfinfo,))\n", (16894, 16957), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((16982, 17000), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (16992, 17000), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((17062, 17130), 'keras.layers.core.Dense', 'Dense', (['(50)'], {'init': '"""lecun_uniform"""', 'input_shape': '(input_size_nn_vision,)'}), "(50, init='lecun_uniform', input_shape=(input_size_nn_vision,))\n", (17067, 17130), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((17158, 17176), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17168, 17176), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((17202, 17233), 'keras.layers.core.Dense', 'Dense', (['(50)'], {'init': '"""lecun_uniform"""'}), "(50, init='lecun_uniform')\n", (17207, 17233), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((17259, 17277), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17269, 17277), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((17339, 17407), 'keras.layers.core.Dense', 'Dense', (['(50)'], {'init': '"""lecun_uniform"""', 'input_shape': '(input_size_nn_vision,)'}), "(50, init='lecun_uniform', input_shape=(input_size_nn_vision,))\n", (17344, 17407), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((17435, 17453), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17445, 17453), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((17479, 17510), 'keras.layers.core.Dense', 'Dense', (['(50)'], {'init': '"""lecun_uniform"""'}), "(50, init='lecun_uniform')\n", (17484, 17510), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((17536, 17554), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17546, 17554), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((17614, 17682), 'keras.layers.core.Dense', 'Dense', (['(50)'], {'init': '"""lecun_uniform"""', 'input_shape': '(input_size_nn_vision,)'}), "(50, init='lecun_uniform', input_shape=(input_size_nn_vision,))\n", (17619, 17682), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((17709, 17727), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17719, 17727), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((17752, 17783), 'keras.layers.core.Dense', 'Dense', (['(50)'], {'init': '"""lecun_uniform"""'}), "(50, init='lecun_uniform')\n", (17757, 17783), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((17808, 17826), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17818, 17826), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((18009, 18027), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (18019, 18027), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((18045, 18075), 'keras.layers.core.Dense', 'Dense', (['(5)'], {'init': '"""lecun_uniform"""'}), "(5, init='lecun_uniform')\n", (18050, 18075), False, 'from keras.layers.core import Dense, Dropout, 
Activation\n'), ((18092, 18112), 'keras.layers.core.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (18102, 18112), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((6341, 6356), 'random.random', 'random.random', ([], {}), '()\n', (6354, 6356), False, 'import random\n'), ((16189, 16212), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (16206, 16212), True, 'import numpy as np\n'), ((16231, 16248), 'numpy.argmax', 'np.argmax', (['inputs'], {}), '(inputs)\n', (16240, 16248), True, 'import numpy as np\n'), ((12066, 12086), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (12080, 12086), False, 'import random\n'), ((21839, 21855), 'numpy.max', 'np.max', (['new_qval'], {}), '(new_qval)\n', (21845, 21855), True, 'import numpy as np\n'), ((21867, 21883), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {}), '((1, 5))\n', (21875, 21883), True, 'import numpy as np\n'), ((22729, 22775), 'numpy.zeros', 'np.zeros', (['(n_batch_size, input_size_nn_sfinfo)'], {}), '((n_batch_size, input_size_nn_sfinfo))\n', (22737, 22775), True, 'import numpy as np\n'), ((22795, 22841), 'numpy.zeros', 'np.zeros', (['(n_batch_size, input_size_nn_vision)'], {}), '((n_batch_size, input_size_nn_vision))\n', (22803, 22841), True, 'import numpy as np\n'), ((22861, 22907), 'numpy.zeros', 'np.zeros', (['(n_batch_size, input_size_nn_vision)'], {}), '((n_batch_size, input_size_nn_vision))\n', (22869, 22907), True, 'import numpy as np\n'), ((22927, 22973), 'numpy.zeros', 'np.zeros', (['(n_batch_size, input_size_nn_vision)'], {}), '((n_batch_size, input_size_nn_vision))\n', (22935, 22973), True, 'import numpy as np\n'), ((22993, 23020), 'numpy.zeros', 'np.zeros', (['(n_batch_size, 5)'], {}), '((n_batch_size, 5))\n', (23001, 23020), True, 'import numpy as np\n'), ((13572, 13592), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (13586, 13592), False, 'import random\n'), ((19482, 19502), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (19496, 19502), False, 'import random\n'), ((20677, 20692), 'numpy.array', 'np.array', (['obs_0'], {}), '(obs_0)\n', (20685, 20692), True, 'import numpy as np\n'), ((20750, 20765), 'numpy.array', 'np.array', (['obs_1'], {}), '(obs_1)\n', (20758, 20765), True, 'import numpy as np\n'), ((20823, 20838), 'numpy.array', 'np.array', (['obs_2'], {}), '(obs_2)\n', (20831, 20838), True, 'import numpy as np\n'), ((20894, 20909), 'numpy.array', 'np.array', (['obs_3'], {}), '(obs_3)\n', (20902, 20909), True, 'import numpy as np\n'), ((15201, 15216), 'numpy.array', 'np.array', (['obs_0'], {}), '(obs_0)\n', (15209, 15216), True, 'import numpy as np\n'), ((15288, 15303), 'numpy.array', 'np.array', (['obs_1'], {}), '(obs_1)\n', (15296, 15303), True, 'import numpy as np\n'), ((15375, 15390), 'numpy.array', 'np.array', (['obs_2'], {}), '(obs_2)\n', (15383, 15390), True, 'import numpy as np\n'), ((15462, 15477), 'numpy.array', 'np.array', (['obs_3'], {}), '(obs_3)\n', (15470, 15477), True, 'import numpy as np\n'), ((21127, 21142), 'numpy.array', 'np.array', (['obs_0'], {}), '(obs_0)\n', (21135, 21142), True, 'import numpy as np\n'), ((21214, 21229), 'numpy.array', 'np.array', (['obs_1'], {}), '(obs_1)\n', (21222, 21229), True, 'import numpy as np\n'), ((21301, 21316), 'numpy.array', 'np.array', (['obs_2'], {}), '(obs_2)\n', (21309, 21316), True, 'import numpy as np\n'), ((21388, 21403), 'numpy.array', 'np.array', (['obs_3'], {}), '(obs_3)\n', (21396, 21403), True, 'import numpy as np\n'), ((21491, 
21507), 'numpy.array', 'np.array', (['obsp_0'], {}), '(obsp_0)\n', (21499, 21507), True, 'import numpy as np\n'), ((21579, 21595), 'numpy.array', 'np.array', (['obsp_1'], {}), '(obsp_1)\n', (21587, 21595), True, 'import numpy as np\n'), ((21667, 21683), 'numpy.array', 'np.array', (['obsp_2'], {}), '(obsp_2)\n', (21675, 21683), True, 'import numpy as np\n'), ((21755, 21771), 'numpy.array', 'np.array', (['obsp_3'], {}), '(obsp_3)\n', (21763, 21771), True, 'import numpy as np\n'), ((19670, 19685), 'numpy.array', 'np.array', (['obs_0'], {}), '(obs_0)\n', (19678, 19685), True, 'import numpy as np\n'), ((19757, 19772), 'numpy.array', 'np.array', (['obs_1'], {}), '(obs_1)\n', (19765, 19772), True, 'import numpy as np\n'), ((19844, 19859), 'numpy.array', 'np.array', (['obs_2'], {}), '(obs_2)\n', (19852, 19859), True, 'import numpy as np\n'), ((19931, 19946), 'numpy.array', 'np.array', (['obs_3'], {}), '(obs_3)\n', (19939, 19946), True, 'import numpy as np\n')]
|
###############################################################################
# Copyright (c) 2007-2018, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""Tests for non-linear least-squares fitter.
:author: <NAME>
:license: Modified BSD
"""
from __future__ import division
import numpy as np
from numpy.testing import TestCase, assert_almost_equal, run_module_suite
from scikits.fitting import (NonLinearLeastSquaresFit, LinearLeastSquaresFit,
vectorize_fit_func)
class TestNonLinearLeastSquaresFit(TestCase):
"""Check the NonLinearLeastSquaresFit class."""
def setUp(self):
# Quadratic function centred at p
self.vFunc = vectorize_fit_func(lambda p, x: ((x - p) ** 2).sum())
self.true_params = np.array([1, -4])
self.init_params = np.array([0, 0])
self.x = 4.0 * np.random.randn(2, 20)
self.y = self.vFunc(self.true_params, self.x)
# 2-D log Gaussian function
def lngauss_diagcov(p, x):
xminmu = x - p[:2, np.newaxis]
return p[4] - 0.5 * np.dot(p[2:4], xminmu * xminmu)
self.func2 = lngauss_diagcov
self.true_params2 = np.array([3, -2, 10, 10, 4])
self.init_params2 = np.array([0, 0, 1, 1, 0])
self.x2 = np.random.randn(2, 80)
self.y2 = lngauss_diagcov(self.true_params2, self.x2)
# Linear function
self.func3 = lambda p, x: np.dot(p, x)
self.jac3 = lambda p, x: x
self.true_params3 = np.array([-0.1, 0.2, -0.3, 0.0, 0.5])
self.init_params3 = np.zeros(5)
self.enabled_params_int = [0, 1, 2, 4]
self.enabled_params_bool = [True, True, True, False, True]
t = np.arange(0, 10., 10. / 100)
self.x3 = np.vander(t, 5).T
self.y3 = self.func3(self.true_params3, self.x3)
def test_fit_eval_func1(self):
"""NonLinearLeastSquaresFit: Basic function fitting and evaluation."""
interp = NonLinearLeastSquaresFit(self.vFunc, self.init_params)
interp.fit(self.x, self.y)
y = interp(self.x)
assert_almost_equal(interp.params, self.true_params, decimal=7)
assert_almost_equal(y, self.y, decimal=5)
def test_fit_eval_gauss(self):
"""NonLinearLeastSquaresFit: Check fit on 2-D log Gaussian function."""
interp2 = NonLinearLeastSquaresFit(self.func2, self.init_params2)
interp2.fit(self.x2, self.y2)
y2 = interp2(self.x2)
assert_almost_equal(interp2.params, self.true_params2, decimal=10)
assert_almost_equal(y2, self.y2, decimal=10)
def test_fit_eval_linear(self):
"""NonLinearLeastSquaresFit: Do linear problem and check Jacobian."""
lin = LinearLeastSquaresFit()
lin.fit(self.x3, self.y3, std_y=2.0)
nonlin = NonLinearLeastSquaresFit(self.func3, self.init_params3,
func_jacobian=self.jac3)
nonlin.fit(self.x3, self.y3, std_y=2.0)
# A correct Jacobian helps a lot...
assert_almost_equal(nonlin.params, self.true_params3, decimal=11)
assert_almost_equal(nonlin.cov_params, lin.cov_params, decimal=11)
nonlin_nojac = NonLinearLeastSquaresFit(self.func3, self.init_params3)
nonlin_nojac.fit(self.x3, self.y3, std_y=0.1)
assert_almost_equal(nonlin_nojac.params, self.true_params3, decimal=5)
# Covariance matrix is way smaller than linear one...
def test_enabled_params(self):
"""NonLinearLeastSquaresFit: Try to optimise subset of parameters."""
lin = LinearLeastSquaresFit()
lin.fit(self.x3[self.enabled_params_int, :], self.y3, std_y=2.0)
lin_cov_params = np.zeros((len(self.true_params3),
len(self.true_params3)))
subset = np.ix_(self.enabled_params_int, self.enabled_params_int)
lin_cov_params[subset] = lin.cov_params
nonlin = NonLinearLeastSquaresFit(self.func3, self.init_params3,
self.enabled_params_int, self.jac3)
nonlin.fit(self.x3, self.y3, std_y=2.0)
assert_almost_equal(nonlin.params, self.true_params3, decimal=11)
assert_almost_equal(nonlin.cov_params, lin_cov_params, decimal=11)
nonlin = NonLinearLeastSquaresFit(self.func3, self.init_params3,
self.enabled_params_bool, self.jac3)
nonlin.fit(self.x3, self.y3, std_y=2.0)
assert_almost_equal(nonlin.params, self.true_params3, decimal=11)
assert_almost_equal(nonlin.cov_params, lin_cov_params, decimal=11)
if __name__ == "__main__":
run_module_suite()
|
[
"scikits.fitting.LinearLeastSquaresFit",
"numpy.vander",
"scikits.fitting.NonLinearLeastSquaresFit",
"numpy.ix_",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_almost_equal",
"numpy.dot",
"numpy.testing.run_module_suite",
"numpy.random.randn",
"numpy.arange"
] |
[((5252, 5270), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (5268, 5270), False, 'from numpy.testing import TestCase, assert_almost_equal, run_module_suite\n'), ((1392, 1409), 'numpy.array', 'np.array', (['[1, -4]'], {}), '([1, -4])\n', (1400, 1409), True, 'import numpy as np\n'), ((1437, 1453), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1445, 1453), True, 'import numpy as np\n'), ((1798, 1826), 'numpy.array', 'np.array', (['[3, -2, 10, 10, 4]'], {}), '([3, -2, 10, 10, 4])\n', (1806, 1826), True, 'import numpy as np\n'), ((1855, 1880), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0]'], {}), '([0, 0, 1, 1, 0])\n', (1863, 1880), True, 'import numpy as np\n'), ((1899, 1921), 'numpy.random.randn', 'np.random.randn', (['(2)', '(80)'], {}), '(2, 80)\n', (1914, 1921), True, 'import numpy as np\n'), ((2120, 2157), 'numpy.array', 'np.array', (['[-0.1, 0.2, -0.3, 0.0, 0.5]'], {}), '([-0.1, 0.2, -0.3, 0.0, 0.5])\n', (2128, 2157), True, 'import numpy as np\n'), ((2186, 2197), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2194, 2197), True, 'import numpy as np\n'), ((2324, 2354), 'numpy.arange', 'np.arange', (['(0)', '(10.0)', '(10.0 / 100)'], {}), '(0, 10.0, 10.0 / 100)\n', (2333, 2354), True, 'import numpy as np\n'), ((2578, 2632), 'scikits.fitting.NonLinearLeastSquaresFit', 'NonLinearLeastSquaresFit', (['self.vFunc', 'self.init_params'], {}), '(self.vFunc, self.init_params)\n', (2602, 2632), False, 'from scikits.fitting import NonLinearLeastSquaresFit, LinearLeastSquaresFit, vectorize_fit_func\n'), ((2703, 2766), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['interp.params', 'self.true_params'], {'decimal': '(7)'}), '(interp.params, self.true_params, decimal=7)\n', (2722, 2766), False, 'from numpy.testing import TestCase, assert_almost_equal, run_module_suite\n'), ((2775, 2816), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['y', 'self.y'], {'decimal': '(5)'}), '(y, self.y, decimal=5)\n', (2794, 2816), False, 'from numpy.testing import TestCase, assert_almost_equal, run_module_suite\n'), ((2951, 3006), 'scikits.fitting.NonLinearLeastSquaresFit', 'NonLinearLeastSquaresFit', (['self.func2', 'self.init_params2'], {}), '(self.func2, self.init_params2)\n', (2975, 3006), False, 'from scikits.fitting import NonLinearLeastSquaresFit, LinearLeastSquaresFit, vectorize_fit_func\n'), ((3083, 3149), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['interp2.params', 'self.true_params2'], {'decimal': '(10)'}), '(interp2.params, self.true_params2, decimal=10)\n', (3102, 3149), False, 'from numpy.testing import TestCase, assert_almost_equal, run_module_suite\n'), ((3158, 3202), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['y2', 'self.y2'], {'decimal': '(10)'}), '(y2, self.y2, decimal=10)\n', (3177, 3202), False, 'from numpy.testing import TestCase, assert_almost_equal, run_module_suite\n'), ((3332, 3355), 'scikits.fitting.LinearLeastSquaresFit', 'LinearLeastSquaresFit', ([], {}), '()\n', (3353, 3355), False, 'from scikits.fitting import NonLinearLeastSquaresFit, LinearLeastSquaresFit, vectorize_fit_func\n'), ((3418, 3503), 'scikits.fitting.NonLinearLeastSquaresFit', 'NonLinearLeastSquaresFit', (['self.func3', 'self.init_params3'], {'func_jacobian': 'self.jac3'}), '(self.func3, self.init_params3, func_jacobian=self.jac3\n )\n', (3442, 3503), False, 'from scikits.fitting import NonLinearLeastSquaresFit, LinearLeastSquaresFit, vectorize_fit_func\n'), ((3641, 3706), 'numpy.testing.assert_almost_equal', 
'assert_almost_equal', (['nonlin.params', 'self.true_params3'], {'decimal': '(11)'}), '(nonlin.params, self.true_params3, decimal=11)\n', (3660, 3706), False, 'from numpy.testing import TestCase, assert_almost_equal, run_module_suite\n'), ((3715, 3781), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nonlin.cov_params', 'lin.cov_params'], {'decimal': '(11)'}), '(nonlin.cov_params, lin.cov_params, decimal=11)\n', (3734, 3781), False, 'from numpy.testing import TestCase, assert_almost_equal, run_module_suite\n'), ((3805, 3860), 'scikits.fitting.NonLinearLeastSquaresFit', 'NonLinearLeastSquaresFit', (['self.func3', 'self.init_params3'], {}), '(self.func3, self.init_params3)\n', (3829, 3860), False, 'from scikits.fitting import NonLinearLeastSquaresFit, LinearLeastSquaresFit, vectorize_fit_func\n'), ((3923, 3993), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nonlin_nojac.params', 'self.true_params3'], {'decimal': '(5)'}), '(nonlin_nojac.params, self.true_params3, decimal=5)\n', (3942, 3993), False, 'from numpy.testing import TestCase, assert_almost_equal, run_module_suite\n'), ((4184, 4207), 'scikits.fitting.LinearLeastSquaresFit', 'LinearLeastSquaresFit', ([], {}), '()\n', (4205, 4207), False, 'from scikits.fitting import NonLinearLeastSquaresFit, LinearLeastSquaresFit, vectorize_fit_func\n'), ((4417, 4473), 'numpy.ix_', 'np.ix_', (['self.enabled_params_int', 'self.enabled_params_int'], {}), '(self.enabled_params_int, self.enabled_params_int)\n', (4423, 4473), True, 'import numpy as np\n'), ((4539, 4635), 'scikits.fitting.NonLinearLeastSquaresFit', 'NonLinearLeastSquaresFit', (['self.func3', 'self.init_params3', 'self.enabled_params_int', 'self.jac3'], {}), '(self.func3, self.init_params3, self.\n enabled_params_int, self.jac3)\n', (4563, 4635), False, 'from scikits.fitting import NonLinearLeastSquaresFit, LinearLeastSquaresFit, vectorize_fit_func\n'), ((4729, 4794), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nonlin.params', 'self.true_params3'], {'decimal': '(11)'}), '(nonlin.params, self.true_params3, decimal=11)\n', (4748, 4794), False, 'from numpy.testing import TestCase, assert_almost_equal, run_module_suite\n'), ((4803, 4869), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nonlin.cov_params', 'lin_cov_params'], {'decimal': '(11)'}), '(nonlin.cov_params, lin_cov_params, decimal=11)\n', (4822, 4869), False, 'from numpy.testing import TestCase, assert_almost_equal, run_module_suite\n'), ((4887, 4984), 'scikits.fitting.NonLinearLeastSquaresFit', 'NonLinearLeastSquaresFit', (['self.func3', 'self.init_params3', 'self.enabled_params_bool', 'self.jac3'], {}), '(self.func3, self.init_params3, self.\n enabled_params_bool, self.jac3)\n', (4911, 4984), False, 'from scikits.fitting import NonLinearLeastSquaresFit, LinearLeastSquaresFit, vectorize_fit_func\n'), ((5078, 5143), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nonlin.params', 'self.true_params3'], {'decimal': '(11)'}), '(nonlin.params, self.true_params3, decimal=11)\n', (5097, 5143), False, 'from numpy.testing import TestCase, assert_almost_equal, run_module_suite\n'), ((5152, 5218), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['nonlin.cov_params', 'lin_cov_params'], {'decimal': '(11)'}), '(nonlin.cov_params, lin_cov_params, decimal=11)\n', (5171, 5218), False, 'from numpy.testing import TestCase, assert_almost_equal, run_module_suite\n'), ((1477, 1499), 'numpy.random.randn', 'np.random.randn', (['(2)', '(20)'], {}), '(2, 20)\n', (1492, 
1499), True, 'import numpy as np\n'), ((2044, 2056), 'numpy.dot', 'np.dot', (['p', 'x'], {}), '(p, x)\n', (2050, 2056), True, 'import numpy as np\n'), ((2371, 2386), 'numpy.vander', 'np.vander', (['t', '(5)'], {}), '(t, 5)\n', (2380, 2386), True, 'import numpy as np\n'), ((1701, 1732), 'numpy.dot', 'np.dot', (['p[2:4]', '(xminmu * xminmu)'], {}), '(p[2:4], xminmu * xminmu)\n', (1707, 1732), True, 'import numpy as np\n')]
|
"""Main module."""
__authors__ = '<NAME>, <NAME>'
__version__ = '1.0'
__date__ = '9/10/2017'
import json
import os.path
import pickle
import random
import urllib
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import naive_bayes
from sklearn import svm
from sklearn.metrics import accuracy_score
from textblob import TextBlob
import matplotlib.pyplot as plt
import requests
import numpy as np
SETTINGS_PATH = 'settings.json'
RAW_PATH = 'data/raw.json'
STORIES_PATH = 'data/with_stories.json'
LABELS_PATH = 'data/with_labels.json'
SENTIMENTS_PATH = 'data/with_sentiments.json'
MNB_PATH = 'models/mnb.pkl'
SVM_PATH = 'models/svm.pkl'
COUNT_VECT_PATH = 'models/count_vect.pkl'
TFIDF_VECT_PATH = 'models/tfidf_vect.pkl'
BASE_URI = 'http://api.nytimes.com/svc/mostpopular/v2'
TYPE = 'mostviewed'
SECTION = 'all-sections'
TIME_PERIOD = '1'
RESPONSE_FORMAT = 'json'
def query(num_queries=1):
"""Request data from NYT and store it as a json file.
Args:
num_queries (int): The number of queries
"""
# Load API key
settings = json.load(open(SETTINGS_PATH))
API_KEY = settings['API_KEY']
# Send requests
URI = f'{BASE_URI}/{TYPE}/{SECTION}/{TIME_PERIOD}.{RESPONSE_FORMAT}'
articles = []
for k in range(num_queries):
print(f'Running query {k+1}...')
offset = k * 20
payload = {'api_key': API_KEY, 'offset': offset}
response = requests.get(URI, params=payload)
articles += response.json()['results']
# Save to file
with open(RAW_PATH, 'w') as output_file:
json.dump(articles, output_file)
def scrape_stories():
"""Get full document texts from urls."""
# Load articles
articles = json.load(open(RAW_PATH))
# Submit GET request and parse response content
for k, article in enumerate(articles):
print(f'Scraping article {k+1}...')
url = article['url']
f = urllib.request.urlopen(url)
soup = BeautifulSoup(f, 'html5lib')
story = ''
        for par in soup.find_all('p', class_='story-body-text story-content'):
if par.string:
story += ' ' + par.string
article.update({'story': story})
# Save articles
with open(STORIES_PATH, 'w') as output_file:
json.dump(articles, output_file)
def label_articles(reset=False, relabel=False, start=0, rand_labels=False):
"""Run UI for sentiment labeling.
Loads all articles and presents those without a label.
Args:
reset (boolean): Delete all labels
relabel (boolean): Allow option to override existing labels
start (int): Article number to start from
rand_labels (boolean): Assign all random labels
"""
# Load articles
if reset or not os.path.isfile(LABELS_PATH):
articles = json.load(open(STORIES_PATH))
else:
articles = json.load(open(LABELS_PATH))
if start >= len(articles):
raise ValueError(f'Invalid starting point: {start}')
# Label articles
sentiments = [-1, 1]
print(f'Available sentiments: {sentiments}')
for k, article in enumerate(articles[start:]):
if not relabel and 'sentiment' in article:
continue
print(f'Article: {k+start+1}')
print(f"Title: {article['title']}")
print(f"Abstract: {article['abstract']}")
if rand_labels:
sent = random.choice(sentiments)
else:
try:
sent = int(input('Label: '))
except ValueError:
break
if sent not in sentiments:
break
article.update({'sentiment': sent})
print('----------------------------')
# Save articles
with open(LABELS_PATH, 'w') as output_file:
json.dump(articles, output_file)
def train_model(random_state=None):
"""Train a sentiment analyzer model.
Args:
random_state (int): Random seed for train_test_split used by numpy
"""
# Load articles
articles = json.load(open(LABELS_PATH))
# Extract data
articles = [article for article in articles if 'sentiment' in article]
stopset = set(stopwords.words('english'))
titles = [article['title'] for article in articles]
labels = [article['sentiment'] for article in articles]
# Vectorize data
count_vect = CountVectorizer(lowercase=True,
strip_accents='ascii',
stop_words=stopset,
decode_error='replace')
tfidf_vect = TfidfVectorizer(use_idf=True,
lowercase=True,
strip_accents='ascii',
stop_words=stopset,
decode_error='replace')
# Analyze and display relevant information
num_total = len(articles)
num_pos = sum(article['sentiment'] == 1 for article in articles)
num_neg = sum(article['sentiment'] == -1 for article in articles)
print(f'Found {num_total} labeled articles')
print(f'{num_pos} +, {num_neg} -')
# Train using count vectorizer
print('Vectorizing using bag of words...')
x = count_vect.fit_transform(titles)
y = labels
if random_state is not None:
x_train, x_test, y_train, y_test = train_test_split(
x, y, random_state=random_state)
else:
x_train, x_test, y_train, y_test = train_test_split(x, y)
mnb_clf = naive_bayes.MultinomialNB()
mnb_clf.fit(x_train, y_train)
y_pred = mnb_clf.predict(x_test)
mnb_acc = accuracy_score(y_test, y_pred) * 100
print('Naive Bayes: %.2f%% accuracy' % mnb_acc)
svm_clf = svm.SVC(probability=True)
svm_clf.fit(x_train, y_train)
y_pred = svm_clf.predict(x_test)
svm_acc = accuracy_score(y_test, y_pred) * 100
print('SVM: %.2f%% accuracy' % svm_acc)
# Train using tfidf vectorizer
print('Vectorizing using tfidf...')
x = tfidf_vect.fit_transform(titles)
y = labels
if random_state is not None:
x_train, x_test, y_train, y_test = train_test_split(
x, y, random_state=random_state)
else:
x_train, x_test, y_train, y_test = train_test_split(x, y)
mnb_clf = naive_bayes.MultinomialNB()
mnb_clf.fit(x_train, y_train)
y_pred = mnb_clf.predict(x_test)
mnb_acc = accuracy_score(y_test, y_pred) * 100
print('Naive Bayes: %.2f%% accuracy' % mnb_acc)
svm_clf = svm.SVC(probability=True)
svm_clf.fit(x_train, y_train)
y_pred = svm_clf.predict(x_test)
svm_acc = accuracy_score(y_test, y_pred) * 100
print('SVM: %.2f%% accuracy' % svm_acc)
# Store vectorizers and trained classifiers
    with open(MNB_PATH, 'wb') as output_file:
        pickle.dump(mnb_clf, output_file)
    with open(SVM_PATH, 'wb') as output_file:
        pickle.dump(svm_clf, output_file)
with open(COUNT_VECT_PATH, 'wb') as output_file:
pickle.dump(count_vect.vocabulary_, output_file)
with open(TFIDF_VECT_PATH, 'wb') as output_file:
pickle.dump(tfidf_vect.vocabulary_, output_file)
def analyze():
"""Analyze article data."""
# Calculate sentiment scores
articles = json.load(open(LABELS_PATH))
mnb_clf = pickle.load(open(MNB_PATH, 'rb'))
svm_clf = pickle.load(open(SVM_PATH, 'rb'))
count_vocabulary = pickle.load(open(COUNT_VECT_PATH, 'rb'))
tfidf_vocabulary = pickle.load(open(TFIDF_VECT_PATH, 'rb'))
stopset = set(stopwords.words('english'))
count_vect = CountVectorizer(lowercase=True,
strip_accents='ascii',
stop_words=stopset,
decode_error='replace',
vocabulary=count_vocabulary)
tfidf_vect = TfidfVectorizer(use_idf=True,
lowercase=True,
strip_accents='ascii',
stop_words=stopset,
decode_error='replace',
vocabulary=tfidf_vocabulary)
for k, article in enumerate(articles):
title = article['title']
abstract = article['abstract']
story = article['story']
print(f'{k+1}: {title}')
title_sent = TextBlob(title).sentiment
abstract_sent = TextBlob(abstract).sentiment
story_sent = TextBlob(story).sentiment
article.update({'title_sent': title_sent,
'abstract_sent': abstract_sent,
'story_sent': story_sent})
print(f'{title_sent} {abstract_sent} {story_sent}')
count = count_vect.fit_transform([title])
tfidf = tfidf_vect.fit_transform([title])
article.update({'count_mnb_sent': mnb_clf.predict(count).item(0),
'count_svm_sent': svm_clf.predict(count).item(0),
'tfidf_mnb_sent': mnb_clf.predict(tfidf).item(0),
'tfidf_svm_sent': svm_clf.predict(tfidf).item(0)})
# Test TextBlob performance
num_total = 0
num_correct = 0
for article in articles:
if 'sentiment' not in article:
continue
title_sent = article['title_sent'].polarity
true_sent = article['sentiment']
if title_sent == 0:
continue
if _sign(title_sent) == true_sent:
num_correct += 1
num_total += 1
acc = num_correct / num_total * 100
print('=========================')
print('TextBlob accuracy: %.2f' % acc)
print('=========================')
# Determine min, max, mean, and std
title_sents = np.array([a['title_sent'] for a in articles])
abstract_sents = np.array([a['abstract_sent'] for a in articles])
story_sents = np.array([a['story_sent'] for a in articles])
print('Title Sentiments')
print('----------------')
print(f'min: {np.min(title_sents)}')
print(f'max: {np.max(title_sents)}')
print(f'mean: {np.mean(title_sents)}')
print(f'std: {np.std(title_sents)}')
print()
print('Abstract Sentiments')
print('-------------------')
print(f'min: {np.min(abstract_sents)}')
print(f'max: {np.max(abstract_sents)}')
print(f'mean: {np.mean(abstract_sents)}')
print(f'std: {np.std(abstract_sents)}')
print()
print('Story Sentiments')
print('----------------')
print(f'min: {np.min(story_sents)}')
print(f'max: {np.max(story_sents)}')
print(f'mean: {np.mean(story_sents)}')
print(f'std: {np.std(story_sents)}')
print()
# Save to file
with open(SENTIMENTS_PATH, 'w') as output_file:
json.dump(articles, output_file)
def visualize():
"""Visualize the data."""
# Load data
articles = json.load(open(SENTIMENTS_PATH))
title_sents = [article['title_sent'][0] for article in articles]
abstract_sents = [article['abstract_sent'][0] for article in articles]
story_sents = [article['story_sent'][0] for article in articles]
count_mnb_sents = [article['count_mnb_sent'] for article in articles]
count_svm_sents = [article['count_svm_sent'] for article in articles]
tfidf_mnb_sents = [article['tfidf_mnb_sent'] for article in articles]
tfidf_svm_sents = [article['tfidf_svm_sent'] for article in articles]
view_rank = range(1, len(articles) + 1)
# Calculate trendlines
z1 = np.polyfit(view_rank, title_sents, 1)
p1 = np.poly1d(z1)
z2 = np.polyfit(view_rank, abstract_sents, 1)
p2 = np.poly1d(z2)
z3 = np.polyfit(view_rank, story_sents, 1)
p3 = np.poly1d(z3)
z4 = np.polyfit(view_rank, count_mnb_sents, 1)
p4 = np.poly1d(z4)
z5 = np.polyfit(view_rank, count_svm_sents, 1)
p5 = np.poly1d(z5)
z6 = np.polyfit(view_rank, tfidf_mnb_sents, 1)
p6 = np.poly1d(z6)
z7 = np.polyfit(view_rank, tfidf_svm_sents, 1)
p7 = np.poly1d(z7)
# Compute moving average
window_size = 10
window = np.ones(int(window_size))/float(window_size)
count_svm_sents_ma = np.convolve(count_svm_sents, window, 'same')
tfidf_svm_sents_ma = np.convolve(tfidf_svm_sents, window, 'same')
# Plot sentiment versus view rank
# TextBlob
plt.figure(1)
plt.subplot(1, 3, 1)
plt.scatter(view_rank, title_sents, s=5)
plt.plot(view_rank, p1(view_rank), 'r--')
plt.title('Title Sentiment')
plt.xlabel('View Rank')
plt.ylabel('Sentiment Score')
plt.ylim(-1.1, 1.1)
plt.subplot(1, 3, 2)
plt.scatter(view_rank, abstract_sents, s=5)
plt.plot(view_rank, p2(view_rank), 'r--')
plt.title('Abstract Sentiment')
plt.xlabel('View Rank')
plt.ylim(-1.1, 1.1)
plt.subplot(1, 3, 3)
plt.scatter(view_rank, story_sents, s=5)
plt.plot(view_rank, p3(view_rank), 'r--')
plt.title('Story Sentiment')
plt.xlabel('View Rank')
plt.ylim(-1.1, 1.1)
# sklearn classifiers
plt.figure(2)
plt.subplot(2, 2, 1)
plt.scatter(view_rank, count_mnb_sents, s=5)
plt.plot(view_rank, p4(view_rank), 'r--')
plt.title('Bag of Words + Naive Bayes')
plt.ylabel('Sentiment Score')
plt.ylim(-1.1, 1.1)
plt.subplot(2, 2, 2)
plt.scatter(view_rank, count_svm_sents, s=5)
plt.scatter(view_rank, count_svm_sents_ma, s=5, facecolor='0.5')
plt.plot(view_rank, p5(view_rank), 'r--')
plt.title('Bag of Words + SVM')
plt.ylim(-1.1, 1.1)
plt.subplot(2, 2, 3)
plt.scatter(view_rank, tfidf_mnb_sents, s=5)
plt.plot(view_rank, p6(view_rank), 'r--')
plt.title('Tfidf + Naive Bayes')
plt.xlabel('View Rank')
plt.ylabel('Sentiment Score')
plt.ylim(-1.1, 1.1)
plt.subplot(2, 2, 4)
plt.scatter(view_rank, tfidf_svm_sents, s=5)
plt.scatter(view_rank, tfidf_svm_sents_ma, s=5, facecolor='0.5')
plt.plot(view_rank, p7(view_rank), 'r--')
plt.title('Tfidf + SVM')
plt.xlabel('View Rank')
plt.ylim(-1.1, 1.1)
plt.show()
def _sign(x):
if x < 0:
return -1
elif x > 0:
return 1
else:
return 0
|
[
"numpy.convolve",
"numpy.polyfit",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.poly1d",
"textblob.TextBlob",
"numpy.mean",
"nltk.corpus.stopwords.words",
"sklearn.feature_extraction.text.CountVectorizer",
"matplotlib.pyplot.xlabel",
"numpy.max",
"sklearn.naive_bayes.MultinomialNB",
"matplotlib.pyplot.scatter",
"numpy.min",
"matplotlib.pyplot.ylim",
"urllib.request.urlopen",
"random.choice",
"sklearn.model_selection.train_test_split",
"requests.get",
"numpy.std",
"matplotlib.pyplot.title",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.show",
"sklearn.svm.SVC",
"pickle.dump",
"bs4.BeautifulSoup",
"sklearn.feature_extraction.text.TfidfVectorizer",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"json.dump"
] |
[((4568, 4670), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'lowercase': '(True)', 'strip_accents': '"""ascii"""', 'stop_words': 'stopset', 'decode_error': '"""replace"""'}), "(lowercase=True, strip_accents='ascii', stop_words=stopset,\n decode_error='replace')\n", (4583, 4670), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((4783, 4899), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'use_idf': '(True)', 'lowercase': '(True)', 'strip_accents': '"""ascii"""', 'stop_words': 'stopset', 'decode_error': '"""replace"""'}), "(use_idf=True, lowercase=True, strip_accents='ascii',\n stop_words=stopset, decode_error='replace')\n", (4798, 4899), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((5706, 5733), 'sklearn.naive_bayes.MultinomialNB', 'naive_bayes.MultinomialNB', ([], {}), '()\n', (5731, 5733), False, 'from sklearn import naive_bayes\n'), ((5923, 5948), 'sklearn.svm.SVC', 'svm.SVC', ([], {'probability': '(True)'}), '(probability=True)\n', (5930, 5948), False, 'from sklearn import svm\n'), ((6481, 6508), 'sklearn.naive_bayes.MultinomialNB', 'naive_bayes.MultinomialNB', ([], {}), '()\n', (6506, 6508), False, 'from sklearn import naive_bayes\n'), ((6698, 6723), 'sklearn.svm.SVC', 'svm.SVC', ([], {'probability': '(True)'}), '(probability=True)\n', (6705, 6723), False, 'from sklearn import svm\n'), ((7748, 7879), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'lowercase': '(True)', 'strip_accents': '"""ascii"""', 'stop_words': 'stopset', 'decode_error': '"""replace"""', 'vocabulary': 'count_vocabulary'}), "(lowercase=True, strip_accents='ascii', stop_words=stopset,\n decode_error='replace', vocabulary=count_vocabulary)\n", (7763, 7879), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((8025, 8170), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'use_idf': '(True)', 'lowercase': '(True)', 'strip_accents': '"""ascii"""', 'stop_words': 'stopset', 'decode_error': '"""replace"""', 'vocabulary': 'tfidf_vocabulary'}), "(use_idf=True, lowercase=True, strip_accents='ascii',\n stop_words=stopset, decode_error='replace', vocabulary=tfidf_vocabulary)\n", (8040, 8170), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((9892, 9937), 'numpy.array', 'np.array', (["[a['title_sent'] for a in articles]"], {}), "([a['title_sent'] for a in articles])\n", (9900, 9937), True, 'import numpy as np\n'), ((9959, 10007), 'numpy.array', 'np.array', (["[a['abstract_sent'] for a in articles]"], {}), "([a['abstract_sent'] for a in articles])\n", (9967, 10007), True, 'import numpy as np\n'), ((10026, 10071), 'numpy.array', 'np.array', (["[a['story_sent'] for a in articles]"], {}), "([a['story_sent'] for a in articles])\n", (10034, 10071), True, 'import numpy as np\n'), ((11624, 11661), 'numpy.polyfit', 'np.polyfit', (['view_rank', 'title_sents', '(1)'], {}), '(view_rank, title_sents, 1)\n', (11634, 11661), True, 'import numpy as np\n'), ((11671, 11684), 'numpy.poly1d', 'np.poly1d', (['z1'], {}), '(z1)\n', (11680, 11684), True, 'import numpy as np\n'), ((11694, 11734), 'numpy.polyfit', 'np.polyfit', (['view_rank', 'abstract_sents', '(1)'], {}), '(view_rank, abstract_sents, 1)\n', (11704, 11734), True, 'import numpy as np\n'), ((11744, 11757), 'numpy.poly1d', 'np.poly1d', (['z2'], {}), '(z2)\n', (11753, 11757), True, 'import numpy as np\n'), ((11767, 11804), 'numpy.polyfit', 'np.polyfit', (['view_rank', 
'story_sents', '(1)'], {}), '(view_rank, story_sents, 1)\n', (11777, 11804), True, 'import numpy as np\n'), ((11814, 11827), 'numpy.poly1d', 'np.poly1d', (['z3'], {}), '(z3)\n', (11823, 11827), True, 'import numpy as np\n'), ((11838, 11879), 'numpy.polyfit', 'np.polyfit', (['view_rank', 'count_mnb_sents', '(1)'], {}), '(view_rank, count_mnb_sents, 1)\n', (11848, 11879), True, 'import numpy as np\n'), ((11889, 11902), 'numpy.poly1d', 'np.poly1d', (['z4'], {}), '(z4)\n', (11898, 11902), True, 'import numpy as np\n'), ((11912, 11953), 'numpy.polyfit', 'np.polyfit', (['view_rank', 'count_svm_sents', '(1)'], {}), '(view_rank, count_svm_sents, 1)\n', (11922, 11953), True, 'import numpy as np\n'), ((11963, 11976), 'numpy.poly1d', 'np.poly1d', (['z5'], {}), '(z5)\n', (11972, 11976), True, 'import numpy as np\n'), ((11986, 12027), 'numpy.polyfit', 'np.polyfit', (['view_rank', 'tfidf_mnb_sents', '(1)'], {}), '(view_rank, tfidf_mnb_sents, 1)\n', (11996, 12027), True, 'import numpy as np\n'), ((12037, 12050), 'numpy.poly1d', 'np.poly1d', (['z6'], {}), '(z6)\n', (12046, 12050), True, 'import numpy as np\n'), ((12060, 12101), 'numpy.polyfit', 'np.polyfit', (['view_rank', 'tfidf_svm_sents', '(1)'], {}), '(view_rank, tfidf_svm_sents, 1)\n', (12070, 12101), True, 'import numpy as np\n'), ((12111, 12124), 'numpy.poly1d', 'np.poly1d', (['z7'], {}), '(z7)\n', (12120, 12124), True, 'import numpy as np\n'), ((12259, 12303), 'numpy.convolve', 'np.convolve', (['count_svm_sents', 'window', '"""same"""'], {}), "(count_svm_sents, window, 'same')\n", (12270, 12303), True, 'import numpy as np\n'), ((12329, 12373), 'numpy.convolve', 'np.convolve', (['tfidf_svm_sents', 'window', '"""same"""'], {}), "(tfidf_svm_sents, window, 'same')\n", (12340, 12373), True, 'import numpy as np\n'), ((12432, 12445), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (12442, 12445), True, 'import matplotlib.pyplot as plt\n'), ((12450, 12470), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (12461, 12470), True, 'import matplotlib.pyplot as plt\n'), ((12475, 12515), 'matplotlib.pyplot.scatter', 'plt.scatter', (['view_rank', 'title_sents'], {'s': '(5)'}), '(view_rank, title_sents, s=5)\n', (12486, 12515), True, 'import matplotlib.pyplot as plt\n'), ((12566, 12594), 'matplotlib.pyplot.title', 'plt.title', (['"""Title Sentiment"""'], {}), "('Title Sentiment')\n", (12575, 12594), True, 'import matplotlib.pyplot as plt\n'), ((12599, 12622), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""View Rank"""'], {}), "('View Rank')\n", (12609, 12622), True, 'import matplotlib.pyplot as plt\n'), ((12627, 12656), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sentiment Score"""'], {}), "('Sentiment Score')\n", (12637, 12656), True, 'import matplotlib.pyplot as plt\n'), ((12661, 12680), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (12669, 12680), True, 'import matplotlib.pyplot as plt\n'), ((12686, 12706), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (12697, 12706), True, 'import matplotlib.pyplot as plt\n'), ((12711, 12754), 'matplotlib.pyplot.scatter', 'plt.scatter', (['view_rank', 'abstract_sents'], {'s': '(5)'}), '(view_rank, abstract_sents, s=5)\n', (12722, 12754), True, 'import matplotlib.pyplot as plt\n'), ((12805, 12836), 'matplotlib.pyplot.title', 'plt.title', (['"""Abstract Sentiment"""'], {}), "('Abstract Sentiment')\n", (12814, 12836), True, 'import matplotlib.pyplot as plt\n'), ((12841, 12864), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""View Rank"""'], {}), "('View Rank')\n", (12851, 12864), True, 'import matplotlib.pyplot as plt\n'), ((12869, 12888), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (12877, 12888), True, 'import matplotlib.pyplot as plt\n'), ((12894, 12914), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (12905, 12914), True, 'import matplotlib.pyplot as plt\n'), ((12919, 12959), 'matplotlib.pyplot.scatter', 'plt.scatter', (['view_rank', 'story_sents'], {'s': '(5)'}), '(view_rank, story_sents, s=5)\n', (12930, 12959), True, 'import matplotlib.pyplot as plt\n'), ((13010, 13038), 'matplotlib.pyplot.title', 'plt.title', (['"""Story Sentiment"""'], {}), "('Story Sentiment')\n", (13019, 13038), True, 'import matplotlib.pyplot as plt\n'), ((13043, 13066), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""View Rank"""'], {}), "('View Rank')\n", (13053, 13066), True, 'import matplotlib.pyplot as plt\n'), ((13071, 13090), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (13079, 13090), True, 'import matplotlib.pyplot as plt\n'), ((13122, 13135), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (13132, 13135), True, 'import matplotlib.pyplot as plt\n'), ((13140, 13160), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (13151, 13160), True, 'import matplotlib.pyplot as plt\n'), ((13165, 13209), 'matplotlib.pyplot.scatter', 'plt.scatter', (['view_rank', 'count_mnb_sents'], {'s': '(5)'}), '(view_rank, count_mnb_sents, s=5)\n', (13176, 13209), True, 'import matplotlib.pyplot as plt\n'), ((13260, 13299), 'matplotlib.pyplot.title', 'plt.title', (['"""Bag of Words + Naive Bayes"""'], {}), "('Bag of Words + Naive Bayes')\n", (13269, 13299), True, 'import matplotlib.pyplot as plt\n'), ((13304, 13333), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sentiment Score"""'], {}), "('Sentiment Score')\n", (13314, 13333), True, 'import matplotlib.pyplot as plt\n'), ((13338, 13357), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (13346, 13357), True, 'import matplotlib.pyplot as plt\n'), ((13363, 13383), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (13374, 13383), True, 'import matplotlib.pyplot as plt\n'), ((13388, 13432), 'matplotlib.pyplot.scatter', 'plt.scatter', (['view_rank', 'count_svm_sents'], {'s': '(5)'}), '(view_rank, count_svm_sents, s=5)\n', (13399, 13432), True, 'import matplotlib.pyplot as plt\n'), ((13437, 13501), 'matplotlib.pyplot.scatter', 'plt.scatter', (['view_rank', 'count_svm_sents_ma'], {'s': '(5)', 'facecolor': '"""0.5"""'}), "(view_rank, count_svm_sents_ma, s=5, facecolor='0.5')\n", (13448, 13501), True, 'import matplotlib.pyplot as plt\n'), ((13552, 13583), 'matplotlib.pyplot.title', 'plt.title', (['"""Bag of Words + SVM"""'], {}), "('Bag of Words + SVM')\n", (13561, 13583), True, 'import matplotlib.pyplot as plt\n'), ((13588, 13607), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (13596, 13607), True, 'import matplotlib.pyplot as plt\n'), ((13613, 13633), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (13624, 13633), True, 'import matplotlib.pyplot as plt\n'), ((13638, 13682), 'matplotlib.pyplot.scatter', 'plt.scatter', (['view_rank', 'tfidf_mnb_sents'], {'s': '(5)'}), '(view_rank, tfidf_mnb_sents, s=5)\n', (13649, 13682), True, 'import 
matplotlib.pyplot as plt\n'), ((13733, 13765), 'matplotlib.pyplot.title', 'plt.title', (['"""Tfidf + Naive Bayes"""'], {}), "('Tfidf + Naive Bayes')\n", (13742, 13765), True, 'import matplotlib.pyplot as plt\n'), ((13770, 13793), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""View Rank"""'], {}), "('View Rank')\n", (13780, 13793), True, 'import matplotlib.pyplot as plt\n'), ((13798, 13827), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sentiment Score"""'], {}), "('Sentiment Score')\n", (13808, 13827), True, 'import matplotlib.pyplot as plt\n'), ((13832, 13851), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (13840, 13851), True, 'import matplotlib.pyplot as plt\n'), ((13857, 13877), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (13868, 13877), True, 'import matplotlib.pyplot as plt\n'), ((13882, 13926), 'matplotlib.pyplot.scatter', 'plt.scatter', (['view_rank', 'tfidf_svm_sents'], {'s': '(5)'}), '(view_rank, tfidf_svm_sents, s=5)\n', (13893, 13926), True, 'import matplotlib.pyplot as plt\n'), ((13931, 13995), 'matplotlib.pyplot.scatter', 'plt.scatter', (['view_rank', 'tfidf_svm_sents_ma'], {'s': '(5)', 'facecolor': '"""0.5"""'}), "(view_rank, tfidf_svm_sents_ma, s=5, facecolor='0.5')\n", (13942, 13995), True, 'import matplotlib.pyplot as plt\n'), ((14046, 14070), 'matplotlib.pyplot.title', 'plt.title', (['"""Tfidf + SVM"""'], {}), "('Tfidf + SVM')\n", (14055, 14070), True, 'import matplotlib.pyplot as plt\n'), ((14075, 14098), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""View Rank"""'], {}), "('View Rank')\n", (14085, 14098), True, 'import matplotlib.pyplot as plt\n'), ((14103, 14122), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (14111, 14122), True, 'import matplotlib.pyplot as plt\n'), ((14128, 14138), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14136, 14138), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1641), 'requests.get', 'requests.get', (['URI'], {'params': 'payload'}), '(URI, params=payload)\n', (1620, 1641), False, 'import requests\n'), ((1762, 1794), 'json.dump', 'json.dump', (['articles', 'output_file'], {}), '(articles, output_file)\n', (1771, 1794), False, 'import json\n'), ((2106, 2133), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (2128, 2133), False, 'import urllib\n'), ((2149, 2177), 'bs4.BeautifulSoup', 'BeautifulSoup', (['f', '"""html5lib"""'], {}), "(f, 'html5lib')\n", (2162, 2177), False, 'from bs4 import BeautifulSoup\n'), ((2512, 2544), 'json.dump', 'json.dump', (['articles', 'output_file'], {}), '(articles, output_file)\n', (2521, 2544), False, 'import json\n'), ((4003, 4035), 'json.dump', 'json.dump', (['articles', 'output_file'], {}), '(articles, output_file)\n', (4012, 4035), False, 'import json\n'), ((4385, 4411), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (4400, 4411), False, 'from nltk.corpus import stopwords\n'), ((5548, 5597), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'random_state': 'random_state'}), '(x, y, random_state=random_state)\n', (5564, 5597), False, 'from sklearn.model_selection import train_test_split\n'), ((5668, 5690), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {}), '(x, y)\n', (5684, 5690), False, 'from sklearn.model_selection import train_test_split\n'), ((5819, 5849), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), 
'(y_test, y_pred)\n', (5833, 5849), False, 'from sklearn.metrics import accuracy_score\n'), ((6034, 6064), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (6048, 6064), False, 'from sklearn.metrics import accuracy_score\n'), ((6323, 6372), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'random_state': 'random_state'}), '(x, y, random_state=random_state)\n', (6339, 6372), False, 'from sklearn.model_selection import train_test_split\n'), ((6443, 6465), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {}), '(x, y)\n', (6459, 6465), False, 'from sklearn.model_selection import train_test_split\n'), ((6594, 6624), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (6608, 6624), False, 'from sklearn.metrics import accuracy_score\n'), ((6809, 6839), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (6823, 6839), False, 'from sklearn.metrics import accuracy_score\n'), ((6993, 7026), 'pickle.dump', 'pickle.dump', (['mnb_clf', 'output_file'], {}), '(mnb_clf, output_file)\n', (7004, 7026), False, 'import pickle\n'), ((7081, 7114), 'pickle.dump', 'pickle.dump', (['svm_clf', 'output_file'], {}), '(svm_clf, output_file)\n', (7092, 7114), False, 'import pickle\n'), ((7176, 7224), 'pickle.dump', 'pickle.dump', (['count_vect.vocabulary_', 'output_file'], {}), '(count_vect.vocabulary_, output_file)\n', (7187, 7224), False, 'import pickle\n'), ((7286, 7334), 'pickle.dump', 'pickle.dump', (['tfidf_vect.vocabulary_', 'output_file'], {}), '(tfidf_vect.vocabulary_, output_file)\n', (7297, 7334), False, 'import pickle\n'), ((7703, 7729), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (7718, 7729), False, 'from nltk.corpus import stopwords\n'), ((10887, 10919), 'json.dump', 'json.dump', (['articles', 'output_file'], {}), '(articles, output_file)\n', (10896, 10919), False, 'import json\n'), ((3620, 3645), 'random.choice', 'random.choice', (['sentiments'], {}), '(sentiments)\n', (3633, 3645), False, 'import random\n'), ((8534, 8549), 'textblob.TextBlob', 'TextBlob', (['title'], {}), '(title)\n', (8542, 8549), False, 'from textblob import TextBlob\n'), ((8584, 8602), 'textblob.TextBlob', 'TextBlob', (['abstract'], {}), '(abstract)\n', (8592, 8602), False, 'from textblob import TextBlob\n'), ((8634, 8649), 'textblob.TextBlob', 'TextBlob', (['story'], {}), '(story)\n', (8642, 8649), False, 'from textblob import TextBlob\n'), ((10151, 10170), 'numpy.min', 'np.min', (['title_sents'], {}), '(title_sents)\n', (10157, 10170), True, 'import numpy as np\n'), ((10192, 10211), 'numpy.max', 'np.max', (['title_sents'], {}), '(title_sents)\n', (10198, 10211), True, 'import numpy as np\n'), ((10234, 10254), 'numpy.mean', 'np.mean', (['title_sents'], {}), '(title_sents)\n', (10241, 10254), True, 'import numpy as np\n'), ((10276, 10295), 'numpy.std', 'np.std', (['title_sents'], {}), '(title_sents)\n', (10282, 10295), True, 'import numpy as np\n'), ((10396, 10418), 'numpy.min', 'np.min', (['abstract_sents'], {}), '(abstract_sents)\n', (10402, 10418), True, 'import numpy as np\n'), ((10440, 10462), 'numpy.max', 'np.max', (['abstract_sents'], {}), '(abstract_sents)\n', (10446, 10462), True, 'import numpy as np\n'), ((10485, 10508), 'numpy.mean', 'np.mean', (['abstract_sents'], {}), '(abstract_sents)\n', (10492, 10508), True, 'import numpy as np\n'), ((10530, 10552), 'numpy.std', 
'np.std', (['abstract_sents'], {}), '(abstract_sents)\n', (10536, 10552), True, 'import numpy as np\n'), ((10647, 10666), 'numpy.min', 'np.min', (['story_sents'], {}), '(story_sents)\n', (10653, 10666), True, 'import numpy as np\n'), ((10688, 10707), 'numpy.max', 'np.max', (['story_sents'], {}), '(story_sents)\n', (10694, 10707), True, 'import numpy as np\n'), ((10730, 10750), 'numpy.mean', 'np.mean', (['story_sents'], {}), '(story_sents)\n', (10737, 10750), True, 'import numpy as np\n'), ((10772, 10791), 'numpy.std', 'np.std', (['story_sents'], {}), '(story_sents)\n', (10778, 10791), True, 'import numpy as np\n')]
|
import numpy as np
from numpy.testing import assert_allclose
import pytest
from mne._ola import _COLA, _Interp2, _Storer
def test_interp_2pt():
"""Test our two-point interpolator."""
n_pts = 200
assert n_pts % 50 == 0
feeds = [ # test a bunch of feeds to make sure they don't break things
[n_pts],
[50] * (n_pts // 50),
[10] * (n_pts // 10),
[5] * (n_pts // 5),
[2] * (n_pts // 2),
[1] * n_pts,
]
# ZOH
values = np.array([10, -10])
expected = np.full(n_pts, 10)
for feed in feeds:
expected[-1] = 10
interp = _Interp2([0, n_pts], values, 'zero')
out = np.concatenate([interp.feed(f)[0] for f in feed])
assert_allclose(out, expected)
interp = _Interp2([0, n_pts - 1], values, 'zero')
expected[-1] = -10
out = np.concatenate([interp.feed(f)[0] for f in feed])
assert_allclose(out, expected)
# linear and inputs of different sizes
values = [np.arange(2)[:, np.newaxis, np.newaxis], np.array([20, 10])]
expected = [
np.linspace(0, 1, n_pts, endpoint=False)[np.newaxis, np.newaxis, :],
np.linspace(20, 10, n_pts, endpoint=False)]
for feed in feeds:
interp = _Interp2([0, n_pts], values, 'linear')
outs = [interp.feed(f) for f in feed]
outs = [np.concatenate([o[0] for o in outs], axis=-1),
np.concatenate([o[1] for o in outs], axis=-1)]
assert_allclose(outs[0], expected[0], atol=1e-7)
assert_allclose(outs[1], expected[1], atol=1e-7)
# cos**2 and more interesting bounds
values = np.array([10, -10])
expected = np.full(n_pts, 10.)
expected[-5:] = -10
cos = np.cos(np.linspace(0, np.pi / 2., n_pts - 9,
endpoint=False))
expected[4:-5] = cos ** 2 * 20 - 10
for feed in feeds:
interp = _Interp2([4, n_pts - 5], values, 'cos2')
out = np.concatenate([interp.feed(f)[0] for f in feed])
assert_allclose(out, expected, atol=1e-7)
out = interp.feed(10)[0]
assert_allclose(out, [values[-1]] * 10, atol=1e-7)
# hann and broadcasting
n_hann = n_pts - 9
expected[4:-5] = np.hanning(2 * n_hann + 1)[n_hann:-1] * 20 - 10
expected = np.array([expected, expected[::-1] * 0.5])
values = np.array([values, values[::-1] * 0.5]).T
for feed in feeds:
interp = _Interp2([4, n_pts - 5], values, 'hann')
out = np.concatenate([interp.feed(f)[0] for f in feed], axis=-1)
assert_allclose(out, expected, atol=1e-7)
# one control point and None support
values = [np.array([10]), None]
for start in [0, 50, 99, 100, 1000]:
interp = _Interp2([start], values, 'zero')
out, none = interp.feed(n_pts)
assert none is None
expected = np.full(n_pts, 10.)
assert_allclose(out, expected)
@pytest.mark.parametrize('ndim', (1, 2, 3))
def test_cola(ndim):
"""Test COLA processing."""
sfreq = 1000.
rng = np.random.RandomState(0)
def processor(x):
return (x / 2.,) # halve the signal
for n_total in (999, 1000, 1001):
signal = rng.randn(n_total)
out = rng.randn(n_total) # shouldn't matter
for _ in range(ndim - 1):
signal = signal[np.newaxis]
out = out[np.newaxis]
for n_samples in (99, 100, 101, 102,
n_total - n_total // 2 + 1, n_total):
for window in ('hann', 'bartlett', 'boxcar', 'triang'):
# A few example COLA possibilities
n_overlaps = ()
if window in ('hann', 'bartlett') or n_samples % 2 == 0:
n_overlaps += ((n_samples + 1) // 2,)
if window == 'boxcar':
n_overlaps += (0,)
for n_overlap in n_overlaps:
# can pass callable or ndarray
for storer in (out, _Storer(out)):
cola = _COLA(processor, storer, n_total, n_samples,
n_overlap, sfreq, window)
n_input = 0
# feed data in an annoying way
while n_input < n_total:
next_len = min(rng.randint(1, 30),
n_total - n_input)
cola.feed(signal[..., n_input:n_input + next_len])
n_input += next_len
assert_allclose(out, signal / 2., atol=1e-7)
|
[
"numpy.hanning",
"mne._ola._COLA",
"numpy.arange",
"numpy.testing.assert_allclose",
"mne._ola._Interp2",
"pytest.mark.parametrize",
"numpy.array",
"numpy.linspace",
"mne._ola._Storer",
"numpy.concatenate",
"numpy.full",
"numpy.random.RandomState"
] |
[((2880, 2922), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ndim"""', '(1, 2, 3)'], {}), "('ndim', (1, 2, 3))\n", (2903, 2922), False, 'import pytest\n'), ((493, 512), 'numpy.array', 'np.array', (['[10, -10]'], {}), '([10, -10])\n', (501, 512), True, 'import numpy as np\n'), ((528, 546), 'numpy.full', 'np.full', (['n_pts', '(10)'], {}), '(n_pts, 10)\n', (535, 546), True, 'import numpy as np\n'), ((1626, 1645), 'numpy.array', 'np.array', (['[10, -10]'], {}), '([10, -10])\n', (1634, 1645), True, 'import numpy as np\n'), ((1661, 1681), 'numpy.full', 'np.full', (['n_pts', '(10.0)'], {}), '(n_pts, 10.0)\n', (1668, 1681), True, 'import numpy as np\n'), ((2074, 2125), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', '([values[-1]] * 10)'], {'atol': '(1e-07)'}), '(out, [values[-1]] * 10, atol=1e-07)\n', (2089, 2125), False, 'from numpy.testing import assert_allclose\n'), ((2261, 2303), 'numpy.array', 'np.array', (['[expected, expected[::-1] * 0.5]'], {}), '([expected, expected[::-1] * 0.5])\n', (2269, 2303), True, 'import numpy as np\n'), ((3004, 3028), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (3025, 3028), True, 'import numpy as np\n'), ((613, 649), 'mne._ola._Interp2', '_Interp2', (['[0, n_pts]', 'values', '"""zero"""'], {}), "([0, n_pts], values, 'zero')\n", (621, 649), False, 'from mne._ola import _COLA, _Interp2, _Storer\n'), ((722, 752), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'expected'], {}), '(out, expected)\n', (737, 752), False, 'from numpy.testing import assert_allclose\n'), ((770, 810), 'mne._ola._Interp2', '_Interp2', (['[0, n_pts - 1]', 'values', '"""zero"""'], {}), "([0, n_pts - 1], values, 'zero')\n", (778, 810), False, 'from mne._ola import _COLA, _Interp2, _Storer\n'), ((910, 940), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'expected'], {}), '(out, expected)\n', (925, 940), False, 'from numpy.testing import assert_allclose\n'), ((1040, 1058), 'numpy.array', 'np.array', (['[20, 10]'], {}), '([20, 10])\n', (1048, 1058), True, 'import numpy as np\n'), ((1162, 1204), 'numpy.linspace', 'np.linspace', (['(20)', '(10)', 'n_pts'], {'endpoint': '(False)'}), '(20, 10, n_pts, endpoint=False)\n', (1173, 1204), True, 'import numpy as np\n'), ((1246, 1284), 'mne._ola._Interp2', '_Interp2', (['[0, n_pts]', 'values', '"""linear"""'], {}), "([0, n_pts], values, 'linear')\n", (1254, 1284), False, 'from mne._ola import _COLA, _Interp2, _Storer\n'), ((1465, 1514), 'numpy.testing.assert_allclose', 'assert_allclose', (['outs[0]', 'expected[0]'], {'atol': '(1e-07)'}), '(outs[0], expected[0], atol=1e-07)\n', (1480, 1514), False, 'from numpy.testing import assert_allclose\n'), ((1522, 1571), 'numpy.testing.assert_allclose', 'assert_allclose', (['outs[1]', 'expected[1]'], {'atol': '(1e-07)'}), '(outs[1], expected[1], atol=1e-07)\n', (1537, 1571), False, 'from numpy.testing import assert_allclose\n'), ((1722, 1776), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / 2.0)', '(n_pts - 9)'], {'endpoint': '(False)'}), '(0, np.pi / 2.0, n_pts - 9, endpoint=False)\n', (1733, 1776), True, 'import numpy as np\n'), ((1886, 1926), 'mne._ola._Interp2', '_Interp2', (['[4, n_pts - 5]', 'values', '"""cos2"""'], {}), "([4, n_pts - 5], values, 'cos2')\n", (1894, 1926), False, 'from mne._ola import _COLA, _Interp2, _Storer\n'), ((1999, 2041), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'expected'], {'atol': '(1e-07)'}), '(out, expected, atol=1e-07)\n', (2014, 2041), False, 'from numpy.testing import 
assert_allclose\n'), ((2317, 2355), 'numpy.array', 'np.array', (['[values, values[::-1] * 0.5]'], {}), '([values, values[::-1] * 0.5])\n', (2325, 2355), True, 'import numpy as np\n'), ((2398, 2438), 'mne._ola._Interp2', '_Interp2', (['[4, n_pts - 5]', 'values', '"""hann"""'], {}), "([4, n_pts - 5], values, 'hann')\n", (2406, 2438), False, 'from mne._ola import _COLA, _Interp2, _Storer\n'), ((2520, 2562), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'expected'], {'atol': '(1e-07)'}), '(out, expected, atol=1e-07)\n', (2535, 2562), False, 'from numpy.testing import assert_allclose\n'), ((2618, 2632), 'numpy.array', 'np.array', (['[10]'], {}), '([10])\n', (2626, 2632), True, 'import numpy as np\n'), ((2698, 2731), 'mne._ola._Interp2', '_Interp2', (['[start]', 'values', '"""zero"""'], {}), "([start], values, 'zero')\n", (2706, 2731), False, 'from mne._ola import _COLA, _Interp2, _Storer\n'), ((2818, 2838), 'numpy.full', 'np.full', (['n_pts', '(10.0)'], {}), '(n_pts, 10.0)\n', (2825, 2838), True, 'import numpy as np\n'), ((2846, 2876), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'expected'], {}), '(out, expected)\n', (2861, 2876), False, 'from numpy.testing import assert_allclose\n'), ((999, 1011), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (1008, 1011), True, 'import numpy as np\n'), ((1085, 1125), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_pts'], {'endpoint': '(False)'}), '(0, 1, n_pts, endpoint=False)\n', (1096, 1125), True, 'import numpy as np\n'), ((1347, 1392), 'numpy.concatenate', 'np.concatenate', (['[o[0] for o in outs]'], {'axis': '(-1)'}), '([o[0] for o in outs], axis=-1)\n', (1361, 1392), True, 'import numpy as np\n'), ((1410, 1455), 'numpy.concatenate', 'np.concatenate', (['[o[1] for o in outs]'], {'axis': '(-1)'}), '([o[1] for o in outs], axis=-1)\n', (1424, 1455), True, 'import numpy as np\n'), ((2198, 2224), 'numpy.hanning', 'np.hanning', (['(2 * n_hann + 1)'], {}), '(2 * n_hann + 1)\n', (2208, 2224), True, 'import numpy as np\n'), ((3938, 3950), 'mne._ola._Storer', '_Storer', (['out'], {}), '(out)\n', (3945, 3950), False, 'from mne._ola import _COLA, _Interp2, _Storer\n'), ((3984, 4054), 'mne._ola._COLA', '_COLA', (['processor', 'storer', 'n_total', 'n_samples', 'n_overlap', 'sfreq', 'window'], {}), '(processor, storer, n_total, n_samples, n_overlap, sfreq, window)\n', (3989, 4054), False, 'from mne._ola import _COLA, _Interp2, _Storer\n'), ((4508, 4554), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', '(signal / 2.0)'], {'atol': '(1e-07)'}), '(out, signal / 2.0, atol=1e-07)\n', (4523, 4554), False, 'from numpy.testing import assert_allclose\n')]
|
import numpy as np
import sys
import string
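# Nonogram-style (picross) grid solver: `grille` is an M x N grid whose cells are
# -1 (uncoloured), 0 (white) or 1 (black); `s_grille` receives the clue values read
# from the input file, and the coloriage_possible_* functions test whether a row or
# column can still be coloured consistently with its clue sequence.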
M=4
N=5
# grid filled with the sequence (clue) values
s_grille=np.full((M+N,M+N),0)
# grid filled with 0 / -1 / 1 values
grille=np.full((M,N), -1)
#grille[1][0]=0
sequence1=[1,1]
sequence2=[2,1]
# uncoloured: -1
# white: 0
# black: 1
def lire_fichier(s_grille):
#file=sys.argv[1:]
try:
in_file = open(sys.argv[1], "r")
except:
sys.exit("ERROR. Can't read supplied filename.")
text = in_file.read()
lg=len(text)
i=0
nextline=0
line=0
colonne=0
bool=0
j=0
while(i<lg-1):
if(text[i]=='\n'):
nextline=1
if(bool==1):
bool=0
else:
line=line+1
i=i+1
colonne=0
continue
else:
if nextline==1:
                if text[i]==' ':  # 0x20 is the ASCII space character
                    if text[i+1]!=' ' and text[i+1]!="\n":
s_grille[line][colonne]=0
colonne=colonne+1
nextline==1
else:
nextline==1
elif (text[i]>='1' and text[i]<='9'):
s_grille[line][colonne]=text[i]
colonne=colonne+1
nextline==0
elif text[i]=='#':
j=line-1
bool=1
nextline==0
else:
nextline==0
if nextline==0:
#print("hi")
if (text[i]>='1' and text[i]<='9'):
s_grille[line][colonne]=text[i]
i=i+1
#print(s_grille)
in_file.close()
return s_grille
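# Check whether a block of length sl could occupy row i starting at column j:
# the cells next to the starting position must not already be black (1) and none
# of the sl cells from column j onward may already be white (0).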
def compare_block_ligne(grille, i, j, sl):
if ((j+1<N) and (grille[i][j+1]==1))or ((j-1>=0) and (grille[i][j-1]==1)):
return False
while(j>=0 and j<N and sl>0):
if grille[i][j]==0:
return False
j=j+1
sl=sl-1
return True
def compare_block_colonne(grille, i, j, sl):
if ((i+1<M) and (grille[i+1][j]==1))or ((i-1>=0) and (grille[i-1][j]==1)):
return False
while(i>=0 and i<M and sl>0):
if grille[i][j]==0:
return False
i=i+1
sl=sl-1
return True
def coloriage_possible_ligne(grille, sequence, i, j, l, cl):
    # syntax problems
    # case a: l exceeds the number of elements in the sequence -> syntax violation
    # case b: i is not between 0 and N-1 -> syntax violation
    # case c: j < 0 -> syntax violation
if (len(sequence)<l) or (i<0) or (i>N-1) or(j<0):
return False
    # case 1: l = 0:
    # - if j == 0, true
    # - otherwise false
if (l==0):
if (j==0):
return True
print("1false")
return False
else:
val=sequence[l-1]
print("s", sequence[l-1])
        # case 2a: if j < sl - 1
if (j<(sequence[l-1]-1)):
print("2false")
return False
        # case 2b: if j == sl - 1
        # - if l == 1, true
        # - otherwise false
elif (j==(sequence[l-1]-1)):
cpt=j
bool=0
while(j>=0):
if grille[i][j]==0 or cl==0:
bool=1
break
j=j-1
print(l, bool)
if l==1 and bool==0:
print("ABC true")
return True
print("3false")
return False
else:
            # case 2c
return coloriage_possible_ligne_rec(grille, sequence, i, j, l, -1, cl )#, case_j ,nb_block)
def coloriage_possible_ligne_rec(grille, sequence, i, j, l, check ,cl):#, case_j ,nb_block):
if (l==0) and j>=-1 :
print("ABC True")
return True
if j<0:
print(i, j, l)
print(grille)
print("0false")
return False
    # For the first iteration, we do not yet know whether the cell is white or black
print(grille)
if check ==-1:
if cl==0:
compare=compare_block_ligne(grille, i, j-sequence[l-1], sequence[l-1])
else:
compare=compare_block_ligne(grille, i, j-sequence[l-1]+1, sequence[l-1])
print("i, j", i, j,"compare:", compare, "l", l)
if grille[i][j]==-1:
if not (compare):
print("4false")
return False
else:
if(j==0) and l==1 and sequence[0]==1:
return True
print("here i j", i ,j-(sequence[l-1])-(1-cl)-1)
if (j-(sequence[l-1])-(1-cl)-1<-1):
return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1]), l-1, 0, cl)
return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-(1-cl)-1, l-1, 0, cl)
elif grille[i][j]==1:
if(j==0) and l==1 and sequence[0]==1:
return True
if cl==0:
return False
if compare:
return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1 ,0, cl)
return False
elif grille[i][j]==0:
if(j==0) and l==1 and sequence[0]==1:
return False
if cl==1:
return False
if compare:
return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-2, l-1 ,0, cl)
return False
else:
print("Syntaxe erreur valeur different que -1 0 1")
exit()
else:
compare_1=compare_block_ligne(grille, i, j-sequence[l-1], sequence[l-1])
compare_2=compare_block_ligne(grille, i, j-sequence[l-1]+1, sequence[l-1])
print("i, j", i, j,"compare1:", compare_1, "l",l)
print("i, j", i, j,"compare2:", compare_2, "l",l)
if grille[i][j]==-1:
if(j==0) and l==1 and sequence[0]==1:
return True
#print(i,j-sequence[l-1] ,sequence[l-1])
if grille[i][j-sequence[l-1]-1]==1 and compare_1:
return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-2, l-1, 0, cl)
elif grille[i][j-sequence[l-1]]==1 and compare_2:
#if(j==0):
# return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1]), l-1 ,0, cl)
return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1 ,0, cl)
elif not (compare_1 or compare_2):
print("6false")
return False
else:
                if grille[i][j-sequence[l-1]-1]==0:
                    # colour the cells of the current block (length sequence[l-1]) ending at column j
                    s=sequence[l-1]-1
                    while(s>=0):
                        grille[i][j-s]=1
                        s=s-1
                    return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1 ,0, cl)
else:
print("or")
if (j==0) and sequence[l-1]==1:
print("ABC True")
return True
return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1 ,0, cl) or coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-2, l-1, 0, cl)
elif grille[i][j]==1:
if(j==0) and l==1 and sequence[0]==1:
return True
if compare_2:
return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1 ,0, cl)
else:
print("7false")
return False
elif grille[i][j]==0:
if(j==0) and l==1 and sequence[0]==1:
return False
if compare_1:
return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-2, l-1 ,0, cl)
else:
print("8false")
return False
else:
print("Syntaxe erreur valeur different que -1 0 1")
exit()
def coloriage_possible_colonne(grille, sequence, i, j, l ,cl):
    # syntax problems
    # case a: l exceeds the number of elements in the sequence -> syntax violation
    # case b: i is not between 0 and N-1 -> syntax violation
    # case c: j < 0 -> syntax violation
if (len(sequence)<l) or (i<0) or (i>N-1) or(j<0):
return False
    # case 1: l = 0:
    # - if i == 0, true
    # - otherwise false
if (l==0):
if (i==0):
return True
print("11false")
return False
else:
print("i")
val=sequence[l-1]
        # case 2a: if i < sl - 1
if (i<(sequence[l-1]-1)):
print("22false")
return False
        # case 2b: if i == sl - 1
        # - if l == 1, true
        # - otherwise false
elif (i==(sequence[l-1]-1)):
cpt=i
bool=0
while(i>=0):
if grille[i][j]==0 or cl==0:
bool=1
break
i=i-1
if l==1 and bool==0:
print("ABC true")
return True
print("33false")
return False
else:
            # case 2c
return coloriage_possible_colonne_rec(grille, sequence, i, j, l, -1 ,cl)#, case_j ,nb_block)
def coloriage_possible_colonne_rec(grille, sequence, i, j, l, check, cl):#, case_j ,nb_block):
if (l==0) and (i>=-1):
print("ABC true")
return True
if i<0:
print("44false")
return False
    # For the first iteration, we do not yet know whether the cell is white or black
print(grille)
if check ==-1:
if cl==0:
compare=compare_block_colonne(grille, i-sequence[l-1], j, sequence[l-1])
else:
compare=compare_block_colonne(grille, i-sequence[l-1]+1, j, sequence[l-1])
print("i, j", i, j,"compare:", compare, "l", l)
if grille[i][j]==-1:
if not (compare):
print("55false")
return False
else:
if(i==0) and l==1 and sequence[0]==1:
return True
print("here i j", i-(sequence[l-1])-(1-cl)-1 ,j)
if (i-(sequence[l-1])-(1-cl)-1<-1):
                    return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1]) ,j, l-1, 0, cl)
                return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-(1-cl)-1 ,j, l-1, 0, cl)
elif grille[i][j]==1:
if(i==0) and l==1 and sequence[0]==1:
return True
if compare:
return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-1 ,j, l-1 ,0, cl)
else:
##print("77false")
return False
elif grille[i][j]==0:
return False
else:
print("Syntaxe erreur valeur different que -1 0 1")
exit()
else:
compare_1=compare_block_colonne(grille, i-sequence[l-1], j, sequence[l-1])
compare_2=compare_block_colonne(grille, i-sequence[l-1]+1, j, sequence[l-1])
print("i, j", i, j,"compare1:", compare_1, "l",l)
print("i, j", i, j,"compare2:", compare_2, "l",l)
if grille[i][j]==-1:
            if grille[i-sequence[l-1]-1][j]==1 and compare_1:
return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-2 ,j, l-1, 0, cl)
            elif grille[i-sequence[l-1]][j]==1 and compare_2:
if(i==0):
                    return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1]) ,j, l-1 ,0, cl)
return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-1 ,j, l-1 ,0, cl)
elif not (compare_1 or compare_2):
print("66false")
return False
else:
                if grille[i-sequence[l-1]-1][j]==0:
return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-1 ,j, l-1 ,0, cl)
else:
                    if (i==0) and sequence[l-1]==1:
print("ABC True")
return True
return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-1 ,j, l-1 ,0, cl) or coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-2 ,j, l-1, 0, cl)
elif grille[i][j]==1:
if(i==0) and l==1 and sequence[0]==1:
return True
if compare_2:
return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-1 ,j, l-1 ,0, cl)
else:
print("77false")
return False
elif grille[i][j]==0:
if(i==0) and l==1 and sequence[0]==1:
return False
if compare_1:
return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-2 ,j, l-1 ,0, cl)
else:
print("88false")
return False
else:
print("Syntaxe erreur valeur different que -1 0 1")
exit()
def dupliquer(grille):
grille_d=np.full((M,N), -1)
for i in range(M):
for j in range(N):
grille_d[i][j]=grille[i][j]
return grille_d
def creer_sequence(indice, direction):
init=1
k=0
sequence=[]
#print(s_grille)
if direction==1:
while(k<M):
if(s_grille[indice][k]!=0 or init==1):
sequence.append(s_grille[indice][k])
#print("this",indice, k)
#print(s_grille[indice][k])
init=0
k=k+1
elif direction==2:
while(k<N):
if(s_grille[indice+M][k]!=0 or init==1):
sequence.append(s_grille[indice+M][k])
init=0
k=k+1
return sequence
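# a small worked example (clue values assumed for illustration): if row
# `indice` of s_grille starts with the clues [3, 1, 0, 0, ...], then
# creer_sequence(indice, 1) returns [3, 1]; an all-zero clue row gives [0]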
def coloreLig(grille, i):
    sequence=creer_sequence(i,1)# 1 means row, 2 means column
l=len(sequence)
a=0
somme_sequence=0
while (a<l):
somme_sequence=somme_sequence+sequence[a]
a=a+1
j=N-1
bool=0
print("----------------------",sequence)
while(j>=0 and l>0):
print("i",i, "j", j, "l", l)
resultat_blanc=(coloriage_possible_ligne(grille, sequence, i, j, l, 0))
print("noir")
resultat_noir=(coloriage_possible_ligne(grille, sequence, i, j, l, 1) )
print("resultat_blanc, resultat_noir",resultat_blanc, resultat_noir)
k=j
if resultat_noir==True:
bool=1
if resultat_blanc==False:
s=sequence[l-1]
print(l-1)
while(s>0):
print("in while")
print(sequence)
grille[i][k]=1
k=k-1
s=s-1
del sequence[l-1]
else:
nb=j-1
min=j
max=-1
while(nb>=0):
#print(grille[i][nb], nb)
if grille[i][nb]==1:
if(grille[i][nb]>max):
max=nb
if(grille[i][nb]<min):
min=nb
nb=nb-1
print("max",max)
print("min",min)
l=len(sequence)
print("************l",l,"max-min+1", max-min+1)
print((l==1 and max-min+1==sequence[l-1]))
if not (l==1 and max-min+1==sequence[l-1]):
print("why?")
del sequence[l-1]
print("fin")
if resultat_noir==False and resultat_blanc==False and j==N-1:
print(i, j)
return (False, grille)
j=k-1
l=len(sequence)
if(j<0 and l>0):
del sequence[l-1]
            j=N-1
l=len(sequence)
if(bool==1):
return (True,grille)
print("what")
return (False, grille)
def coloreCol(grille, j):
    sequence=creer_sequence(j,2)# 1 means row, 2 means column
l=len(sequence)
i=M-1
bool=0
print("----------------------",sequence)
while(i>=0 and l>0):
bool_del=0
print("i",i, "j", j, "l", l)
resultat_blanc=(coloriage_possible_colonne(grille, sequence, i, j, l, 0))
print("noir")
resultat_noir=(coloriage_possible_colonne(grille, sequence, i, j, l, 1) )
print("resultat_blanc, resultat_noir",resultat_blanc, resultat_noir)
print("l=",l)
k=i
if resultat_noir==True:
bool=1
if resultat_blanc==False:
s=sequence[l-1]
k=i
while(s>0):
print("welcome")
grille[k][j]=1
k=k-1
s=s-1
del sequence[l-1]
else:
nb=i-1
min=i
max=-1
while(nb>=0):
#print(grille[i][nb], nb)
if grille[nb][j]==1:
if(grille[nb][j]>max):
max=nb
if(grille[nb][j]<min):
min=nb
nb=nb-1
if not (l==1 and max-min+1==sequence[l-1]):
print("why?")
del sequence[l-1]
#del sequence[-1]
print("fin")
#if resultat_blanc==True:
# if resultat_noir==False:
if resultat_noir==False and resultat_blanc==False and i==M-1:
print(i, j)
return (False, grille)
i=k-1
#l=len(sequence)
#if(i<=0 and l>0):
# if(bool_del!=1):
# del sequence[-1]
#i=M-1"""
l=len(sequence)
if(bool==1):
return (True,grille)
print("what")
return (False, grille)
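# coloration propagates the row and column constraints until both work queues
# are empty: it returns (1, grid) when every cell is decided, (0, grid) when
# propagation stops with some cells still undecided, and an empty grid with a
# failure flag when the clues turn out to be contradictory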
def coloration(grille):
grille_d=dupliquer(grille)
LigneAVoir=set()
ColonneAVoir=set()
i=M-1
j=N-1
while (i>=0):
LigneAVoir.add(i)
i=i-1
while(j>=0):
ColonneAVoir.add(j)
j=j-1
while ((LigneAVoir!=set())or(ColonneAVoir!=set())):
while (LigneAVoir):
i=LigneAVoir.pop()
(ok,grille_d)=coloreLig(grille_d, i)
if ok==False:
print("hi")
print(grille_d)
                return (-1, [[]]) # empty matrix
Nouveaux=set()
for j in range(N):
if grille_d[i][j]==1:
Nouveaux.add(j)
ColonneAVoir=ColonneAVoir.union(Nouveaux)
while(ColonneAVoir):
j=ColonneAVoir.pop()
(ok,grille_d)=coloreCol(grille_d, j)
if ok==False:
print("hello")
                return (-1, [[]]) # empty matrix
Nouveaux=set()
for i in range(M):
if grille_d[i][j]==1:
Nouveaux.add(i)
print("-------Nouveaux-------",Nouveaux)
print("-------LigneAVoir-------",LigneAVoir)
LigneAVoir=LigneAVoir.union(Nouveaux)
print("-------LigneAVoir-------",LigneAVoir)
for i in range(M) :
for j in range(N) :
if(grille_d[i][j]!=0 and grille_d[i][j]!=1):
return (0, grille_d)
return (1,grille_d)
#print(coloriage_possible(grille, sequence1, 1, 1, 2))
#print(coloriage_possible(grille, sequence2, 1, 3, 2))
lire_fichier(s_grille)
print(s_grille)
print(coloration(grille))
|
[
"numpy.full",
"sys.exit"
] |
[((92, 118), 'numpy.full', 'np.full', (['(M + N, M + N)', '(0)'], {}), '((M + N, M + N), 0)\n', (99, 118), True, 'import numpy as np\n'), ((148, 167), 'numpy.full', 'np.full', (['(M, N)', '(-1)'], {}), '((M, N), -1)\n', (155, 167), True, 'import numpy as np\n'), ((13363, 13382), 'numpy.full', 'np.full', (['(M, N)', '(-1)'], {}), '((M, N), -1)\n', (13370, 13382), True, 'import numpy as np\n'), ((375, 423), 'sys.exit', 'sys.exit', (['"""ERROR. Can\'t read supplied filename."""'], {}), '("ERROR. Can\'t read supplied filename.")\n', (383, 423), False, 'import sys\n')]
|
#for data cleaning and analysis
import pandas as pd
import numpy as np
from random import randint
#for visualization
import matplotlib.pyplot as plt
import seaborn as sns
#for directory-related functions
import os
import glob
import getpass
#for web-scraping baseball data
import pybaseball as pyb
#for drafting
import math
import random
#for clustering
from sklearn.cluster import MeanShift,estimate_bandwidth
from sklearn.model_selection import train_test_split, cross_validate
#import time to see how long the script runs for
import time
import datetime
from datetime import date, timedelta
#import tkinter to build GUIs
import tkinter as tk
from tkinter import filedialog
#for warnings
import warnings
warnings.filterwarnings("ignore")
#for progress bar
from tqdm import tqdm
#enter forecasting and drafting parameters
def entry():
root = tk.Tk()
root.geometry("400x300")
root.title('Select Forecasting and Drafting Parameters')
label_simulations = tk.Label(root, text='Choose the number of simulations for forecasting')
entry_simulations = tk.Entry(root)
label_num_competitors = tk.Label(root, text='Choose Number of Competitors')
entry_num_competitors = tk.Entry(root)
label_num_rounds = tk.Label(root, text='Choose the number of rounds in the draft')
entry_num_rounds = tk.Entry(root)
label_num_iterations = tk.Label(root, text="Choose the number of iterations for the Draft Agent's Exploration")
entry_num_iterations = tk.Entry(root)
label_simulations.pack()
entry_simulations.pack()
label_num_competitors.pack()
entry_num_competitors.pack()
label_num_rounds.pack()
entry_num_rounds.pack()
label_num_iterations.pack()
entry_num_iterations.pack()
def enter_params():
global simulations
global num_competitors
global num_rounds
global num_iterations
simulations = int(entry_simulations.get())
num_competitors = int(entry_num_competitors.get())
num_rounds = int(entry_num_rounds.get())
num_iterations = int(entry_num_iterations.get())
root.destroy()
def get_params():
global dateStore
dateStore = True
enter_params()
get_params_button = tk.Button(root, text='Submit', command= get_params)
get_params_button.pack()
root.mainloop()
return simulations, num_competitors, num_rounds, num_iterations
#allow the user to select a date range
def get_dates() :
root = tk.Tk()
root.geometry("400x300")
root.title('Select Start and End time')
label_start = tk.Label(root, text='Start Year: YYYY')
entry_start = tk.Entry(root)
label_end = tk.Label(root, text='End Year: YYYY')
entry_end = tk.Entry(root)
label_start.pack()
entry_start.pack()
label_end.pack()
entry_end.pack()
def enter_year():
global start_time
global end_time
start_time = datetime.datetime.strptime(entry_start.get(),'%Y')
end_time =datetime.datetime.strptime(entry_end.get(),'%Y')
root.destroy()
def get_year():
global dateStore
dateStore = True
enter_year()
get_year_button = tk.Button(root, text='Submit', command= get_year)
get_year_button.pack()
root.mainloop()
#get range of years
date_range = pd.date_range(start=start_time, end = end_time,freq='D')
#create dictionary to store years
years = {str(date.year) : date.year for date in date_range}
return years
#make a dictionary with a dataframe for each season for hitters, pitchers, and teams
def make_period_dicts(dictionary):
batter_df = {dic:pyb.batting_stats(int(dic), qual = False) for dic in dictionary.keys()}
pitcher_df = {dic:pyb.pitching_stats(int(dic), qual = False) for dic in dictionary.keys()}
return batter_df , pitcher_df
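#a minimal sketch of how the two helpers above chain together (the example
#years are illustrative; pybaseball needs network access to FanGraphs):
#   years = get_dates()                       # e.g. {'2019': 2019, '2020': 2020}
#   batter_df, pitcher_df = make_period_dicts(years)
#   batter_df['2019'].head()                  # one qual=False batting table per season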
#forecaster class
class Forecaster:
def __init__(self, simulations, num_competitors, num_rounds, num_iterations,years):
self.user = getpass.getuser()
self.today = date.today().strftime("%m_%d_%y")
self.simulations = simulations
self.num_competitors = num_competitors
self.num_rounds = num_rounds
self.num_iterations = num_iterations
self.years = years
print('Downloading Data')
print('')
self.seasons_dict_batter, self.seasons_dict_pitcher = make_period_dicts(self.years)
#perform monte carlo full season forecast
def monte_carlo_forecast(self):
print('Constructing the Database')
print('')
#merge the frames together
def merge_dict(dfDict, onCols, how='outer', naFill=None):
keys = list(dfDict.keys())
for i in range(len(keys)):
key = keys[i]
df0 = dfDict[key]
cols = list(df0.columns)
valueCols = list(filter(lambda x: x not in (onCols), cols))
df0 = df0[onCols + valueCols]
df0.columns = onCols + [(s + '_' + key) for s in valueCols]
if (i == 0):
outDf = df0
else:
outDf = pd.merge(outDf, df0, how=how, on=onCols)
if (naFill != None):
outDf = outDf.fillna(naFill)
return(outDf)
#get the column names
def get_column_names(dictionary):
key_list = list(dictionary.keys())
columns_list = list(dictionary[key_list[0]].columns)
return columns_list
self.pitcher_columns_list, self.batter_columns_list = get_column_names(self.seasons_dict_pitcher), get_column_names(self.seasons_dict_batter)
#merge the seasons together
def merge_season_dicts():
self.merged_batter_seasons_dict = merge_dict(self.seasons_dict_batter, self.batter_columns_list, how = 'outer', naFill = None)
self.merged_pitcher_seasons_dict = merge_dict(self.seasons_dict_pitcher, self.pitcher_columns_list, how = 'outer', naFill = None)
return self.merged_batter_seasons_dict, self.merged_pitcher_seasons_dict
merge_season_dicts()
#make a dataframe for each hitter
def make_player_dicts(dataframe):
df = {name : dataframe[dataframe['Name']==name] for name in dataframe['Name']}
return df
self.batter_dict, self.pitcher_dict = make_player_dicts(self.merged_batter_seasons_dict), make_player_dicts(self.merged_pitcher_seasons_dict)
#get the current year
def get_year_names(dictionary):
keys_list = list(dictionary.keys())
return keys_list
self.years_list = get_year_names(self.years)
self.current_year = self.years_list[-1]
#get only the players who played in the current year
def filter_for_current_players(dictionary, year):
current_dict = {name : dictionary[name] for name in dictionary.keys() if dictionary[name]['Season'].values[-1]==int(year)}
return current_dict
self.current_pitcher_dict, self.current_batter_dict = filter_for_current_players(self.pitcher_dict, self.current_year), filter_for_current_players(self.batter_dict, self.current_year)
#raw stats for batters and pitchers
def stats():
batter_stats = ['1B', '2B','3B', 'HR','R','RBI','BB','SO','SB', 'IDfg']
pitcher_stats = ['W', 'IP', 'ER', 'SO', 'BB', 'SV', 'HLD', 'IDfg']
return batter_stats, pitcher_stats
self.batter_stats, self.pitcher_stats = stats()
#filter by these stats
def filter_for_current_stats(dictionary, stats):
current_dict = {name:dictionary[name][stats] for name in dictionary.keys()}
return current_dict
self.current_stat_batter, self.current_stat_pitcher = filter_for_current_stats(self.current_batter_dict, self.batter_stats), filter_for_current_stats(self.current_pitcher_dict, self.pitcher_stats)
#team names and their abbreviations
def teams_abbreviatons():
team_list = ['Diamondbacks-ARI', 'Braves-ATL', 'Orioles-BAL', 'Red Sox-BOS', 'Cubs-CHC',
'White Sox-CHW', 'Reds-CIN' , 'Indians-CLE' , 'Rockies-COL', 'Tigers-DET' ,
'Marlins-MIA' ,'Astros-HOU' ,'Royals-KCR' ,'Angels-LAA','Dodgers-LAD',
'Brewers-MIL' ,'Twins-MIN','Mets-NYM','Yankees-NYY','Athletics-OAK','Phillies-PHI',
'Pirates-PIT' ,'Padres-SDP' ,'Giants-SFG','Mariners-SEA', 'Cardinals-STL',
'Rays-TB' ,'Rangers-TEX' ,'Blue Jays-TOR' ,'Nationals-WSN']
return team_list
self.team_list = teams_abbreviatons()
#split the team names
def split_names(team_list) :
split_list = [team.split('-') for team in team_list]
return split_list
self.split_teams = split_names(self.team_list)
#create dict for team names
def create_dict(team_list):
teams_dict = {team[1]: team[0] for team in team_list}
return teams_dict
self.teams_dict = create_dict(self.split_teams)
#get a list of the teams
def get_team_name_lists(team_list):
team_list_full = [team.split('-')[0] for team in team_list]
team_list_abv = [team.split('-')[1] for team in team_list]
return team_list_full, team_list_abv
self.team_list_full, self.team_list_abv = get_team_name_lists(self.team_list)
#get all the schedules
def get_schedules(team_list_abv, years_list, team_list_full):
season_list = []
season_list = [{team_list_ful: {year_list:pyb.schedule_and_record(int(year_list), team_list_ab)}} for year_list in years_list for team_list_ab, team_list_ful in zip(team_list_abv, team_list_full)]
return season_list
self.season_list = get_schedules(self.team_list_abv, self.years_list, self.team_list_full)
#drop pitchers from the hitters list
def drop_pitchers(current_stat_batter, current_stat_pitcher):
for key in current_stat_pitcher.keys():
if key in current_stat_batter.keys() and key in current_stat_pitcher.keys():
del current_stat_batter[key]
return current_stat_batter
self.current_stat_batter = drop_pitchers(self.current_stat_batter, self.current_stat_pitcher)
#create averages for each player for each stat
def player_averager(dictionary):
average_players ={}
for key in dictionary.keys():
average_players.update({key : dictionary[key].mean().round().to_frame().transpose()})
average_players[key] = average_players[key].reset_index()
average_players[key].rename(columns = {'index': 'Name'}, inplace = True)
average_players[key]['Name']= key
return average_players
self.average_batters, self.average_pitchers = player_averager(self.current_stat_batter), player_averager(self.current_stat_pitcher)
#columns to merge on
def merge_columns(average_batters, average_pitchers):
#return list(average_batters['<NAME>'].columns), list(average_pitchers['<NAME>'].columns)
return list(average_batters[list(average_batters.keys())[0]].columns), list(average_pitchers[list(average_pitchers.keys())[0]].columns)
self.batter_columns, self.pitcher_columns = merge_columns(self.average_batters, self.average_pitchers)
#merge the average players to create the clusters
def average_merger(average_batters, batter_columns,average_pitchers, pitcher_columns):
return merge_dict(average_batters, batter_columns, how = 'outer', naFill = None), merge_dict(average_pitchers, pitcher_columns, how = 'outer', naFill = None)
self.merged_batter_df, self.merged_pitcher_df = average_merger(self.average_batters, self.batter_columns, self.average_pitchers, self.pitcher_columns)
#separate starting and relief pitchers and account for overlap
def separate_pitchers(merged_pitcher_df):
starting_pitchers = merged_pitcher_df[(merged_pitcher_df['SV'] ==0) &(merged_pitcher_df['HLD'] ==0) | (merged_pitcher_df['Name']=='<NAME>') | (merged_pitcher_df['Name']=='<NAME>')]
relief_pitchers = merged_pitcher_df[(merged_pitcher_df['SV'] >=1) & (merged_pitcher_df['SV'] <8) | (merged_pitcher_df['HLD']> 0) & (merged_pitcher_df['Name'] !='<NAME>') & (merged_pitcher_df['Name']!='<NAME>')]
closers = merged_pitcher_df[(merged_pitcher_df['SV'] >10) & (merged_pitcher_df['HLD'] >= 0) & (merged_pitcher_df['Name'] !='<NAME>') & (merged_pitcher_df['Name']!='<NAME>')]
return starting_pitchers, relief_pitchers, closers
self.starting_pitchers, self.relief_pitchers, self.closers = separate_pitchers(self.merged_pitcher_df)
#cluster players to obtain a prior distrbution for each stat
print('Clustering Players')
print('')
def mean_shift(data,quantile) :
#split the data
train = data.drop('Name', axis =1)
if 'Cluster Label' in train.columns:
train = data.drop(['Name', 'Cluster Label', 'IDfg'], axis =1)
else:
pass
#estimate the bandwith
bandwidth = estimate_bandwidth(train, quantile=quantile, n_samples=100000)
#instantiate the mean shift clustering object
ms = MeanShift(bandwidth = bandwidth, bin_seeding = True, cluster_all =True, n_jobs = None )
#fit the model to the training data
ms.fit(train)
#grab the cluster labels and centers
labels = ms.labels_
cluster_centers = ms.cluster_centers_
#find the number of unique labels
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
#find the clusters
cluster_finder = data
cluster_finder['Cluster Label'] = labels
#create the clusters
clusters = [cluster_finder[cluster_finder['Cluster Label']==label] for label in labels_unique]
#extract the summary statistics
cluster_describers = [cluster.describe() for cluster in clusters]
return cluster_finder, clusters, cluster_describers
self.cluster_finder_batter, self.clusters_batter, self.cluster_describers_batter = mean_shift(self.merged_batter_df,0.16)
self.cluster_finder_starting_pitcher, self.clusters_starting_pitcher, self.cluster_describers_starting_pitcher = mean_shift(self.starting_pitchers, 0.18)
self.cluster_finder_relief_pitcher, self.clusters_relief_pitcher, self.cluster_describers_relief_pitcher = mean_shift(self.relief_pitchers, 0.2)
self.cluster_finder_closer, self.clusters_closer, self.cluster_describer_closer = mean_shift(self.closers, 0.19)
#match the pitcher subsets properly
def subset_pitchers(dictionary, dataframe):
for key in dictionary.keys():
dictionary = {key: dictionary[key] for key in dataframe['Name']}
return dictionary
self.current_stat_starting = subset_pitchers(self.current_stat_pitcher, self.cluster_finder_starting_pitcher)
self.current_stat_relief = subset_pitchers(self.current_stat_pitcher, self.cluster_finder_relief_pitcher)
self.current_stat_closer = subset_pitchers(self.current_stat_pitcher, self.cluster_finder_closer)
#use the clusters to make distributions for rookies
#also use clusters for a similarity comparison for non-rookies
def player_matcher(dictionary,dataframe,columns):
for key in dictionary.keys() :
label = int(dataframe[dataframe['Name'] == key]['Cluster Label'])
dictionary[key].loc[key] = dataframe[dataframe['Cluster Label']==label][columns[1:]].mean().round()
return dictionary
self.full_batters = player_matcher(self.current_stat_batter, self.cluster_finder_batter,self.batter_columns)
self.full_starters = player_matcher(self.current_stat_starting, self.cluster_finder_starting_pitcher,self.pitcher_columns)
self.full_relievers = player_matcher(self.current_stat_relief, self.cluster_finder_relief_pitcher,self.pitcher_columns)
self.full_closers = player_matcher(self.current_stat_closer, self.cluster_finder_closer,self.pitcher_columns)
#sample over the player distributions
def sample_averager(dictionary,simulations):
sample_players = {}
sample_players_risk = {}
for key in tqdm(dictionary.keys()):
if len(dictionary[key]) > 1 :
for column in dictionary[key]:
if column == 'IDfg':
dictionary[key]= dictionary[key].replace([np.inf, -np.inf], np.nan).fillna(0) #if not needed, remove
randomizer = 0
means = 0
stdevs = 0
dictionary[key].loc[key + ' Mean',column] = means
dictionary[key].loc[key + ' Risk',column] = stdevs
sample_players.update({key : dictionary[key].loc[key + ' Mean'].to_frame().transpose()})
sample_players_risk.update({key: dictionary[key].loc[key + ' Risk'].to_frame().transpose()})
sample_players_risk[key]= sample_players_risk[key].replace([np.inf, -np.inf], np.nan).fillna(0) #if not needed, remove
sample_players[key] = sample_players[key].reset_index()
sample_players[key].rename(columns = {'index': 'Name'}, inplace = True)
sample_players_risk[key] = sample_players_risk[key].reset_index()
sample_players_risk[key].rename(columns = {'index': 'Name'}, inplace = True)
else:
dictionary[key]= dictionary[key].replace([np.inf, -np.inf], np.nan).fillna(0) #if not needed, remove
randomizer = [randint(int(0.9*np.mean(dictionary[key][column])) + int(0.1*min(dictionary[key][column])), int(0.1*np.mean(dictionary[key][column]))+ int(0.9*max(dictionary[key][column]))) for i in range(simulations)]
means = np.mean(randomizer)
stdevs = np.std(randomizer)
dictionary[key].loc[key + ' Mean',column] = means
dictionary[key].loc[key + ' Risk',column] = stdevs
sample_players.update({key : dictionary[key].loc[key + ' Mean'].to_frame().transpose()})
sample_players_risk.update({key: dictionary[key].loc[key + ' Risk'].to_frame().transpose()})
sample_players_risk[key]= sample_players_risk[key].replace([np.inf, -np.inf], np.nan).fillna(0) #if not needed, remove
sample_players[key] = sample_players[key].reset_index()
sample_players[key].rename(columns = {'index': 'Name'}, inplace = True)
sample_players_risk[key] = sample_players_risk[key].reset_index()
sample_players_risk[key].rename(columns = {'index': 'Name'}, inplace = True)
return sample_players, sample_players_risk
self.sample_batters, self.sample_batters_risk = sample_averager(self.full_batters, self.simulations)
self.sample_starting_pitchers, self.sample_starting_pitchers_risk = sample_averager(self.full_starters, self.simulations)
self.sample_relief_pitchers, self.sample_relief_pitchers_risk = sample_averager(self.full_relievers, self.simulations)
self.sample_closers, self.sample_closers_risk = sample_averager(self.full_closers, self.simulations)
#get the dataframes for fantasy points
#for point totals
self.sample_batters = merge_dict(self.sample_batters, self.batter_columns)
self.sample_starting_pitchers = merge_dict(self.sample_starting_pitchers, self.pitcher_columns)
self.sample_relief_pitchers = merge_dict(self.sample_relief_pitchers, self.pitcher_columns)
self.sample_closers = merge_dict(self.sample_closers, self.pitcher_columns)
#for risk
self.sample_batters_risk = merge_dict(self.sample_batters_risk, self.batter_columns)
self.sample_starting_pitchers_risk = merge_dict(self.sample_starting_pitchers_risk, self.pitcher_columns)
self.sample_relief_pitchers_risk = merge_dict(self.sample_relief_pitchers_risk, self.pitcher_columns)
self.sample_closers_risk = merge_dict(self.sample_closers_risk, self.pitcher_columns)
#calculate fantasy points for batters
def fantasy_batter_points(dataframe):
dataframe['Fantasy Total'] = 2*dataframe['1B'] + 4*dataframe['2B'] + 6*dataframe['3B'] + 8*dataframe['HR']+ 1*dataframe['BB'] + 1*dataframe['R']+ 1.5*dataframe['RBI'] -0.5*dataframe['SO'] + 3*dataframe['SB']
return dataframe
#for points
self.sample_batters = fantasy_batter_points(self.sample_batters)
#for risk
self.sample_batters_risk = fantasy_batter_points(self.sample_batters_risk)
#calculate fantasy points for pitchers
def fantasy_pitcher_points(dataframe):
dataframe['Fantasy Total'] = 0.99*dataframe['IP'] -3*dataframe['ER'] + 4*dataframe['W'] + 3*dataframe['SV']+ 3*dataframe['SO'] + 2*dataframe['HLD']
return dataframe
#for points
self.sample_starting_pitchers = fantasy_pitcher_points(self.sample_starting_pitchers)
self.sample_relief_pitchers = fantasy_pitcher_points(self.sample_relief_pitchers)
self.sample_closers = fantasy_pitcher_points(self.sample_closers)
#for risk
self.sample_starting_pitchers_risk = fantasy_pitcher_points(self.sample_starting_pitchers_risk)
self.sample_relief_pitchers_risk = fantasy_pitcher_points(self.sample_relief_pitchers_risk)
self.sample_closers_risk = fantasy_pitcher_points(self.sample_closers_risk)
print('')
print('Simulation Complete')
print('')
#naive risk adjusted scores
def risk_adjusted_scores(points, risk):
#get risk adjusted scores
risk_adjusted_score = []
for score, risk in zip(points['Fantasy Total'], risk['Fantasy Total']):
risk_adjusted_score.append(0.75*score - 0.25*risk)
#make new dataframe
risk_adjusted = pd.DataFrame({'IDfg': points['IDfg'],'Name':points['Name'].apply(lambda x : x.replace(' Mean', '')), 'Risk Adjusted Score': risk_adjusted_score})
return risk_adjusted
#hitters
self.risk_adjusted_batters = risk_adjusted_scores(self.sample_batters, self.sample_batters_risk)
self.risk_adjusted_batters['IDfg'] = self.merged_batter_df['IDfg']
#pitchers
self.risk_adjusted_starters = risk_adjusted_scores(self.sample_starting_pitchers, self.sample_starting_pitchers_risk)
self.risk_adjusted_relief = risk_adjusted_scores(self.sample_relief_pitchers, self.sample_relief_pitchers_risk)
self.risk_adjusted_closers = risk_adjusted_scores(self.sample_closers, self.sample_closers_risk)
#add positions
def add_fielding_positions(start_time, end_time, players):
#chadwick register for players who played in most recent season of analysis
#this will be used for cross-referencing player IDs
chadwick_register = pyb.chadwick_register()
#lahman database to grab positions
lahman = pyb.lahman.fielding()
lahman['key_bbref'] = lahman['playerID']
lahman = lahman.drop(columns = ['playerID'])
lahman = lahman.drop_duplicates('key_bbref')
#merge frames
merged = pd.merge(lahman,chadwick_register, on = 'key_bbref', how = 'outer')
merged = merged[['yearID','key_bbref', 'teamID','POS', 'key_fangraphs', 'name_first', 'name_last']]
merged['IDfg'] = merged['key_fangraphs']
merged.drop(columns = ['key_fangraphs'], inplace = True) #drop missing players for now, which is very few
#merge with player positions
players = pd.merge(players, merged, on = 'IDfg', how = 'left')
players = players[['Name', 'POS', 'Risk Adjusted Score']]
players.dropna(inplace = True)
#fix Ohtani
#we will find a way to add his pitching stats
def ohtani(x):
if x == 'P':
return 'SP'
else:
return x
players['POS'] = players['POS'].apply(lambda x : ohtani(x))
return players
self.risk_adjusted_batters = add_fielding_positions(start_time, end_time, self.risk_adjusted_batters)
#add pitcher positions
def add_pitching_positions(starters, relievers, closers):
#naive criteria to separate into RP and SP
starters['POS'] = ['SP' for i in range(0,len(starters))]
relievers['POS'] = ['RP' for i in range(0,len(relievers))]
closers['POS'] = ['RP' for i in range(0,len(closers))]
return starters, relievers, closers
self.risk_adjusted_starters, self.risk_adjusted_relief, self.risk_adjusted_closers = add_pitching_positions(self.risk_adjusted_starters, self.risk_adjusted_relief, self.risk_adjusted_closers)
#change IDs
def id_changer(players):
players['IDfg'] = [i for i in range(len(players))]
players = players[['IDfg', 'Name', 'POS', 'Risk Adjusted Score']]
return players
self.risk_adjusted_starters = id_changer(self.risk_adjusted_starters)
self.risk_adjusted_relief = id_changer(self.risk_adjusted_relief)
self.risk_adjusted_closers = id_changer(self.risk_adjusted_closers)
self.risk_adjusted_batters = id_changer(self.risk_adjusted_batters)
#combine all players
def combine_all_players(batters, starters, relievers, closers):
players = batters.append(starters)
players = players.append(closers)
players = players.append(relievers)
return players
self.all_players = combine_all_players(self.risk_adjusted_batters, self.risk_adjusted_starters, self.risk_adjusted_relief, self.risk_adjusted_closers)
#Adapt Drafting Technique from : https://github.com/ykeuter/ffl/blob/master/notebooks/mcts.ipynb
#create the draft state so we know who has been taken and who is available and who's turn it is
class DraftState:
def __init__(self, rosters, turns, freeagents, playerjm=None):
self.rosters = rosters
self.turns = turns
self.freeagents = freeagents
self.playerJustMoved = playerjm
#create weights the so algorithm can choose relative to which positions they have chosen from
def GetResult(self, playerjm):
""" Get the game result from the viewpoint of playerjm.
"""
if playerjm is None: return 0
pos_wgts = {
("SP"): [.9, .9, .9 ,.6, .6 ,.6],
("RP"): [.8, .7, .6, .5,.5],
("C"): [.6,.4],
("1B"): [.7,],
("2B"): [.7, .6],
("SS"): [.7, .6],
("3B"): [.7, .6],
("1B", "2B", "3B", "SS", "OF", "C"): [.6],
("1B", "2B", "3B", "SS"): [.6],
("OF"): [.7,.7,.7,.5,.4],
}
result = 0
# map the drafted players to the weights
for p in self.rosters[playerjm]:
max_wgt, _, max_pos, old_wgts = max(
((wgts[0], -len(lineup_pos), lineup_pos, wgts) for lineup_pos, wgts in pos_wgts.items()
if p.position in lineup_pos),
default=(0, 0, (), []))
if max_wgt > 0:
result += max_wgt * p.points
old_wgts.pop(0)
if not old_wgts:
pos_wgts.pop(max_pos)
# map the remaining weights to the top three free agents
for pos, wgts in pos_wgts.items():
result += np.mean([p.points for p in self.freeagents if p.position in pos][:3]) * sum(wgts)
return result
#possible moves for each state, given the position
def GetMoves(self):
""" Get all possible moves from this state.
"""
pos_max = {"SP": 6, "RP": 5, "1B": 1, "C":2, "2B":2 , "3B":2, "SS": 2, "OF":5}
if len(self.turns) == 0: return []
roster_positions = np.array([p.position for p in self.rosters[self.turns[0]]], dtype=str)
moves = [pos for pos, max_ in pos_max.items() if np.sum(roster_positions == pos) < max_]
return moves
#update states after each move
def DoMove(self, move):
""" Update a state by carrying out the given move.
Must update playerJustMoved.
"""
player = next(p for p in self.freeagents if p.position == move)
self.freeagents.remove(player)
rosterId = self.turns.pop(0)
self.rosters[rosterId].append(player)
self.playerJustMoved = rosterId
def Clone(self):
""" Create a deep clone of this game state.
"""
rosters = list(map(lambda r: r[:], self.rosters))
st = DraftState(rosters, self.turns[:], self.freeagents[:],
self.playerJustMoved)
return st
#create a player object with relevant attributes
class MLBPlayer:
def __init__(self, name, position, points):
self.name = name
self.position = position
self.points = points
def __repr__(self):
return "|".join([self.name, self.position, str(self.points)])
# This is a very simple implementation of the uct Monte Carlo Tree Search algorithm in Python 2.7.
# The function uct(rootstate, itermax, verbose = False) is towards the bottom of the code.
# It aims to have the clearest and simplest possible code, and for the sake of clarity, the code
# is orders of magnitude less efficient than it could be made, particularly by using a
# state.GetRandomMove() or state.DoRandomRollout() function.
#
# Written by <NAME>, <NAME>, <NAME> (University of York, UK) September 2012.
#
# Licence is granted to freely use and distribute for any sensible/legal purpose so long as this comment
# remains in any distributed code.
#
# For more information about Monte Carlo Tree Search check out our web site at www.mcts.ai
class Node:
""" A node in the game tree. Note wins is always from the viewpoint of playerJustMoved.
Crashes if state not specified.
"""
def __init__(self, move = None, parent = None, state = None):
self.move = move # the move that got us to this node - "None" for the root node
self.parentNode = parent # "None" for the root node
self.childNodes = []
self.wins = 0
self.visits = 0
self.untriedMoves = state.GetMoves() # future child nodes
self.playerJustMoved = state.playerJustMoved # the only part of the state that the Node needs later
def uctSelectChild(self):
""" Use the UCB1 formula to select a child node. Often a constant uctK is applied so we have
            lambda c: c.wins/c.visits + uctK * sqrt(2*log(self.visits)/c.visits) to vary the amount of
exploration versus exploitation.
"""
uctK = 1000 #200 #2000 #100 #20000
s = sorted(self.childNodes, key = lambda c: c.wins/c.visits + uctK * math.sqrt(2*math.log(self.visits)/c.visits))[-1]
return s
def AddChild(self, m, s):
""" Remove m from untriedMoves and add a new child node for this move.
Return the added child node
"""
n = Node(move = m, parent = self, state = s)
self.untriedMoves.remove(m)
self.childNodes.append(n)
return n
def Update(self, result):
""" Update this node - one additional visit and result additional wins. result must be from the viewpoint of playerJustmoved.
"""
self.visits += 1
self.wins += result
def uct(rootstate, itermax, verbose = False):
""" Conduct a uct search for itermax iterations starting from rootstate.
Return the best move from the rootstate.
"""
rootnode = Node(state = rootstate)
for i in range(itermax):
node = rootnode
state = rootstate.Clone()
# Select
while node.untriedMoves == [] and node.childNodes != []: # node is fully expanded and non-terminal
node = node.uctSelectChild()
state.DoMove(node.move)
# Expand
if node.untriedMoves != []: # if we can expand (i.e. state/node is non-terminal)
m = random.choice(node.untriedMoves)
state.DoMove(m)
node = node.AddChild(m,state) # add child and descend tree
# Rollout - this can often be made orders of magnitude quicker using a state.GetRandomMove() function
while state.GetMoves() != []: # while state is non-terminal
state.DoMove(random.choice(state.GetMoves()))
# Backpropagate
while node != None: # backpropagate from the expanded node and work back to the root node
node.Update(state.GetResult(node.playerJustMoved)) # state is terminal. Update node with result from POV of node.playerJustMoved
node = node.parentNode
return sorted(rootnode.childNodes, key = lambda c: c.visits)[-1].move # return the move that was most visited
class Drafter(DraftState):
def __init__(self, forecaster, draftstate = DraftState, mlbplayer = MLBPlayer, uct = uct):
self.forecaster = forecaster
self.draftstate = draftstate
self.mlbplayer = mlbplayer
self.uct = uct
#prepare the draft
def prepare_draft(self) :
        #the DraftState class already defines everything the agent needs:
        #position weights for drafting importance (GetResult),
        #the possible moves for each player at each state (GetMoves),
        #state updates of the draft after each move (DoMove),
        #and a deep clone of the game state (Clone)
        return self.draftstate
#simulate a fantasy faseball draft
def draft(self) :
print('')
print('Drafting')
print('')
#import projections
self.forecaster.all_players.set_index('IDfg', inplace = True)
self.forecaster.mlb_players = self.forecaster.all_players
freeagents = [self.mlbplayer(*p) for p in self.forecaster.mlb_players.itertuples(index=False, name=None)]
#create draft competitors
num_competitors = self.forecaster.num_competitors
rosters = [[] for _ in range(num_competitors)] # empty rosters to start with
#create number of rounds and turns
num_rounds = self.forecaster.num_rounds
turns = []
# generate turns by snake order
for i in range(num_rounds):
turns += reversed(range(num_competitors)) if i % 2 else range(num_competitors)
#create draft states
state = self.draftstate(rosters, turns, freeagents)
iterations = self.forecaster.num_iterations
while state.GetMoves() != []:
move = self.uct(state, iterations)
print(move, end=".")
state.DoMove(move)
print('')
print('Draft Complete')
#draft results
self.draft_results = pd.DataFrame({"Team " + str(i + 1): r for i, r in enumerate(state.rosters)})
return self.draft_results
#convert the dataframes to excel sheets
def excel_converter(self):
#excel file
writer = pd.ExcelWriter(f'C:\\Users\\{self.forecaster.user}\\Downloads\\{end_time.year +1}_Projections_{self.forecaster.today}.xlsx')
#Drafting
self.draft_results.to_excel(writer, sheet_name = 'Mock Draft',index = False)
#full list
self.forecaster.all_players.to_excel(writer, sheet_name = 'All Players',index = False)
#risk-adjusted
self.forecaster.risk_adjusted_batters.to_excel(writer, sheet_name = 'Risk Adjusted Batters',index = False)
self.forecaster.risk_adjusted_starters.to_excel(writer, sheet_name = 'Risk Adjusted Starters',index = False)
self.forecaster.risk_adjusted_relief.to_excel(writer, sheet_name = 'Risk Adjusted Relief',index = False)
self.forecaster.risk_adjusted_closers.to_excel(writer, sheet_name = 'Risk Adjusted Closers',index = False)
#points
self.forecaster.sample_batters.drop(columns = ['IDfg'], inplace = True)
self.forecaster.sample_batters.to_excel(writer, sheet_name='Batters Projection',index = False)
self.forecaster.sample_starting_pitchers.drop(columns = ['IDfg'], inplace = True)
self.forecaster.sample_starting_pitchers.to_excel(writer, sheet_name='Starters Projection',index = False)
self.forecaster.sample_relief_pitchers.drop(columns = ['IDfg'], inplace = True)
self.forecaster.sample_relief_pitchers.to_excel(writer, sheet_name='Relievers Projection',index = False)
self.forecaster.sample_closers.drop(columns = ['IDfg'], inplace = True)
self.forecaster.sample_closers.to_excel(writer, sheet_name='Closers Projection',index = False)
#risk
self.forecaster.sample_batters_risk.drop(columns = ['IDfg'], inplace = True)
self.forecaster.sample_batters_risk.to_excel(writer, sheet_name='Batters Risk',index = False)
self.forecaster.sample_starting_pitchers_risk.drop(columns = ['IDfg'], inplace = True)
self.forecaster.sample_starting_pitchers_risk.to_excel(writer, sheet_name='Starters Risk',index = False)
self.forecaster.sample_relief_pitchers_risk.drop(columns = ['IDfg'], inplace = True)
self.forecaster.sample_relief_pitchers_risk.to_excel(writer, sheet_name='Relievers Risk',index = False)
self.forecaster.sample_closers_risk.drop(columns = ['IDfg'], inplace = True)
self.forecaster.sample_closers_risk.to_excel(writer, sheet_name='Closers Risk',index = False)
#clusters
self.forecaster.cluster_finder_batter.to_excel(writer, sheet_name = 'Batter Clusters',index = False)
self.forecaster.cluster_finder_starting_pitcher.to_excel(writer, sheet_name = 'Starting Clusters',index = False)
self.forecaster.cluster_finder_relief_pitcher.to_excel(writer, sheet_name = 'Relief Clusters',index = False)
self.forecaster.cluster_finder_closer.to_excel(writer, sheet_name = 'Closer Clusters',index = False)
#save file
writer.save()
return self.forecaster
#call the excel converter
def call_converter(self):
return self.excel_converter()
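#a minimal sketch of how the pieces above might be wired together; it assumes
#the tkinter prompts, the FanGraphs downloads, and the Windows Downloads path
#used by excel_converter are all available in your environment
if __name__ == "__main__":
    simulations, num_competitors, num_rounds, num_iterations = entry()
    years = get_dates()
    forecaster = Forecaster(simulations, num_competitors, num_rounds, num_iterations, years)
    forecaster.monte_carlo_forecast()
    drafter = Drafter(forecaster)
    drafter.draft()
    drafter.call_converter()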
|
[
"tkinter.Button",
"math.log",
"numpy.array",
"tkinter.Label",
"getpass.getuser",
"sklearn.cluster.MeanShift",
"pandas.ExcelWriter",
"pandas.date_range",
"numpy.mean",
"tkinter.Entry",
"random.choice",
"pybaseball.chadwick_register",
"pandas.merge",
"numpy.std",
"datetime.date.today",
"warnings.filterwarnings",
"pybaseball.lahman.fielding",
"numpy.unique",
"sklearn.cluster.estimate_bandwidth",
"numpy.sum",
"tkinter.Tk"
] |
[((718, 751), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (741, 751), False, 'import warnings\n'), ((861, 868), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (866, 868), True, 'import tkinter as tk\n'), ((983, 1054), 'tkinter.Label', 'tk.Label', (['root'], {'text': '"""Choose the number of simulations for forecasting"""'}), "(root, text='Choose the number of simulations for forecasting')\n", (991, 1054), True, 'import tkinter as tk\n'), ((1079, 1093), 'tkinter.Entry', 'tk.Entry', (['root'], {}), '(root)\n', (1087, 1093), True, 'import tkinter as tk\n'), ((1122, 1173), 'tkinter.Label', 'tk.Label', (['root'], {'text': '"""Choose Number of Competitors"""'}), "(root, text='Choose Number of Competitors')\n", (1130, 1173), True, 'import tkinter as tk\n'), ((1202, 1216), 'tkinter.Entry', 'tk.Entry', (['root'], {}), '(root)\n', (1210, 1216), True, 'import tkinter as tk\n'), ((1240, 1303), 'tkinter.Label', 'tk.Label', (['root'], {'text': '"""Choose the number of rounds in the draft"""'}), "(root, text='Choose the number of rounds in the draft')\n", (1248, 1303), True, 'import tkinter as tk\n'), ((1327, 1341), 'tkinter.Entry', 'tk.Entry', (['root'], {}), '(root)\n', (1335, 1341), True, 'import tkinter as tk\n'), ((1369, 1462), 'tkinter.Label', 'tk.Label', (['root'], {'text': '"""Choose the number of iterations for the Draft Agent\'s Exploration"""'}), '(root, text=\n "Choose the number of iterations for the Draft Agent\'s Exploration")\n', (1377, 1462), True, 'import tkinter as tk\n'), ((1485, 1499), 'tkinter.Entry', 'tk.Entry', (['root'], {}), '(root)\n', (1493, 1499), True, 'import tkinter as tk\n'), ((2243, 2293), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Submit"""', 'command': 'get_params'}), "(root, text='Submit', command=get_params)\n", (2252, 2293), True, 'import tkinter as tk\n'), ((2498, 2505), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (2503, 2505), True, 'import tkinter as tk\n'), ((2597, 2636), 'tkinter.Label', 'tk.Label', (['root'], {'text': '"""Start Year: YYYY"""'}), "(root, text='Start Year: YYYY')\n", (2605, 2636), True, 'import tkinter as tk\n'), ((2655, 2669), 'tkinter.Entry', 'tk.Entry', (['root'], {}), '(root)\n', (2663, 2669), True, 'import tkinter as tk\n'), ((2686, 2723), 'tkinter.Label', 'tk.Label', (['root'], {'text': '"""End Year: YYYY"""'}), "(root, text='End Year: YYYY')\n", (2694, 2723), True, 'import tkinter as tk\n'), ((2740, 2754), 'tkinter.Entry', 'tk.Entry', (['root'], {}), '(root)\n', (2748, 2754), True, 'import tkinter as tk\n'), ((3193, 3241), 'tkinter.Button', 'tk.Button', (['root'], {'text': '"""Submit"""', 'command': 'get_year'}), "(root, text='Submit', command=get_year)\n", (3202, 3241), True, 'import tkinter as tk\n'), ((3344, 3399), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start_time', 'end': 'end_time', 'freq': '"""D"""'}), "(start=start_time, end=end_time, freq='D')\n", (3357, 3399), True, 'import pandas as pd\n'), ((4023, 4040), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (4038, 4040), False, 'import getpass\n'), ((29350, 29420), 'numpy.array', 'np.array', (['[p.position for p in self.rosters[self.turns[0]]]'], {'dtype': 'str'}), '([p.position for p in self.rosters[self.turns[0]]], dtype=str)\n', (29358, 29420), True, 'import numpy as np\n'), ((36721, 36856), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['f"""C:\\\\Users\\\\{self.forecaster.user}\\\\Downloads\\\\{end_time.year + 1}_Projections_{self.forecaster.today}.xlsx"""'], {}), "(\n 
f'C:\\\\Users\\\\{self.forecaster.user}\\\\Downloads\\\\{end_time.year + 1}_Projections_{self.forecaster.today}.xlsx'\n )\n", (36735, 36856), True, 'import pandas as pd\n'), ((13438, 13500), 'sklearn.cluster.estimate_bandwidth', 'estimate_bandwidth', (['train'], {'quantile': 'quantile', 'n_samples': '(100000)'}), '(train, quantile=quantile, n_samples=100000)\n', (13456, 13500), False, 'from sklearn.cluster import MeanShift, estimate_bandwidth\n'), ((13577, 13656), 'sklearn.cluster.MeanShift', 'MeanShift', ([], {'bandwidth': 'bandwidth', 'bin_seeding': '(True)', 'cluster_all': '(True)', 'n_jobs': 'None'}), '(bandwidth=bandwidth, bin_seeding=True, cluster_all=True, n_jobs=None)\n', (13586, 13656), False, 'from sklearn.cluster import MeanShift, estimate_bandwidth\n'), ((13984, 14001), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (13993, 14001), True, 'import numpy as np\n'), ((24062, 24085), 'pybaseball.chadwick_register', 'pyb.chadwick_register', ([], {}), '()\n', (24083, 24085), True, 'import pybaseball as pyb\n'), ((24155, 24176), 'pybaseball.lahman.fielding', 'pyb.lahman.fielding', ([], {}), '()\n', (24174, 24176), True, 'import pybaseball as pyb\n'), ((24404, 24468), 'pandas.merge', 'pd.merge', (['lahman', 'chadwick_register'], {'on': '"""key_bbref"""', 'how': '"""outer"""'}), "(lahman, chadwick_register, on='key_bbref', how='outer')\n", (24412, 24468), True, 'import pandas as pd\n'), ((24831, 24879), 'pandas.merge', 'pd.merge', (['players', 'merged'], {'on': '"""IDfg"""', 'how': '"""left"""'}), "(players, merged, on='IDfg', how='left')\n", (24839, 24879), True, 'import pandas as pd\n'), ((33577, 33609), 'random.choice', 'random.choice', (['node.untriedMoves'], {}), '(node.untriedMoves)\n', (33590, 33609), False, 'import random\n'), ((4062, 4074), 'datetime.date.today', 'date.today', ([], {}), '()\n', (4072, 4074), False, 'from datetime import date, timedelta\n'), ((28910, 28979), 'numpy.mean', 'np.mean', (['[p.points for p in self.freeagents if p.position in pos][:3]'], {}), '([p.points for p in self.freeagents if p.position in pos][:3])\n', (28917, 28979), True, 'import numpy as np\n'), ((5141, 5181), 'pandas.merge', 'pd.merge', (['outDf', 'df0'], {'how': 'how', 'on': 'onCols'}), '(outDf, df0, how=how, on=onCols)\n', (5149, 5181), True, 'import pandas as pd\n'), ((29478, 29509), 'numpy.sum', 'np.sum', (['(roster_positions == pos)'], {}), '(roster_positions == pos)\n', (29484, 29509), True, 'import numpy as np\n'), ((18658, 18677), 'numpy.mean', 'np.mean', (['randomizer'], {}), '(randomizer)\n', (18665, 18677), True, 'import numpy as np\n'), ((18715, 18733), 'numpy.std', 'np.std', (['randomizer'], {}), '(randomizer)\n', (18721, 18733), True, 'import numpy as np\n'), ((32348, 32369), 'math.log', 'math.log', (['self.visits'], {}), '(self.visits)\n', (32356, 32369), False, 'import math\n'), ((18436, 18468), 'numpy.mean', 'np.mean', (['dictionary[key][column]'], {}), '(dictionary[key][column])\n', (18443, 18468), True, 'import numpy as np\n'), ((18519, 18551), 'numpy.mean', 'np.mean', (['dictionary[key][column]'], {}), '(dictionary[key][column])\n', (18526, 18551), True, 'import numpy as np\n')]
|
# Evolutionary Learning Strategy Implementation
# Learn more from https://blog.openai.com/evolution-strategies/
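# The update in the generation loop below is the ES gradient estimate from
# that post: draw Gaussian noise eps_j, score each perturbed policy
# W + sigma * eps_j by its episode return R_j, then move the weights along the
# return-weighted average of the noise:
#     W <- W + alpha / (POPULATION_SIZE * sigma) * sum_j R_j * eps_j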
import gym
import numpy as np
from gym import wrappers
# GLOBAL SETTINGS
RNG_SEED = 8
ENVIRONMENT = "LunarLander-v2"
POPULATION_SIZE = 100 # Population size
GENERATION_LIMIT = 100 # Max number of generations
DISPLAY_WEIGHTS = False # Help debug weight update
RENDER = True # Render the generation representative
sigma = 0.1 # Noise standard deviation
alpha = 0.00025 # Learning rate
# Limit steps to enforce stopping early
LIMIT_STEPS = True
STEPS_LIMIT = 255  # Perform the DO_NOTHING_ACTION once steps surpass this limit
DO_NOTHING_ACTION = 0 # Action to feed in to do nothing
# Upload to OpenAI
UPLOAD = False
UPLOAD_GENERATION_INTERVAL = 10 # Generate a video at this interval
SESSION_FOLDER = "/tmp/LunarLander-experiment-1"
API_KEY = ""
# Success Mode (Settings to pass OpenAI's requirement)
SUCCESS_MODE = True
SUCCESS_THRESHOLD = 200
CONSECUTIVE_TARGET = 100
def extract_move(action):
return np.argmax(action)
def record_interval(n):
global UPLOAD_GENERATION_INTERVAL
global POPULATION_SIZE
episode_interval = (POPULATION_SIZE + 1) * UPLOAD_GENERATION_INTERVAL
return n % episode_interval == 0
def run_episode(environment, weight, render=False):
global LIMIT_STEPS
global STEPS_LIMIT
obs = environment.reset()
episode_reward = 0
done = False
step = 0
if LIMIT_STEPS:
max_steps = STEPS_LIMIT
else:
        max_steps = environment.spec.tags.get(
            'wrapper_config.TimeLimit.max_episode_steps')
while not done:
if render:
environment.render()
if step < max_steps:
action = np.matmul(weight.T, obs)
move = extract_move(action)
else:
move = DO_NOTHING_ACTION
obs, reward, done, info = environment.step(move)
step += 1
episode_reward += reward
return episode_reward
env = gym.make(ENVIRONMENT)
if UPLOAD:
if SUCCESS_MODE:
env = wrappers.Monitor(env, SESSION_FOLDER)
else:
env = wrappers.Monitor(env, SESSION_FOLDER,
video_callable=record_interval)
env.seed(RNG_SEED)
np.random.seed(RNG_SEED)
input_size = env.observation_space.shape[0]
try:
output_size = env.action_space.shape[0]
except AttributeError:
output_size = env.action_space.n
# Initial weights
W = np.zeros((input_size, output_size))
for gen in range(GENERATION_LIMIT):
# Measure performance per generation
gen_eval = run_episode(env, W, RENDER and not UPLOAD)
# Success mode enabled
success_count = 1
if SUCCESS_MODE:
track_success = [gen_eval]
curr_mean = np.mean(track_success)
while success_count < CONSECUTIVE_TARGET and curr_mean >= \
SUCCESS_THRESHOLD:
gen_eval = run_episode(env, W)
track_success.append(gen_eval)
curr_mean = np.mean(track_success)
success_count += 1
gen_eval = curr_mean
# Keep track of Returns
R = np.zeros(POPULATION_SIZE)
# Generate noise
N = np.random.randn(POPULATION_SIZE, input_size, output_size)
for j in range(POPULATION_SIZE):
W_ = W + sigma * N[j]
R[j] = run_episode(env, W_)
# Update weights
# Summation of episode_weight * episode_reward
weighted_weights = np.matmul(N.T, R).T
new_W = W + (alpha / (POPULATION_SIZE * sigma)) * weighted_weights
if DISPLAY_WEIGHTS:
print(W)
W = new_W
gen_mean = np.mean(R)
if SUCCESS_MODE:
out = "Generation {}, Success Count: {}, Success Mean: {}, " \
"Population Mean: {}"
out = out.format(gen, success_count, gen_eval, gen_mean)
else:
out = "Generation {}, Return: {}, Population Mean: {}"
out = out.format(gen, gen_eval, gen_mean)
print(out)
env.close()
if UPLOAD:
gym.upload(SESSION_FOLDER, api_key=API_KEY)
|
[
"numpy.mean",
"gym.upload",
"numpy.argmax",
"numpy.zeros",
"numpy.matmul",
"numpy.random.seed",
"gym.wrappers.Monitor",
"numpy.random.randn",
"gym.make"
] |
[((1961, 1982), 'gym.make', 'gym.make', (['ENVIRONMENT'], {}), '(ENVIRONMENT)\n', (1969, 1982), False, 'import gym\n'), ((2212, 2236), 'numpy.random.seed', 'np.random.seed', (['RNG_SEED'], {}), '(RNG_SEED)\n', (2226, 2236), True, 'import numpy as np\n'), ((2414, 2449), 'numpy.zeros', 'np.zeros', (['(input_size, output_size)'], {}), '((input_size, output_size))\n', (2422, 2449), True, 'import numpy as np\n'), ((1019, 1036), 'numpy.argmax', 'np.argmax', (['action'], {}), '(action)\n', (1028, 1036), True, 'import numpy as np\n'), ((3068, 3093), 'numpy.zeros', 'np.zeros', (['POPULATION_SIZE'], {}), '(POPULATION_SIZE)\n', (3076, 3093), True, 'import numpy as np\n'), ((3123, 3180), 'numpy.random.randn', 'np.random.randn', (['POPULATION_SIZE', 'input_size', 'output_size'], {}), '(POPULATION_SIZE, input_size, output_size)\n', (3138, 3180), True, 'import numpy as np\n'), ((3541, 3551), 'numpy.mean', 'np.mean', (['R'], {}), '(R)\n', (3548, 3551), True, 'import numpy as np\n'), ((3911, 3954), 'gym.upload', 'gym.upload', (['SESSION_FOLDER'], {'api_key': 'API_KEY'}), '(SESSION_FOLDER, api_key=API_KEY)\n', (3921, 3954), False, 'import gym\n'), ((2029, 2066), 'gym.wrappers.Monitor', 'wrappers.Monitor', (['env', 'SESSION_FOLDER'], {}), '(env, SESSION_FOLDER)\n', (2045, 2066), False, 'from gym import wrappers\n'), ((2091, 2160), 'gym.wrappers.Monitor', 'wrappers.Monitor', (['env', 'SESSION_FOLDER'], {'video_callable': 'record_interval'}), '(env, SESSION_FOLDER, video_callable=record_interval)\n', (2107, 2160), False, 'from gym import wrappers\n'), ((2712, 2734), 'numpy.mean', 'np.mean', (['track_success'], {}), '(track_success)\n', (2719, 2734), True, 'import numpy as np\n'), ((3380, 3397), 'numpy.matmul', 'np.matmul', (['N.T', 'R'], {}), '(N.T, R)\n', (3389, 3397), True, 'import numpy as np\n'), ((1703, 1727), 'numpy.matmul', 'np.matmul', (['weight.T', 'obs'], {}), '(weight.T, obs)\n', (1712, 1727), True, 'import numpy as np\n'), ((2948, 2970), 'numpy.mean', 'np.mean', (['track_success'], {}), '(track_success)\n', (2955, 2970), True, 'import numpy as np\n')]
|
"""
Onoda 2012 ICA- and PCA-based algorithm
See: Careful seeding method based on independent components analysis for
k-means clustering
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.663.5343&rep=rep1&type=pdf#page=53
"""
from abc import abstractmethod
import numpy as np
from initialisations.base import Initialisation
class Onoda(Initialisation):
"""Base class for the two Onoda 2012 initialisation algorithms"""
def _find_centroids(self, components) -> np.array:
"""Step 1b from the algorithms"""
centroids = []
for component in components:
distances = [self._calc_distance(x, component) for x in self._data]
centroids.append(self._data[np.argmin(distances)])
return np.array(centroids)
@staticmethod
def _calc_distance(row, component):
"""Used in Step 1b from the algorithms"""
mag = np.linalg.norm
return np.dot(component, row) / (mag(component) * mag(row))
@staticmethod
@abstractmethod
def _find_components() -> np.array:
"""Each algorithm must implement this"""
def find_centers(self):
"""Main method"""
return self._find_centroids(self._find_components())
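# A minimal sketch of a concrete subclass, assuming the PCA variant of the
# paper. The attribute name self._num_clusters for the requested number of
# centres is an assumption about the Initialisation base class; the real
# subclasses in this package may differ.
from sklearn.decomposition import PCA


class OnodaPCASketch(Onoda):
    """PCA-based seeding: one centroid per leading principal component."""

    def _find_components(self) -> np.array:
        pca = PCA(n_components=self._num_clusters)  # assumed attribute name
        pca.fit(self._data)
        return pca.components_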
|
[
"numpy.argmin",
"numpy.array",
"numpy.dot"
] |
[((755, 774), 'numpy.array', 'np.array', (['centroids'], {}), '(centroids)\n', (763, 774), True, 'import numpy as np\n'), ((930, 952), 'numpy.dot', 'np.dot', (['component', 'row'], {}), '(component, row)\n', (936, 952), True, 'import numpy as np\n'), ((716, 736), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (725, 736), True, 'import numpy as np\n')]
|
import numpy as np
from math import sqrt
from numba import njit, prange, jitclass
from src.Common import ParticleType
from typing import Tuple
@jitclass([])
class TimeStep:
def __init__(self):
pass
def compute(self, J: int, pA: np.array, gamma_c: float = 0.25, gamma_f: float = 0.25) -> Tuple[float, float, float]:
"""
Computes the minimum time-step
Parameters
----------
J: int
length of the particle array
pA: np.array
Particle array, should have particle_dtype as dtype
gamma_c: float
            cfl factor for the courant condition, default 0.25
gamma_f: float
cfl factor for the force condition, default 0.25
Returns
-------
Minimum time-step
Time-step based on courant condition
Time-step based on force condition
"""
min_h, max_c, max_a2 = self.computeVars(J, pA)
c = self.courant(gamma_c, min_h, max_c)
f = self.force(gamma_f, min_h, max_a2)
return min(c, f), c, f
def courant(self, cfl, h_min, c_max) -> float:
""" Timestep due to courant condition. """
return cfl * h_min / c_max
def force(self, cfl, min_h, max_a) -> float:
""" Time-step due to force. """
if max_a < 1e-12:
return 1e10
else:
return cfl * sqrt(min_h / max_a)
def computeVars(self, J: int, pA: np.array):
"""
Computes the minimum h, maximum speed of sound, and maximum acceleration for a given particle array.
Parameters
----------
J: int
Length of particle array
pA: np.array
Particle array
Returns
-------
a tuple with
minimum h, maximum speed of sound, and maximum acceleration
"""
h = []; c = []; a2 = []
# Outside of compute loop so prange can be used.
for j in prange(J):
if pA[j]['label'] == ParticleType.Fluid:
h.append(pA[j]['h'])
c.append(pA[j]['c'])
a2.append(pA[j]['ax'] * pA[j]['ax'] + pA[j]['ay'] * pA[j]['ay'])
        # Find the extrema; this cannot be done in parallel.
min_h = np.min(np.array(h))
max_c = np.max(np.array(c))
max_a2 = np.max(np.array(a2))
return min_h, max_c, max_a2
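# A minimal usage sketch. The real particle_dtype lives in src.Common; the
# fields below are only the ones this class actually reads and are assumed
# here purely for illustration. Whether TimeStep compiles as a jitclass also
# depends on your numba version, so the compute call is left commented out.
if __name__ == "__main__":
    example_dtype = np.dtype([('label', np.int64), ('h', np.float64),
                              ('c', np.float64), ('ax', np.float64),
                              ('ay', np.float64)])
    pA = np.zeros(10, dtype=example_dtype)
    pA['label'] = ParticleType.Fluid
    pA['h'] = 0.01
    pA['c'] = 10.0
    # dt, dt_courant, dt_force = TimeStep().compute(len(pA), pA)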
|
[
"numba.prange",
"numpy.array",
"math.sqrt",
"numba.jitclass"
] |
[((145, 157), 'numba.jitclass', 'jitclass', (['[]'], {}), '([])\n', (153, 157), False, 'from numba import njit, prange, jitclass\n'), ((2085, 2094), 'numba.prange', 'prange', (['J'], {}), '(J)\n', (2091, 2094), False, 'from numba import njit, prange, jitclass\n'), ((2387, 2398), 'numpy.array', 'np.array', (['h'], {}), '(h)\n', (2395, 2398), True, 'import numpy as np\n'), ((2424, 2435), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (2432, 2435), True, 'import numpy as np\n'), ((2461, 2473), 'numpy.array', 'np.array', (['a2'], {}), '(a2)\n', (2469, 2473), True, 'import numpy as np\n'), ((1463, 1482), 'math.sqrt', 'sqrt', (['(min_h / max_a)'], {}), '(min_h / max_a)\n', (1467, 1482), False, 'from math import sqrt\n')]
|
import numpy as np
import tensorflow as tf
from pytorch2onnx import AntiSpoofPredict
import cv2
import torch
import time
#load pytorch
device_id = 0
model_path = "./resources/anti_spoof_models/2020-09-28-13-11_Anti_Spoofing_1.2_112x112_model_iter-150.pth"
anti_model = AntiSpoofPredict(device_id, model_path)
dummy_img = cv2.imread("./datasets/RGB_Images/1.2_112x112/test_caffee_model/0/1599816416115_69.png")
dummy_output = anti_model.predict(dummy_img)
print("dummy_output_pytorch", dummy_output)
inputx = anti_model.transform_input(dummy_img)
inputx = inputx.permute(0, 2, 3, 1).numpy()
print(inputx.shape)
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test model on random input data.
input_shape = input_details[0]['shape']
print(input_details)
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
print(input_data.shape)
# input_data = dummy_input.numpy()
interpreter.set_tensor(input_details[0]['index'], inputx)
# start_time = mills()
interpreter.invoke()
start_time = time.time()
# The function `get_tensor()` returns a copy of the tensor data.
# Use `tensor()` in order to get a pointer to the tensor.
output_data = interpreter.get_tensor(output_details[0]['index'])
end_time = time.time()
# end_time = mills()
print("Time taken to one inference in milliseconds", end_time - start_time)
print("output of model",output_data)
|
[
"tensorflow.lite.Interpreter",
"numpy.random.random_sample",
"time.time",
"cv2.imread",
"pytorch2onnx.AntiSpoofPredict"
] |
[((270, 309), 'pytorch2onnx.AntiSpoofPredict', 'AntiSpoofPredict', (['device_id', 'model_path'], {}), '(device_id, model_path)\n', (286, 309), False, 'from pytorch2onnx import AntiSpoofPredict\n'), ((323, 421), 'cv2.imread', 'cv2.imread', (['"""./datasets/RGB_Images/1.2_112x112/test_caffee_model/0/1599816416115_69.png"""'], {}), "(\n './datasets/RGB_Images/1.2_112x112/test_caffee_model/0/1599816416115_69.png'\n )\n", (333, 421), False, 'import cv2\n'), ((671, 717), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': '"""model.tflite"""'}), "(model_path='model.tflite')\n", (690, 717), True, 'import tensorflow as tf\n'), ((1230, 1241), 'time.time', 'time.time', ([], {}), '()\n', (1239, 1241), False, 'import time\n'), ((1441, 1452), 'time.time', 'time.time', ([], {}), '()\n', (1450, 1452), False, 'import time\n'), ((999, 1035), 'numpy.random.random_sample', 'np.random.random_sample', (['input_shape'], {}), '(input_shape)\n', (1022, 1035), True, 'import numpy as np\n')]
|
import os
import numpy as np
def read_data(input_path, english_only, english_indices=None):
"""Returns the list of signatures, labels, and ids. These can have English-only signatures if specified."""
if english_indices is None:
english_indices = []
sig_list = []
lab_list = [] # Genuine = 0, Forged = 1
id_list = []
if english_only:
input_files = [i for i in sorted(os.listdir(input_path)) if int(i.split('S')[0].replace('U','')) in english_indices]
else:
input_files = sorted(os.listdir(input_path))
for file in input_files:
data = np.genfromtxt(input_path + file, skip_header=1)
data = np.delete(data, 2, axis=1) # drop the timestamp column
try:
sig_list.append(data)
id_list.append(file.split('.')[0])
if int(file.split('S')[1].replace('.TXT', '')) < 21:
lab_list.append(0) # Genuine
else:
lab_list.append(1) # Forged
except:
print(file)
return sig_list, lab_list, id_list
def normalize_data(data, skipcols=None):
"""Normalizes the data so that all features are in the range [0,1]"""
rows = 0
for i in range(len(data)):
rows += data[i].shape[0]
rows2 = 0
data_all = np.empty((rows, data[0].shape[1]))
for i in range(len(data)):
data_all[rows2:rows2+data[i].shape[0], :] = data[i]
rows2 += data[i].shape[0]
data_norm = []
for i in range(len(data)):
data_norm_sig = np.zeros((data[i].shape[0], data[i].shape[1]))
for f in range(data[i].shape[1]):
if f in skipcols:
data_norm_sig[:, f] = data[i][:, f]
else:
data_norm_sig[:, f] = (data[i][:, f] - data_all[:, f].min()) / data_all[:, f].ptp()
data_norm.append(data_norm_sig)
return data_norm
def merge_timesteps(x, timesteps_to_merge):
"""Combines multiple timesteps of raw signature data into a single timestep.
E.g., if timesteps_to_merge is 3, then each 3 rows will now be concatenated into 1,
meaning there will be 3*num_features in the row."""
x_merged = []
for x_i in x:
x_i_merged = []
start_index = 0
end_index = timesteps_to_merge
while end_index < x_i.shape[0]:
x_i_merged.append(np.concatenate(x_i[start_index:end_index]))
start_index = start_index + timesteps_to_merge
end_index = end_index + timesteps_to_merge
# Accounting for the very last part of the sequence
x_i_merged.append(np.concatenate(x_i[x_i.shape[0] - timesteps_to_merge:x_i.shape[0], :]))
x_merged.append(np.array(x_i_merged))
return x_merged
def split_sequences(x, y, names, window_length, window_stride):
"""Splits all input sequences into subsequences by moving along a window of given window_length and stride."""
split_x = []
split_y = []
split_ids = []
for (x_i, y_i, id_i) in zip(x, y, names):
start_index = 0
end_index = window_length
while end_index < x_i.shape[0]:
split_x.append(x_i[start_index:end_index, :])
split_y.append(y_i)
split_ids.append(id_i)
start_index = start_index + window_stride
end_index = end_index + window_stride
# Accounting for the very last part of the sequence
split_x.append(x_i[x_i.shape[0]-window_length:x_i.shape[0], :])
split_y.append(y_i)
split_ids.append(id_i)
return np.array(split_x), np.array(split_y), np.array(split_ids)
def split_train_test(x, y, names, train_percentage=0.75):
"""Randomly splitting the data to train and test sets (by IDs) using the given percentage split."""
np.random.seed(0) # Setting this for reproducible results
subjects = [i.split('S')[0] for i in names]
unique_subjects = list(set(subjects))
train_subjects = np.random.choice(unique_subjects, size=int(len(unique_subjects)*train_percentage), replace=False)
train_indices = [i for i, e in enumerate(subjects) if e in train_subjects]
test_indices = [i for i, e in enumerate(subjects) if e not in train_subjects]
x_train = x[train_indices]
y_train = y[train_indices]
x_test = x[test_indices]
y_test = y[test_indices]
return x_train, x_test, y_train, y_test
if __name__ == "__main__":
INPUT_PATH = '../datasets/Task2/'
OUTPUT_PATH = '../datasets/processed'
eng_indices = [2, 4, 6, 8, 10, 12, 13, 15, 18, 20, 22, 24, 25, 28, 30, 32, 33, 34, 35, 40] # English signature numbers
eng_only = True # Whether to only consider English or English and Chinese signatures
stride = 20 # How far to move the window for creating fixed-length subsequences with each signature
length = 25 # How big each window is for the fixed-length sequences
merge_num = 3 # How many rows to concatenate into a single row -- see function for more details
train_test_split = 0.75 # This is how much of the data will be used for TRAINING, the rest is for testing (split by ID)
normalize = True # Whether you want to normalize the data or not
signatures, labels, ids = read_data(INPUT_PATH, english_only=eng_only, english_indices=eng_indices)
if normalize:
signatures_normalized = normalize_data(signatures, skipcols=[2])
signatures_merged = merge_timesteps(x=signatures_normalized, timesteps_to_merge=merge_num)
else:
signatures_merged = merge_timesteps(x=signatures, timesteps_to_merge=merge_num)
signatures_subsequences, labels_subsequences, ids_subsequences = split_sequences(x=signatures_merged, y=labels, names=ids, window_length=length, window_stride=stride)
signatures_train, signatures_test, labels_train, labels_test = split_train_test(x=signatures_subsequences, y=labels_subsequences, names=ids_subsequences, train_percentage=0.75)
|
[
"os.listdir",
"numpy.delete",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.random.seed",
"numpy.concatenate",
"numpy.genfromtxt"
] |
[((1179, 1213), 'numpy.empty', 'np.empty', (['(rows, data[0].shape[1])'], {}), '((rows, data[0].shape[1]))\n', (1187, 1213), True, 'import numpy as np\n'), ((3460, 3477), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3474, 3477), True, 'import numpy as np\n'), ((570, 617), 'numpy.genfromtxt', 'np.genfromtxt', (['(input_path + file)'], {'skip_header': '(1)'}), '(input_path + file, skip_header=1)\n', (583, 617), True, 'import numpy as np\n'), ((628, 654), 'numpy.delete', 'np.delete', (['data', '(2)'], {'axis': '(1)'}), '(data, 2, axis=1)\n', (637, 654), True, 'import numpy as np\n'), ((1403, 1449), 'numpy.zeros', 'np.zeros', (['(data[i].shape[0], data[i].shape[1])'], {}), '((data[i].shape[0], data[i].shape[1]))\n', (1411, 1449), True, 'import numpy as np\n'), ((3235, 3252), 'numpy.array', 'np.array', (['split_x'], {}), '(split_x)\n', (3243, 3252), True, 'import numpy as np\n'), ((3254, 3271), 'numpy.array', 'np.array', (['split_y'], {}), '(split_y)\n', (3262, 3271), True, 'import numpy as np\n'), ((3273, 3292), 'numpy.array', 'np.array', (['split_ids'], {}), '(split_ids)\n', (3281, 3292), True, 'import numpy as np\n'), ((507, 529), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (517, 529), False, 'import os\n'), ((2373, 2443), 'numpy.concatenate', 'np.concatenate', (['x_i[x_i.shape[0] - timesteps_to_merge:x_i.shape[0], :]'], {}), '(x_i[x_i.shape[0] - timesteps_to_merge:x_i.shape[0], :])\n', (2387, 2443), True, 'import numpy as np\n'), ((2464, 2484), 'numpy.array', 'np.array', (['x_i_merged'], {}), '(x_i_merged)\n', (2472, 2484), True, 'import numpy as np\n'), ((2153, 2195), 'numpy.concatenate', 'np.concatenate', (['x_i[start_index:end_index]'], {}), '(x_i[start_index:end_index])\n', (2167, 2195), True, 'import numpy as np\n'), ((391, 413), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (401, 413), False, 'import os\n')]
|
'''
a simple script for generating uniform randoms and
jackknife regions assuming the KiDS-1000 mask
'''
import fitsio
import numpy as np
import healpy as hp
import kmeans_radec
from kmeans_radec import KMeans, kmeans_sample
from astropy.table import Table
lens = fitsio.read("lens.fits", columns = ["ra_gal", "dec_gal", "observed_redshift_gal"])
ra, dec = lens["ra_gal"], lens["dec_gal"]
ra_min, dec_min, ra_max, dec_max = 0, 0, 90, 90
Nr= 50000000
ran_ra = np.random.uniform(0,360,Nr)
ran_dec = np.degrees(np.arcsin(np.random.uniform(-1,1,Nr)))
ran_mask = (ran_ra > ra_min)&(ran_ra < ra_max)&(ran_dec > dec_min)&(ran_dec < dec_max)
ran_ra, ran_dec = ran_ra[ran_mask], ran_dec[ran_mask]
randoms = {'ra': ran_ra,
'dec': ran_dec}
coord = np.vstack([randoms['ra'], randoms['dec']]).T
ncen = 100
km = kmeans_sample(coord, ncen, maxiter=30, tol=1.0e-4)
labels = km.find_nearest(coord)
table = Table([coord[:,0], coord[:,1], labels], names=('RA', 'DEC', 'JK_LABEL'))
table.write('flagship_randoms_v2.fits', format='fits')
np.savetxt("flagship_jk_centers_v2.txt", km.centers)
|
[
"astropy.table.Table",
"fitsio.read",
"numpy.vstack",
"numpy.savetxt",
"numpy.random.uniform",
"kmeans_radec.kmeans_sample"
] |
[((266, 351), 'fitsio.read', 'fitsio.read', (['"""lens.fits"""'], {'columns': "['ra_gal', 'dec_gal', 'observed_redshift_gal']"}), "('lens.fits', columns=['ra_gal', 'dec_gal', 'observed_redshift_gal']\n )\n", (277, 351), False, 'import fitsio\n'), ((464, 493), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(360)', 'Nr'], {}), '(0, 360, Nr)\n', (481, 493), True, 'import numpy as np\n'), ((817, 867), 'kmeans_radec.kmeans_sample', 'kmeans_sample', (['coord', 'ncen'], {'maxiter': '(30)', 'tol': '(0.0001)'}), '(coord, ncen, maxiter=30, tol=0.0001)\n', (830, 867), False, 'from kmeans_radec import KMeans, kmeans_sample\n'), ((910, 984), 'astropy.table.Table', 'Table', (['[coord[:, 0], coord[:, 1], labels]'], {'names': "('RA', 'DEC', 'JK_LABEL')"}), "([coord[:, 0], coord[:, 1], labels], names=('RA', 'DEC', 'JK_LABEL'))\n", (915, 984), False, 'from astropy.table import Table\n'), ((1038, 1090), 'numpy.savetxt', 'np.savetxt', (['"""flagship_jk_centers_v2.txt"""', 'km.centers'], {}), "('flagship_jk_centers_v2.txt', km.centers)\n", (1048, 1090), True, 'import numpy as np\n'), ((756, 798), 'numpy.vstack', 'np.vstack', (["[randoms['ra'], randoms['dec']]"], {}), "([randoms['ra'], randoms['dec']])\n", (765, 798), True, 'import numpy as np\n'), ((523, 551), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'Nr'], {}), '(-1, 1, Nr)\n', (540, 551), True, 'import numpy as np\n')]
|
if True:
import numpy as np
d = 3
K = 50
N = 10 ** 6
a = np.zeros(3)
b = np.ones(3)
orders = np.array([K for i in range(d)])
coeffs = np.random.random([k + 2 for k in orders])
points = np.random.random((N, d)) # each line is a vector
points_c = points.T.copy() # each column is a vector
vals = np.zeros(N)
print(points.max().max())
print(points.min().min())
import time
from alternative_implementations import *
from eval_cubic_splines_cython import vec_eval_cubic_spline_3 as rr
vec_eval_cubic_spline_3(a, b, orders, coeffs, points, vals) # warmup
vec_eval_cubic_spline_3_inlined(a, b, orders, coeffs, points, vals) # warmup
vec_eval_cubic_spline_3_inlined_columns(
a, b, orders, coeffs, points_c, vals
) # warmup
vec_eval_cubic_spline_3_kernel(a, b, orders, coeffs, points, vals) # warmup
vec_eval_cubic_spline_3_inlined_lesswork(orders, coeffs, points, vals, Ad, dAd)
# rr(a,b,orders,coeffs,points,vals,Ad,dAd)
rr(a, b, orders, coeffs, points, vals)
t1 = time.time()
vec_eval_cubic_spline_3(a, b, orders, coeffs, points, vals)
t2 = time.time()
vec_eval_cubic_spline_3_inlined(a, b, orders, coeffs, points, vals)
t3 = time.time()
vec_eval_cubic_spline_3_inlined_columns(a, b, orders, coeffs, points_c, vals)
t4 = time.time()
vec_eval_cubic_spline_3_kernel(a, b, orders, coeffs, points, vals)
t5 = time.time()
vec_eval_cubic_spline_3_inlined_lesswork(orders, coeffs, points, vals, Ad, dAd)
t6 = time.time()
# rr(a,b,orders,coeffs,points,vals,Ad,dAd)
rr(a, b, orders, coeffs, points, vals)
t7 = time.time()
print("one function call per point: {}".format(t2 - t1))
print("inlined (points in rows): {}".format(t3 - t2))
print("inlined (points in columns): {}".format(t4 - t3))
print("kernel: {}".format(t5 - t4))
print("less work: {}".format(t6 - t5))
print("cython: {}".format(t7 - t6))
print(vals[:10, 0])
|
[
"numpy.ones",
"numpy.random.random",
"eval_cubic_splines_cython.vec_eval_cubic_spline_3",
"numpy.zeros",
"time.time"
] |
[((79, 90), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (87, 90), True, 'import numpy as np\n'), ((99, 109), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (106, 109), True, 'import numpy as np\n'), ((168, 211), 'numpy.random.random', 'np.random.random', (['[(k + 2) for k in orders]'], {}), '([(k + 2) for k in orders])\n', (184, 211), True, 'import numpy as np\n'), ((223, 247), 'numpy.random.random', 'np.random.random', (['(N, d)'], {}), '((N, d))\n', (239, 247), True, 'import numpy as np\n'), ((342, 353), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (350, 353), True, 'import numpy as np\n'), ((1030, 1068), 'eval_cubic_splines_cython.vec_eval_cubic_spline_3', 'rr', (['a', 'b', 'orders', 'coeffs', 'points', 'vals'], {}), '(a, b, orders, coeffs, points, vals)\n', (1032, 1068), True, 'from eval_cubic_splines_cython import vec_eval_cubic_spline_3 as rr\n'), ((1079, 1090), 'time.time', 'time.time', ([], {}), '()\n', (1088, 1090), False, 'import time\n'), ((1164, 1175), 'time.time', 'time.time', ([], {}), '()\n', (1173, 1175), False, 'import time\n'), ((1257, 1268), 'time.time', 'time.time', ([], {}), '()\n', (1266, 1268), False, 'import time\n'), ((1360, 1371), 'time.time', 'time.time', ([], {}), '()\n', (1369, 1371), False, 'import time\n'), ((1452, 1463), 'time.time', 'time.time', ([], {}), '()\n', (1461, 1463), False, 'import time\n'), ((1557, 1568), 'time.time', 'time.time', ([], {}), '()\n', (1566, 1568), False, 'import time\n'), ((1623, 1661), 'eval_cubic_splines_cython.vec_eval_cubic_spline_3', 'rr', (['a', 'b', 'orders', 'coeffs', 'points', 'vals'], {}), '(a, b, orders, coeffs, points, vals)\n', (1625, 1661), True, 'from eval_cubic_splines_cython import vec_eval_cubic_spline_3 as rr\n'), ((1671, 1682), 'time.time', 'time.time', ([], {}), '()\n', (1680, 1682), False, 'import time\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
def get_ellipse_params(rho,M):
"""
Returns ellipse params (excl center point)
"""
#eigenvalue decomposition to get the axes
w,v=np.linalg.eigh(M/rho)
try:
#let the smaller eigenvalue define the width (major axis*2!)
width = 2/float(np.sqrt(w[0]))
height = 2/float(np.sqrt(w[1]))
#the angle of the ellipse is defined by the eigenvector assigned to the smallest eigenvalue (because this defines the major axis (width of the ellipse))
angle = np.rad2deg(np.arctan2(v[:,0][1],v[:,0][0]))
except:
print("paramters do not represent an ellipse.")
return width,height,angle
def get_ellipse_patch(px,py,rho,M,alpha_val=1,linec="red",facec="none",linest="solid"):
"""
return an ellipse patch
"""
w,h,a = get_ellipse_params(rho,M)
return patches.Ellipse((px,py), w, h, a, alpha=alpha_val,ec=linec,facecolor=facec,linestyle=linest)
def plot_ellipse(px,py,rho, M, save_to=None, show=True):
p=get_ellipse_patch(px,py,rho,M)
fig, ax = plt.subplots()
ax.add_patch(p)
l=np.max([p.width,p.height])
ax.set_xlim(px-l/2,px+l/2)
ax.set_ylim(py-l/2,py+l/2)
ax.grid(True)
if not (save_to is None):
plt.savefig(save_to)
if show:
plt.show()
|
[
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"numpy.max",
"numpy.arctan2",
"numpy.linalg.eigh",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((232, 255), 'numpy.linalg.eigh', 'np.linalg.eigh', (['(M / rho)'], {}), '(M / rho)\n', (246, 255), True, 'import numpy as np\n'), ((925, 1026), 'matplotlib.patches.Ellipse', 'patches.Ellipse', (['(px, py)', 'w', 'h', 'a'], {'alpha': 'alpha_val', 'ec': 'linec', 'facecolor': 'facec', 'linestyle': 'linest'}), '((px, py), w, h, a, alpha=alpha_val, ec=linec, facecolor=\n facec, linestyle=linest)\n', (940, 1026), False, 'from matplotlib import patches\n'), ((1132, 1146), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1144, 1146), True, 'import matplotlib.pyplot as plt\n'), ((1173, 1200), 'numpy.max', 'np.max', (['[p.width, p.height]'], {}), '([p.width, p.height])\n', (1179, 1200), True, 'import numpy as np\n'), ((1321, 1341), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_to'], {}), '(save_to)\n', (1332, 1341), True, 'import matplotlib.pyplot as plt\n'), ((1363, 1373), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1371, 1373), True, 'import matplotlib.pyplot as plt\n'), ((610, 644), 'numpy.arctan2', 'np.arctan2', (['v[:, 0][1]', 'v[:, 0][0]'], {}), '(v[:, 0][1], v[:, 0][0])\n', (620, 644), True, 'import numpy as np\n'), ((358, 371), 'numpy.sqrt', 'np.sqrt', (['w[0]'], {}), '(w[0])\n', (365, 371), True, 'import numpy as np\n'), ((398, 411), 'numpy.sqrt', 'np.sqrt', (['w[1]'], {}), '(w[1])\n', (405, 411), True, 'import numpy as np\n')]
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from google.colab.patches import cv2_imshow
from tensorflow.keras.applications import *
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from math import *
import glob
import os
import time
from tensorflow.keras import regularizers
from pathfile import *
def iou(box_a,box_b):
y1=max(box_a[1],box_b[1])
y2=min(box_a[1]+box_a[3],box_b[1]+box_b[3])
x1=max(box_a[0],box_b[0])
x2=min(box_a[0]+box_a[2],box_b[0]+box_b[2])
if y1>y2 or x1>x2:
return 0 #iou calculation
inter_area=(x2-x1)*(y2-y1)
union_area=box_a[2]*box_a[3]+box_b[2]*box_b[3]-inter_area
if union_area == 0:
return 0
return inter_area/union_area
def get_proposals(path):
im=cv2.imread(path)
im=cv2.resize(im,(800,800),cv2.INTER_AREA)
im=cv2.cvtColor(im,cv2.COLOR_BGR2RGB)
edge_detection = cv2.ximgproc.createStructuredEdgeDetection(MODEL_YML)
edges = edge_detection.detectEdges(np.float32(im) / 255.0)
orimap = edge_detection.computeOrientation(edges)
edges = edge_detection.edgesNms(edges, orimap)
edge_boxes = cv2.ximgproc.createEdgeBoxes()
edge_boxes.setMaxBoxes(64)
boxes = edge_boxes.getBoundingBoxes(edges, orimap)
return boxes
def get_feature_map(path,model):
im=tf.keras.preprocessing.image.load_img(path=path,target_size=(800,800))
im = tf.keras.preprocessing.image.img_to_array(im)
im=tf.keras.applications.vgg16.preprocess_input(im)
im=np.reshape(im,(1,800,800,3))
pred=model.predict(im)[0]
return pred
def get_vgg():
m=VGG16(include_top=False,input_shape=(800,800,3))
x=Input((800,800,3))
inp=x
for layer in m.layers:
if layer.__class__.__name__=='InputLayer':
continue
if layer.output_shape[1]>=50:
x=layer(x)
model=Model(inp,x)
model.trainable=False
return model
def get_resized_boxes(path,original_boxes):
temp=cv2.imread(path)
gt_boxes=[]
width,height=len(temp[0]),len(temp)
temp=cv2.resize(temp,(800,800),interpolation=cv2.INTER_AREA)
for gt in original_boxes:
x,y,w,h=list(map(int,gt))
x=int(x*(800.0/width))
y=int(y*(800.0/height))
w=int(w*(800.0/width))
h=int(h*(800.0/height))
#print(x,y,w,h)
gt_boxes.append([x,y,w,h])
return gt_boxes
def get_fastrcnn():
y=Input((14,14,512))
y_inp=y
y=MaxPool2D(2)(y)
y=Flatten()(y)
y=Dense(1024)(y)
y=Dropout(0.25)(y)
y=Dense(1024)(y)
y=Dropout(0.25)(y)
y=Dense(512)(y)
y=Dropout(0.25)(y)
y=Dense(1024,name='logits')(y)
reg=Dense(4,activity_regularizer=regularizers.l2(1e-1),name='regression_layer')(y)
cls=Dense(11,name='class_layer')(y)
cls=Softmax()(cls)
fastrcnn=Model(inputs=y_inp,outputs=[reg,cls])
return fastrcnn
|
[
"tensorflow.keras.preprocessing.image.load_img",
"numpy.reshape",
"cv2.ximgproc.createStructuredEdgeDetection",
"tensorflow.keras.applications.vgg16.preprocess_input",
"cv2.cvtColor",
"tensorflow.keras.preprocessing.image.img_to_array",
"cv2.ximgproc.createEdgeBoxes",
"cv2.resize",
"cv2.imread",
"numpy.float32",
"tensorflow.keras.regularizers.l2"
] |
[((882, 898), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (892, 898), False, 'import cv2\n'), ((907, 949), 'cv2.resize', 'cv2.resize', (['im', '(800, 800)', 'cv2.INTER_AREA'], {}), '(im, (800, 800), cv2.INTER_AREA)\n', (917, 949), False, 'import cv2\n'), ((955, 990), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (967, 990), False, 'import cv2\n'), ((1012, 1065), 'cv2.ximgproc.createStructuredEdgeDetection', 'cv2.ximgproc.createStructuredEdgeDetection', (['MODEL_YML'], {}), '(MODEL_YML)\n', (1054, 1065), False, 'import cv2\n'), ((1255, 1285), 'cv2.ximgproc.createEdgeBoxes', 'cv2.ximgproc.createEdgeBoxes', ([], {}), '()\n', (1283, 1285), False, 'import cv2\n'), ((1434, 1506), 'tensorflow.keras.preprocessing.image.load_img', 'tf.keras.preprocessing.image.load_img', ([], {'path': 'path', 'target_size': '(800, 800)'}), '(path=path, target_size=(800, 800))\n', (1471, 1506), True, 'import tensorflow as tf\n'), ((1515, 1560), 'tensorflow.keras.preprocessing.image.img_to_array', 'tf.keras.preprocessing.image.img_to_array', (['im'], {}), '(im)\n', (1556, 1560), True, 'import tensorflow as tf\n'), ((1569, 1617), 'tensorflow.keras.applications.vgg16.preprocess_input', 'tf.keras.applications.vgg16.preprocess_input', (['im'], {}), '(im)\n', (1613, 1617), True, 'import tensorflow as tf\n'), ((1626, 1658), 'numpy.reshape', 'np.reshape', (['im', '(1, 800, 800, 3)'], {}), '(im, (1, 800, 800, 3))\n', (1636, 1658), True, 'import numpy as np\n'), ((2101, 2117), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (2111, 2117), False, 'import cv2\n'), ((2186, 2244), 'cv2.resize', 'cv2.resize', (['temp', '(800, 800)'], {'interpolation': 'cv2.INTER_AREA'}), '(temp, (800, 800), interpolation=cv2.INTER_AREA)\n', (2196, 2244), False, 'import cv2\n'), ((1106, 1120), 'numpy.float32', 'np.float32', (['im'], {}), '(im)\n', (1116, 1120), True, 'import numpy as np\n'), ((2834, 2854), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.1)'], {}), '(0.1)\n', (2849, 2854), False, 'from tensorflow.keras import regularizers\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from config import gamma, lr
def flat_grad(grads):
grad_flatten = []
for grad in grads:
grad_flatten.append(grad.view(-1))
grad_flatten = torch.cat(grad_flatten)
return grad_flatten
def flat_hessian(hessians):
hessians_flatten = []
for hessian in hessians:
hessians_flatten.append(hessian.contiguous().view(-1))
hessians_flatten = torch.cat(hessians_flatten).data
return hessians_flatten
def flat_params(model):
params = []
for param in model.parameters():
params.append(param.data.view(-1))
params_flatten = torch.cat(params)
return params_flatten
def update_model(model, new_params):
index = 0
for params in model.parameters():
params_length = len(params.view(-1))
new_param = new_params[index: index + params_length]
new_param = new_param.view(params.size())
params.data.copy_(new_param)
index += params_length
def kl_divergence(net, old_net, states):
policy = net(states)
old_policy = old_net(states).detach()
kl = old_policy * torch.log(old_policy / policy)
kl = kl.sum(1, keepdim=True)
return kl
def fisher_vector_product(net, states, p, cg_damp=0.1):
kl = kl_divergence(net, net, states)
kl = kl.mean()
kl_grad = torch.autograd.grad(kl, net.parameters(), create_graph=True) # create_graph is True if we need higher order derivative products
kl_grad = flat_grad(kl_grad)
kl_grad_p = (kl_grad * p.detach()).sum()
kl_hessian_p = torch.autograd.grad(kl_grad_p, net.parameters())
kl_hessian_p = flat_hessian(kl_hessian_p)
return kl_hessian_p + cg_damp * p.detach()
def conjugate_gradient(net, states, loss_grad, n_step=10, residual_tol=1e-10):
x = torch.zeros(loss_grad.size())
r = loss_grad.clone()
p = loss_grad.clone()
r_dot_r = torch.dot(r, r)
for i in range(n_step):
A_dot_p = fisher_vector_product(net, states, p)
alpha = r_dot_r / torch.dot(p, A_dot_p)
x += alpha * p
r -= alpha * A_dot_p
new_r_dot_r = torch.dot(r,r)
betta = new_r_dot_r / r_dot_r
p = r + betta * p
r_dot_r = new_r_dot_r
if r_dot_r < residual_tol:
break
return x
class TNPG(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(TNPG, self).__init__()
self.t = 0
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.fc_1 = nn.Linear(num_inputs, 128)
self.fc_2 = nn.Linear(128, num_outputs)
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform(m.weight)
def forward(self, input):
x = torch.tanh(self.fc_1(input))
policy = F.softmax(self.fc_2(x))
return policy
@classmethod
def train_model(cls, net, transitions):
states, actions, rewards, masks = transitions.state, transitions.action, transitions.reward, transitions.mask
states = torch.stack(states)
actions = torch.stack(actions)
rewards = torch.Tensor(rewards)
masks = torch.Tensor(masks)
returns = torch.zeros_like(rewards)
running_return = 0
for t in reversed(range(len(rewards))):
running_return = rewards[t] + gamma * running_return * masks[t]
returns[t] = running_return
policies = net(states)
policies = policies.view(-1, net.num_outputs)
policy_actions = (policies * actions.detach()).sum(dim=1)
loss = (policy_actions * returns).mean()
loss_grad = torch.autograd.grad(loss, net.parameters())
loss_grad = flat_grad(loss_grad)
step_dir = conjugate_gradient(net, states, loss_grad.data)
params = flat_params(net)
new_params = params + lr * step_dir
update_model(net, new_params)
return -loss
def get_action(self, input):
policy = self.forward(input)
policy = policy[0].data.numpy()
action = np.random.choice(self.num_outputs, 1, p=policy)[0]
return action
|
[
"torch.log",
"numpy.random.choice",
"torch.stack",
"torch.Tensor",
"torch.nn.Linear",
"torch.nn.init.xavier_uniform",
"torch.zeros_like",
"torch.cat",
"torch.dot"
] |
[((246, 269), 'torch.cat', 'torch.cat', (['grad_flatten'], {}), '(grad_flatten)\n', (255, 269), False, 'import torch\n'), ((667, 684), 'torch.cat', 'torch.cat', (['params'], {}), '(params)\n', (676, 684), False, 'import torch\n'), ((1921, 1936), 'torch.dot', 'torch.dot', (['r', 'r'], {}), '(r, r)\n', (1930, 1936), False, 'import torch\n'), ((464, 491), 'torch.cat', 'torch.cat', (['hessians_flatten'], {}), '(hessians_flatten)\n', (473, 491), False, 'import torch\n'), ((1157, 1187), 'torch.log', 'torch.log', (['(old_policy / policy)'], {}), '(old_policy / policy)\n', (1166, 1187), False, 'import torch\n'), ((2144, 2159), 'torch.dot', 'torch.dot', (['r', 'r'], {}), '(r, r)\n', (2153, 2159), False, 'import torch\n'), ((2545, 2571), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', '(128)'], {}), '(num_inputs, 128)\n', (2554, 2571), True, 'import torch.nn as nn\n'), ((2592, 2619), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'num_outputs'], {}), '(128, num_outputs)\n', (2601, 2619), True, 'import torch.nn as nn\n'), ((3078, 3097), 'torch.stack', 'torch.stack', (['states'], {}), '(states)\n', (3089, 3097), False, 'import torch\n'), ((3116, 3136), 'torch.stack', 'torch.stack', (['actions'], {}), '(actions)\n', (3127, 3136), False, 'import torch\n'), ((3155, 3176), 'torch.Tensor', 'torch.Tensor', (['rewards'], {}), '(rewards)\n', (3167, 3176), False, 'import torch\n'), ((3193, 3212), 'torch.Tensor', 'torch.Tensor', (['masks'], {}), '(masks)\n', (3205, 3212), False, 'import torch\n'), ((3232, 3257), 'torch.zeros_like', 'torch.zeros_like', (['rewards'], {}), '(rewards)\n', (3248, 3257), False, 'import torch\n'), ((2048, 2069), 'torch.dot', 'torch.dot', (['p', 'A_dot_p'], {}), '(p, A_dot_p)\n', (2057, 2069), False, 'import torch\n'), ((4095, 4142), 'numpy.random.choice', 'np.random.choice', (['self.num_outputs', '(1)'], {'p': 'policy'}), '(self.num_outputs, 1, p=policy)\n', (4111, 4142), True, 'import numpy as np\n'), ((2711, 2743), 'torch.nn.init.xavier_uniform', 'nn.init.xavier_uniform', (['m.weight'], {}), '(m.weight)\n', (2733, 2743), True, 'import torch.nn as nn\n')]
|
from numpy.random import randint
def find_sum(a_list, target):
r = []
sz = len(a_list)
for i in range(sz):
for j in range(i+1, sz):
if (a_list[i] + a_list[j]) == target:
r.append((a_list[i], a_list[j]))
return r
def fast_sum(a_list, target):
r = []
h = {}
for x in a_list:
y = target - x
if y in h:
r.append(y, x)
h[x] = x
return r
if __name__ == '__main__':
values = randint(0, 100, 20)
target = randint(100)
print(f"values {values} target {target}")
print(find_sum(values, target))
|
[
"numpy.random.randint"
] |
[((482, 501), 'numpy.random.randint', 'randint', (['(0)', '(100)', '(20)'], {}), '(0, 100, 20)\n', (489, 501), False, 'from numpy.random import randint\n'), ((515, 527), 'numpy.random.randint', 'randint', (['(100)'], {}), '(100)\n', (522, 527), False, 'from numpy.random import randint\n')]
|
import os
import re
import mne
import numpy as np
class SeegRecording():
def __init__(self, contacts, data, sampling_rate):
'''
contacts (list of tuples) is a list of all the contact labels and their
corresponding number
data (np.ndarray) is a numpy array of the seeg data [chans x time]
sampling_rate (float) is the sampling rate in Hz
'''
# Sort the contacts first
contacts, indices = zip(*sorted((c, i)
for i, c in enumerate(contacts)))
# set the contacts
self.contacts = contacts
self.ncontacts = len(self.contacts)
self.contact_names = [str(a) + str(b) for a, b in self.contacts]
# can't do assertion here if we trim contacts with a list...
assert data.ndim == 2
# assert data.shape[0] == self.ncontacts
self.data = data[indices, :]
self.sampling_rate = sampling_rate
nsamples = self.data.shape[1]
self.t = np.linspace(0, (nsamples - 1) *
(1. / self.sampling_rate), nsamples)
self.electrodes = {}
for i, (name, number) in enumerate(self.contacts):
if name not in self.electrodes:
self.electrodes[name] = []
self.electrodes[name].append(i)
self.set_bipolar()
@classmethod
def from_ades(cls, filename):
data_file = os.path.splitext(filename)[0] + ".dat"
contact_names = []
contacts = []
sampling_rate = None
nsamples = None
seeg_idxs = []
bad_channels = []
bad_file = filename + ".bad"
if os.path.isfile(bad_file):
with open(bad_file, 'r') as fd:
bad_channels = [ch.strip()
for ch in fd.readlines() if ch.strip() != ""]
with open(filename, 'r') as fd:
fd.readline() # ADES header file
kw, sampling_rate = [s.strip()
for s in fd.readline().strip().split('=')]
assert kw == 'samplingRate'
sampling_rate = float(sampling_rate)
kw, nsamples = [s.strip()
for s in fd.readline().strip().split('=')]
assert kw == 'numberOfSamples'
nsamples = int(nsamples)
channel_idx = 0
for line in fd.readlines():
if not line.strip():
continue
parts = [p.strip() for p in line.strip().split('=')]
if len(parts) > 1 and parts[1] == 'SEEG':
name, idx = re.match(
"([A-Za-z]+[']*)([0-9]+)", parts[0]).groups()
idx = int(idx)
if parts[0] not in bad_channels:
contacts.append((name, idx))
seeg_idxs.append(channel_idx)
channel_idx += 1
data = np.fromfile(data_file, dtype='f4')
ncontacts = data.size // nsamples
if data.size != nsamples * ncontacts:
print("!! data.size != nsamples*ncontacts")
print("!! %d != %d %d" % (data.size, nsamples, ncontacts))
print("!! Ignoring nsamples")
nsamples = int(data.size / ncontacts)
data = data.reshape((nsamples, ncontacts)).T
data = data[seeg_idxs, :]
return cls(contacts, data, sampling_rate)
@classmethod
def from_fif(cls, filename, drop_channels=None, rename_channels=None):
raw = mne.io.read_raw_fif(filename)
if rename_channels is not None:
raw.rename_channels(rename_channels)
return cls._from_mne_raw(raw, drop_channels)
@classmethod
def from_edf(cls, filename, drop_channels=None, rename_channels=None):
raw = mne.io.read_raw_edf(filename, preload=True)
if rename_channels is not None:
raw.rename_channels(rename_channels)
return cls._from_mne_raw(raw, drop_channels)
@classmethod
def _from_mne_raw(cls, raw, drop_channels=None):
contacts = []
seeg_idxs = []
if drop_channels is None:
drop_channels = []
for i, ch_name in enumerate(raw.ch_names):
if ch_name in raw.info['bads'] or ch_name in drop_channels:
continue
match = re.match("^([A-Za-z]+[']*)([0-9]+)$", ch_name)
if match is not None:
name, idx = match.groups()
contacts.append((name, int(idx)))
seeg_idxs.append(i)
return cls(contacts, raw.get_data()[seeg_idxs, :], raw.info['sfreq'])
def get_data_bipolar(self):
data_bipolar = np.zeros((len(self.bipolar), len(self.t)))
for i, (_, i1, i2) in enumerate(self.bipolar):
data_bipolar[i, :] = self.data[i1, :] - self.data[i2, :]
return data_bipolar
|
[
"numpy.fromfile",
"mne.io.read_raw_fif",
"re.match",
"os.path.splitext",
"os.path.isfile",
"numpy.linspace",
"mne.io.read_raw_edf"
] |
[((1067, 1136), 'numpy.linspace', 'np.linspace', (['(0)', '((nsamples - 1) * (1.0 / self.sampling_rate))', 'nsamples'], {}), '(0, (nsamples - 1) * (1.0 / self.sampling_rate), nsamples)\n', (1078, 1136), True, 'import numpy as np\n'), ((1725, 1749), 'os.path.isfile', 'os.path.isfile', (['bad_file'], {}), '(bad_file)\n', (1739, 1749), False, 'import os\n'), ((3022, 3056), 'numpy.fromfile', 'np.fromfile', (['data_file'], {'dtype': '"""f4"""'}), "(data_file, dtype='f4')\n", (3033, 3056), True, 'import numpy as np\n'), ((3611, 3640), 'mne.io.read_raw_fif', 'mne.io.read_raw_fif', (['filename'], {}), '(filename)\n', (3630, 3640), False, 'import mne\n'), ((3892, 3935), 'mne.io.read_raw_edf', 'mne.io.read_raw_edf', (['filename'], {'preload': '(True)'}), '(filename, preload=True)\n', (3911, 3935), False, 'import mne\n'), ((4432, 4478), 're.match', 're.match', (['"""^([A-Za-z]+[\']*)([0-9]+)$"""', 'ch_name'], {}), '("^([A-Za-z]+[\']*)([0-9]+)$", ch_name)\n', (4440, 4478), False, 'import re\n'), ((1485, 1511), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1501, 1511), False, 'import os\n'), ((2697, 2742), 're.match', 're.match', (['"""([A-Za-z]+[\']*)([0-9]+)"""', 'parts[0]'], {}), '("([A-Za-z]+[\']*)([0-9]+)", parts[0])\n', (2705, 2742), False, 'import re\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 16 15:31:16 2016
@author: shaw
"""
import xlrd
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import numpy as np
from pylab import *
from itertools import product
from matplotlib.colors import LogNorm
#import gc
def config():
# gc.collect()
m = 2
n = 2
length = 230
wide = 200
cte = m / 2
num_l = length / m
num_w = wide / n
square = length * wide
pixel = m * n
j = int(square / pixel)
x1 = {1: 10, 2: 30, 3: 55, 4: 75, 5: 145, 6: 165, 7: 100, 8: 120, 9: 10, 10: 30,
11: 55, 12: 75, 13: 145, 14: 165, 15: 100, 16: 120, 17: 10, 18: 30, 19: 55, 20: 75,
21: 145, 22: 165, 23: 100, 24: 120, 25: 10, 26: 30, 27: 55, 28: 75, 29: 145, 30: 165,
31: 100, 32: 120, 33: 10, 34: 30, 35: 55, 36: 75, 37: 145, 38: 165, 39: 100, 40: 120,
41: 100, 42: 120, 43: 10, 44: 90, 45: 55, 46: 75, 47: 145, 48: 165, 49: 100, 50: 120,
51: 10, 52: 30, 53: 55, 54: 75, 55: 145, 56: 165, 57: 100, 58: 120, 59: 10, 60: 30,
61: 55, 62: 75, 63: 145, 64: 165, 65: 100, 66: 120, 67: 10, 68: 30, 69: 55, 70: 75,
71: 145, 72: 165, 73: 100, 74: 120, 75: 10, 76: 30, 77: 55, 78: 75, 79: 145, 80: 165,
97: 190, 99: 210, 100: 190, 101: 210, 102: 190, 103: 210, 104: 190, 105: 210, 106: 190, 107: 210,
108: 190, 109: 210, 110: 190, 112: 210, 113: 190, 114: 210, 115: 190, 116: 210, 117: 190, 118: 210}
y1 = {1: 190, 2: 190, 3: 190, 4: 190, 5: 90, 6: 90, 7: 190, 8: 190, 9: 170, 10: 170,
11: 170, 12: 170, 13: 70, 14: 70, 15: 170, 16: 170, 17: 150, 18: 150, 19: 150, 20: 150,
21: 50, 22: 50, 23: 150, 24: 150, 25: 130, 26: 130, 27: 130, 28: 130, 29:30, 30: 30,
31: 130, 32: 130, 33: 110, 34: 110, 35: 110, 36: 110, 37: 10, 38: 10, 39: 110, 40: 110,
41: 90, 42: 90, 43: 90, 44: 90, 45: 90, 46: 90, 47: 190, 48: 190, 49: 70, 50: 70,
51: 70, 52: 70, 53: 70, 54: 70, 55: 170, 56: 170, 57: 50, 58: 50, 59:50, 60: 50,
61: 50, 62: 50, 63: 150, 64: 150, 65: 30, 66: 30, 67: 30, 68: 30, 69: 30, 70: 30,
71: 130, 72: 130, 73: 10, 74: 10, 75: 10, 76: 10, 77: 10, 78: 10 , 79: 110, 80: 110,
97: 190, 99: 190, 100: 170, 101: 170, 102: 150, 103: 150, 104: 130, 105: 130, 106: 110, 107: 110,
108: 90, 109: 90, 110: 70, 112: 70, 113: 50, 114: 50, 115: 30, 116: 30, 117: 10, 118: 10}
return j, num_l, num_w, m, n, cte, x1, y1
def center_voxel(j,cte,num_l, num_w, m, n):
t = [[0 for col in range(2)] for row in range(j)]
count = 0
k = 0
l = 0
while(count < j):
tmp_y = cte + m * k
tmp_x = cte + n * l
if(l < num_l):
if(k < num_w):
t[count] = [tmp_x, tmp_y]
count += 1
k += 1
else:
k = 0
l += 1
v = np.array(t)
return v
def vector(path1, path2, x1, y1, v,j):
table1 = xlrd.open_workbook(path1)
table2 = xlrd.open_workbook(path2)
sh1 = table1.sheet_by_index(0)
sh2 = table2.sheet_by_index(0)
r1 = 1
r2 = 1
y = []
epc_index1 = 0
epc_index2 = 0
ante_index1 = 0
ante_index2 = 0
rssi1 = 0.0
rssi2 = 0.0
count = 0
num = 0
p = 10
a = []
while((r1+1 <= sh1.nrows-1)and(r2+1 <= sh2.nrows-1)):
epc_index1 = int(sh1.cell_value(r1, 0))
epc_index2 = int(sh2.cell_value(r2, 0))
epc_next1 = int(sh1.cell_value(r1+1, 0))
epc_next2 = int(sh2.cell_value(r2+1, 0))
ante_index1 = int(sh1.cell_value(r1, 1))
ante_index2 = int(sh2.cell_value(r2, 1))
ante_next1 = int(sh1.cell_value(r1+1, 1))
ante_next2 = int(sh2.cell_value(r2+1, 1))
if((epc_index1 == epc_index2) and (epc_index1 == epc_next1) and (epc_index2 == epc_next2)):
if(ante_index1 == ante_index2):
if(ante_index1 == ante_next1 and ante_index2 == ante_next2):
r1 = r1 + 1
r2 = r2 + 1
elif(ante_index1 == ante_next1 and ante_index2 != ante_next2):
r1 = r1 + 1
rssi1 = float(sh1.cell_value(r1, 6))
rssi2 = float(sh2.cell_value(r2, 6))
i = abs(rssi1 - rssi2)
y.append(i)
count = count + 1
for num in range(j):
d = ((x1[epc_index1]- v[num][0])**2+(y1[epc_index1] - v[num][1])**2)**0.5
if(d < p):
a.append(1)
else:
a.append(0)
elif(ante_index1 != ante_next1 and ante_index2 == ante_next2):
r2 = r2 + 1
rssi1 = float(sh1.cell_value(r1, 6))
rssi2 = float(sh2.cell_value(r2, 6))
i = abs(rssi1 - rssi2)
y.append(i)
count = count + 1
for num in range(j):
d = ((x1[epc_index1]- v[num][0])**2+(y1[epc_index1] - v[num][1])**2)**0.5
if(d < p):
a.append(1)
else:
a.append(0)
elif(ante_index1 != ante_next1 and ante_index2 != ante_next2):
num = 0
rssi1 = float(sh1.cell_value(r1, 6))
rssi2 = float(sh2.cell_value(r2, 6))
i = abs(rssi1 - rssi2)
y.append(i)
count = count + 1
for num in range(j):
# print y[epc_index1]
d = ((x1[epc_index1]- v[num][0])**2+(y1[epc_index1] - v[num][1])**2)**0.5
if(d < p):
a.append(1)
else:
a.append(0)
r1 = r1 + 1
r2 = r2 + 1
elif(ante_index1 > ante_index2):
if(ante_index1 != ante_next1 and ante_index2 != ante_next2):
rssi1 = float(sh1.cell_value(r1, 6))
rssi2 = -80
i = abs(rssi1 - rssi2)
y.append(i)
r2 = r2 +1
count = count + 1
for num in range(j):
d = ((x1[epc_index1]- v[num][0])**2+(y1[epc_index1] - v[num][1])**2)**0.5
if(d < p):
a.append(1)
else:
a.append(0)
else:
r2 = r2 + 1
elif(ante_index2 > ante_index1):
if(ante_index2 != ante_next2 and ante_index1 != ante_next1):
rssi1 = -80
rssi2 = float(sh2.cell_value(r2, 6))
i = abs(rssi1 - rssi2)
y.append(i)
r1 = r1 + 1
count = count + 1
for num in range(j):
d = ((x1[epc_index1]- v[num][0])**2+(y1[epc_index1] - v[num][1])**2)**0.5
if(d < p):
a.append(1)
else:
a.append(0)
else:
r1 = r1 + 1
elif((epc_index1 == epc_index2) and((epc_index1 != epc_next1) or(epc_index2 != epc_next2))):
if(ante_index1 == ante_index2):
rssi1 = float(sh1.cell_value(r1, 6))
rssi2 = float(sh2.cell_value(r2, 6))
i = abs(rssi1 - rssi2)
y.append(i)
count = count + 1
for num in range(j):
d = ((x1[epc_index1]- v[num][0])**2+(y1[epc_index1] - v[num][1])**2)**0.5
if(d < p):
a.append(1)
else:
a.append(0)
elif(ante_index1 > ante_index2):
rssi1 = float(sh1.cell_value(r1, 6))
rssi2 = -80
i = abs(rssi1 - rssi2)
y.append(i)
count = count + 1
for num in range(j):
d = ((x1[epc_index1]- v[num][0])**2+(y1[epc_index1] - v[num][1])**2)**0.5
if(d < p):
a.append(1)
else:
a.append(0)
elif(ante_index2 > ante_index1):
rssi1 = -80
rssi2 = float(sh2.cell_value(r2, 6))
i = abs(rssi1 - rssi2)
y.append(i)
count = count + 1
for num in range(j):
d = ((x1[epc_index1]- v[num][0])**2+(y1[epc_index1] - v[num][1])**2)**0.5
if(d < p):
a.append(1)
else:
a.append(0)
# elif()
r1 = r1 + 1
r2 = r2 + 1
elif(epc_index1 > epc_index2):
epc_before1 = int(sh1.cell_value(r1-1, 0))
epc_before2 = int(sh2.cell_value(r2-1, 0))
if(epc_before1 != epc_index2 and epc_index1 != epc_next2):
rssi1 = -80
rssi2 = float(sh2.cell_value(r2, 6))
i = abs(rssi1 - rssi2)
y.append(i)
r2 = r2 + 1
count = count + 1
for num in range(j):
d = ((x1[epc_index2]- v[num][0])**2+(y1[epc_index2] - v[num][1])**2)**0.5
if(d < p):
a.append(1)
else:
a.append(0)
else:
r2 = r2 + 1
elif(epc_index2 > epc_index1):
epc_before1 = int(sh1.cell_value(r1-1, 0))
epc_before2 = int(sh2.cell_value(r2-1, 0))
if(epc_before2 != epc_index1 and epc_index2 != epc_next1):
rssi1 = float(sh1.cell_value(r1, 6))
rssi2 = -80
i = abs(rssi1 - rssi2)
y.append(i)
r1 = r1 + 1
count = count + 1
for num in range(j):
d = ((x1[epc_index1]- v[num][0])**2+(y1[epc_index1] - v[num][1])**2)**0.5
if(d < p):
a.append(1)
else:
a.append(0)
else:
r1 = r1 + 1
# print count
b = np.array(a)
w = b.reshape(count, j)
# print b.size
# print w
# z = np.dot(w.T, w)
# print z
# q = np.identity(j)
t= np.mat(y)
# l = 40
# u = l * np.dot(q.T, q)
# print u
# o = np.mat((z + u))
# print o
# e = np.dot(w.T, t.T)
# print e
h = np.dot(w.T , t.T)
x = h.getA()
# f = open('C:/Users/songkai/Desktop/record/new/x.txt', 'w')
# for n in range(len(x[0])):
# f.write(x[0][n])
# f.close()
# plt.hist2d(230, 200, bins=40,weight = x)
# plt.colorbar()
# plt.show()
return x
#def vector_x(w, y,j):
# q = np.identity(j)
# t= np.matrix(y)
# print t
# l = 40
# x = (w.T * w + l * q.T * q).I * w.T * t.T
# count_length = length / n
# count_wide = wide / m
# x = t.reshape([count_wide, count_length])
# return x
def draw(v,x):
plt.xlim(0,230)
plt.ylim(0,200)
ax = plt.gca()
# ax.xaxis.set_minor_locator(MultipleLocator(2))
# ax.yaxis.set_minor_locator(MultipleLocator(2))
ax.xaxis.set_major_locator(MultipleLocator(20))
ax.yaxis.set_major_locator(MultipleLocator(20))
# plt.grid()
# plt.show()
# plt.imshow(x, extent=[0,230,0,200])
# plt.imsave('pic.jpg', 'JPG')
# pp1=amap(lambda ra: [ra[0],ra[1]],product(arange(0,230,2),arange(0,200,2)))
scatter(v[:,0],v[:,1],c=x, edgecolor="none")
return 0
def main():
origin = 'C:/Users/shaw/Desktop/record/new/origin_2.4m.xlsx'
stand1 = 'C:/Users/shaw/Desktop/record/new/stand1_0.6m.xlsx'
path1 = origin
path2 = stand1
(j, num_l, num_w, m, n, cte, x1, y1) = config()
v = center_voxel(j,cte,num_l, num_w, m, n)
x = vector(path1, path2, x1, y1, v,j)
# x = vector_x(w, y,j)
# print x
draw(v, x)
# show()
if __name__ == '__main__':
main()
|
[
"numpy.mat",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.gca",
"xlrd.open_workbook",
"numpy.array",
"numpy.dot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim"
] |
[((2998, 3009), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (3006, 3009), True, 'import numpy as np\n'), ((3082, 3107), 'xlrd.open_workbook', 'xlrd.open_workbook', (['path1'], {}), '(path1)\n', (3100, 3107), False, 'import xlrd\n'), ((3122, 3147), 'xlrd.open_workbook', 'xlrd.open_workbook', (['path2'], {}), '(path2)\n', (3140, 3147), False, 'import xlrd\n'), ((11222, 11233), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (11230, 11233), True, 'import numpy as np\n'), ((11369, 11378), 'numpy.mat', 'np.mat', (['y'], {}), '(y)\n', (11375, 11378), True, 'import numpy as np\n'), ((11525, 11541), 'numpy.dot', 'np.dot', (['w.T', 't.T'], {}), '(w.T, t.T)\n', (11531, 11541), True, 'import numpy as np\n'), ((12119, 12135), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(230)'], {}), '(0, 230)\n', (12127, 12135), True, 'import matplotlib.pyplot as plt\n'), ((12140, 12156), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(200)'], {}), '(0, 200)\n', (12148, 12156), True, 'import matplotlib.pyplot as plt\n'), ((12166, 12175), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12173, 12175), True, 'import matplotlib.pyplot as plt\n'), ((12314, 12333), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(20)'], {}), '(20)\n', (12329, 12333), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter\n'), ((12367, 12386), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(20)'], {}), '(20)\n', (12382, 12386), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter\n')]
|
from __future__ import print_function
import sys, random, json, os, tempfile
from collections import Counter
import numpy as np
INSIDE_BLENDER = True
try:
import bpy
from mathutils import Vector
except ImportError as e:
INSIDE_BLENDER = False
if INSIDE_BLENDER:
try:
import utils
except ImportError as e:
print("\nERROR")
print("Running render_images.py from Blender and cannot import utils.py.")
print("You may need to add a .pth file to the site-packages of Blender's")
print("bundled python with a command like this:\n")
print("echo $PWD >> $BLENDER/$VERSION/python/lib/python3.5/site-packages/clevr.pth")
print("\nWhere $BLENDER is the directory where Blender is installed, and")
print("$VERSION is your Blender version (such as 2.78).")
sys.exit(1)
def render_scene(args,
output_image='render.png',
output_scene='render_json',
output_blendfile=None,
objects=[],
**kwargs
):
# Load the main blendfile
bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)
# Load materials
utils.load_materials(args.material_dir)
# Set render arguments so we can get pixel coordinates later.
# We use functionality specific to the CYCLES renderer so BLENDER_RENDER
# cannot be used.
render_args = bpy.context.scene.render
render_args.engine = "CYCLES"
render_args.filepath = output_image
render_args.resolution_x = args.width
render_args.resolution_y = args.height
render_args.resolution_percentage = 100
render_args.tile_x = args.render_tile_size
render_args.tile_y = args.render_tile_size
if args.use_gpu == 1:
# Blender changed the API for enabling CUDA at some point
if bpy.app.version < (2, 78, 0):
bpy.context.user_preferences.system.compute_device_type = 'CUDA'
bpy.context.user_preferences.system.compute_device = 'CUDA_0'
else:
cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences
cycles_prefs.compute_device_type = 'CUDA'
# Some CYCLES-specific stuff
bpy.data.worlds['World'].cycles.sample_as_light = True
bpy.context.scene.cycles.blur_glossy = 2.0
bpy.context.scene.cycles.samples = args.render_num_samples
bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
if args.use_gpu == 1:
bpy.context.scene.cycles.device = 'GPU'
# This will give ground-truth information about the scene and its objects
scene_struct = {
'image_filename': os.path.basename(output_image),
'objects': [],
'directions': {},
}
scene_struct.update(kwargs)
if bpy.app.version < (2, 80, 0):
bpy.ops.mesh.primitive_plane_add(radius=5)
else:
bpy.ops.mesh.primitive_plane_add(size=5)
plane = bpy.context.object
def rand(L):
return 2.0 * L * (random.random() - 0.5)
# Add random jitter to camera position
if args.camera_jitter > 0:
for i in range(3):
bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)
# Figure out the left, up, and behind directions along the plane and record
# them in the scene structure
camera = bpy.data.objects['Camera']
plane_normal = plane.data.vertices[0].normal
if bpy.app.version < (2, 80, 0):
cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
else:
cam_behind = camera.matrix_world.to_quaternion() @ Vector((0, 0, -1))
cam_left = camera.matrix_world.to_quaternion() @ Vector((-1, 0, 0))
cam_up = camera.matrix_world.to_quaternion() @ Vector((0, 1, 0))
plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
plane_up = cam_up.project(plane_normal).normalized()
# Delete the plane; we only used it for normals anyway. The base scene file
# contains the actual ground plane.
utils.delete_object(plane)
# Save all six axis-aligned directions in the scene struct
scene_struct['directions']['behind'] = tuple(plane_behind)
scene_struct['directions']['front'] = tuple(-plane_behind)
scene_struct['directions']['left'] = tuple(plane_left)
scene_struct['directions']['right'] = tuple(-plane_left)
scene_struct['directions']['above'] = tuple(plane_up)
scene_struct['directions']['below'] = tuple(-plane_up)
# Add random jitter to lamp positions
if args.key_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Key'].location[i] += rand(args.key_light_jitter)
if args.back_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Back'].location[i] += rand(args.back_light_jitter)
if args.fill_light_jitter > 0:
for i in range(3):
bpy.data.objects['Lamp_Fill'].location[i] += rand(args.fill_light_jitter)
# Now make some random objects
blender_objects = add_objects(args, scene_struct, camera, objects)
# Render the scene and dump the scene data structure
scene_struct['objects'] = objects
scene_struct['relationships'] = compute_all_relationships(scene_struct)
while True:
try:
bpy.ops.render.render(write_still=True)
break
except Exception as e:
print(e)
with open(output_scene, 'w') as f:
json.dump(scene_struct, f, indent=2)
if output_blendfile is not None:
bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
def add_objects(args, scene_struct, camera, objects):
"""
Add objects to the current blender scene
"""
blender_objects = []
for obj in objects:
# Actually add the object to the scene
utils.add_object(args.shape_dir,
obj["shape"],
obj["size"],
obj["location"],
theta=obj["rotation"])
bobj = bpy.context.object
blender_objects.append(bobj)
utils.add_material(obj["material"], Color=obj["color"])
obj["pixel_coords"] = utils.get_camera_coords(camera, bobj.location)
loc = np.array(bobj.location)
dim = np.array(bobj.dimensions)
half = dim / 2
corners = []
corners.append(loc + half * [1,1,1])
corners.append(loc + half * [1,1,-1])
corners.append(loc + half * [1,-1,1])
corners.append(loc + half * [1,-1,-1])
corners.append(loc + half * [-1,1,1])
corners.append(loc + half * [-1,1,-1])
corners.append(loc + half * [-1,-1,1])
corners.append(loc + half * [-1,-1,-1])
import mathutils
corners_camera_coords = np.array([ utils.get_camera_coords(camera, mathutils.Vector(tuple(corner)))
for corner in corners ])
xmax = np.amax(corners_camera_coords[:,0])
ymax = np.amax(corners_camera_coords[:,1])
xmin = np.amin(corners_camera_coords[:,0])
ymin = np.amin(corners_camera_coords[:,1])
obj["bbox"] = (float(xmin), float(ymin), float(xmax), float(ymax))
return blender_objects
def compute_all_relationships(scene_struct, eps=0.2):
"""
Computes relationships between all pairs of objects in the scene.
Returns a dictionary mapping string relationship names to lists of lists of
integers, where output[rel][i] gives a list of object indices that have the
relationship rel with object i. For example if j is in output['left'][i] then
object j is left of object i.
"""
all_relationships = {}
for name, direction_vec in scene_struct['directions'].items():
if name == 'above' or name == 'below': continue
all_relationships[name] = []
for i, obj1 in enumerate(scene_struct['objects']):
coords1 = obj1['location']
related = set()
for j, obj2 in enumerate(scene_struct['objects']):
if obj1 == obj2: continue
coords2 = obj2['location']
diff = [coords2[k] - coords1[k] for k in [0, 1, 2]]
dot = sum(diff[k] * direction_vec[k] for k in [0, 1, 2])
if dot > eps:
related.add(j)
all_relationships[name].append(sorted(list(related)))
return all_relationships
def check_visibility(blender_objects, min_pixels_per_object):
"""
Check whether all objects in the scene have some minimum number of visible
pixels; to accomplish this we assign random (but distinct) colors to all
objects, and render using no lighting or shading or antialiasing; this
ensures that each object is just a solid uniform color. We can then count
the number of pixels of each color in the output image to check the visibility
of each object.
Returns True if all objects are visible and False otherwise.
"""
f, path = tempfile.mkstemp(suffix='.png')
object_colors = render_shadeless(blender_objects, path=path)
img = bpy.data.images.load(path)
p = list(img.pixels)
color_count = Counter((p[i], p[i+1], p[i+2], p[i+3])
for i in range(0, len(p), 4))
os.remove(path)
if len(color_count) != len(blender_objects) + 1:
return False
for _, count in color_count.most_common():
if count < min_pixels_per_object:
return False
return True
def render_shadeless(blender_objects, path='flat.png'):
"""
Render a version of the scene with shading disabled and unique materials
assigned to all objects, and return a set of all colors that should be in the
rendered image. The image itself is written to path. This is used to ensure
that all objects will be visible in the final rendered scene.
"""
render_args = bpy.context.scene.render
# Cache the render args we are about to clobber
old_filepath = render_args.filepath
old_engine = render_args.engine
old_use_antialiasing = render_args.use_antialiasing
# Override some render settings to have flat shading
render_args.filepath = path
render_args.engine = 'BLENDER_RENDER'
render_args.use_antialiasing = False
# Move the lights and ground to layer 2 so they don't render
utils.set_layer(bpy.data.objects['Lamp_Key'], 2)
utils.set_layer(bpy.data.objects['Lamp_Fill'], 2)
utils.set_layer(bpy.data.objects['Lamp_Back'], 2)
utils.set_layer(bpy.data.objects['Ground'], 2)
# Add random shadeless materials to all objects
object_colors = set()
old_materials = []
for i, obj in enumerate(blender_objects):
old_materials.append(obj.data.materials[0])
bpy.ops.material.new()
mat = bpy.data.materials['Material']
mat.name = 'Material_%d' % i
while True:
r, g, b = [random.random() for _ in range(3)]
if (r, g, b) not in object_colors: break
object_colors.add((r, g, b))
mat.diffuse_color = [r, g, b]
mat.use_shadeless = True
obj.data.materials[0] = mat
# Render the scene
bpy.ops.render.render(write_still=True)
# Undo the above; first restore the materials to objects
for mat, obj in zip(old_materials, blender_objects):
obj.data.materials[0] = mat
# Move the lights and ground back to layer 0
utils.set_layer(bpy.data.objects['Lamp_Key'], 0)
utils.set_layer(bpy.data.objects['Lamp_Fill'], 0)
utils.set_layer(bpy.data.objects['Lamp_Back'], 0)
utils.set_layer(bpy.data.objects['Ground'], 0)
# Set the render settings back to what they were
render_args.filepath = old_filepath
render_args.engine = old_engine
render_args.use_antialiasing = old_use_antialiasing
return object_colors
|
[
"utils.add_material",
"utils.get_camera_coords",
"numpy.array",
"sys.exit",
"bpy.data.images.load",
"utils.set_layer",
"os.remove",
"mathutils.Vector",
"utils.delete_object",
"bpy.ops.wm.open_mainfile",
"utils.load_materials",
"bpy.ops.mesh.primitive_plane_add",
"utils.add_object",
"bpy.ops.wm.save_as_mainfile",
"bpy.ops.material.new",
"numpy.amin",
"tempfile.mkstemp",
"bpy.ops.render.render",
"os.path.basename",
"random.random",
"numpy.amax",
"json.dump"
] |
[((982, 1042), 'bpy.ops.wm.open_mainfile', 'bpy.ops.wm.open_mainfile', ([], {'filepath': 'args.base_scene_blendfile'}), '(filepath=args.base_scene_blendfile)\n', (1006, 1042), False, 'import bpy\n'), ((1065, 1104), 'utils.load_materials', 'utils.load_materials', (['args.material_dir'], {}), '(args.material_dir)\n', (1085, 1104), False, 'import utils\n'), ((4023, 4049), 'utils.delete_object', 'utils.delete_object', (['plane'], {}), '(plane)\n', (4042, 4049), False, 'import utils\n'), ((8613, 8644), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".png"""'}), "(suffix='.png')\n", (8629, 8644), False, 'import sys, random, json, os, tempfile\n'), ((8716, 8742), 'bpy.data.images.load', 'bpy.data.images.load', (['path'], {}), '(path)\n', (8736, 8742), False, 'import bpy\n'), ((8877, 8892), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (8886, 8892), False, 'import sys, random, json, os, tempfile\n'), ((9893, 9941), 'utils.set_layer', 'utils.set_layer', (["bpy.data.objects['Lamp_Key']", '(2)'], {}), "(bpy.data.objects['Lamp_Key'], 2)\n", (9908, 9941), False, 'import utils\n'), ((9944, 9993), 'utils.set_layer', 'utils.set_layer', (["bpy.data.objects['Lamp_Fill']", '(2)'], {}), "(bpy.data.objects['Lamp_Fill'], 2)\n", (9959, 9993), False, 'import utils\n'), ((9996, 10045), 'utils.set_layer', 'utils.set_layer', (["bpy.data.objects['Lamp_Back']", '(2)'], {}), "(bpy.data.objects['Lamp_Back'], 2)\n", (10011, 10045), False, 'import utils\n'), ((10048, 10094), 'utils.set_layer', 'utils.set_layer', (["bpy.data.objects['Ground']", '(2)'], {}), "(bpy.data.objects['Ground'], 2)\n", (10063, 10094), False, 'import utils\n'), ((10651, 10690), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {'write_still': '(True)'}), '(write_still=True)\n', (10672, 10690), False, 'import bpy\n'), ((10888, 10936), 'utils.set_layer', 'utils.set_layer', (["bpy.data.objects['Lamp_Key']", '(0)'], {}), "(bpy.data.objects['Lamp_Key'], 0)\n", (10903, 10936), False, 'import utils\n'), ((10939, 10988), 'utils.set_layer', 'utils.set_layer', (["bpy.data.objects['Lamp_Fill']", '(0)'], {}), "(bpy.data.objects['Lamp_Fill'], 0)\n", (10954, 10988), False, 'import utils\n'), ((10991, 11040), 'utils.set_layer', 'utils.set_layer', (["bpy.data.objects['Lamp_Back']", '(0)'], {}), "(bpy.data.objects['Lamp_Back'], 0)\n", (11006, 11040), False, 'import utils\n'), ((11043, 11089), 'utils.set_layer', 'utils.set_layer', (["bpy.data.objects['Ground']", '(0)'], {}), "(bpy.data.objects['Ground'], 0)\n", (11058, 11089), False, 'import utils\n'), ((2525, 2555), 'os.path.basename', 'os.path.basename', (['output_image'], {}), '(output_image)\n', (2541, 2555), False, 'import sys, random, json, os, tempfile\n'), ((2676, 2718), 'bpy.ops.mesh.primitive_plane_add', 'bpy.ops.mesh.primitive_plane_add', ([], {'radius': '(5)'}), '(radius=5)\n', (2708, 2718), False, 'import bpy\n'), ((2731, 2771), 'bpy.ops.mesh.primitive_plane_add', 'bpy.ops.mesh.primitive_plane_add', ([], {'size': '(5)'}), '(size=5)\n', (2763, 2771), False, 'import bpy\n'), ((5343, 5379), 'json.dump', 'json.dump', (['scene_struct', 'f'], {'indent': '(2)'}), '(scene_struct, f, indent=2)\n', (5352, 5379), False, 'import sys, random, json, os, tempfile\n'), ((5420, 5474), 'bpy.ops.wm.save_as_mainfile', 'bpy.ops.wm.save_as_mainfile', ([], {'filepath': 'output_blendfile'}), '(filepath=output_blendfile)\n', (5447, 5474), False, 'import bpy\n'), ((5683, 5786), 'utils.add_object', 'utils.add_object', (['args.shape_dir', "obj['shape']", "obj['size']", "obj['location']"], {'theta': 
"obj['rotation']"}), "(args.shape_dir, obj['shape'], obj['size'], obj['location'],\n theta=obj['rotation'])\n", (5699, 5786), False, 'import utils\n'), ((5934, 5989), 'utils.add_material', 'utils.add_material', (["obj['material']"], {'Color': "obj['color']"}), "(obj['material'], Color=obj['color'])\n", (5952, 5989), False, 'import utils\n'), ((6016, 6062), 'utils.get_camera_coords', 'utils.get_camera_coords', (['camera', 'bobj.location'], {}), '(camera, bobj.location)\n', (6039, 6062), False, 'import utils\n'), ((6074, 6097), 'numpy.array', 'np.array', (['bobj.location'], {}), '(bobj.location)\n', (6082, 6097), True, 'import numpy as np\n'), ((6108, 6133), 'numpy.array', 'np.array', (['bobj.dimensions'], {}), '(bobj.dimensions)\n', (6116, 6133), True, 'import numpy as np\n'), ((6711, 6747), 'numpy.amax', 'np.amax', (['corners_camera_coords[:, 0]'], {}), '(corners_camera_coords[:, 0])\n', (6718, 6747), True, 'import numpy as np\n'), ((6758, 6794), 'numpy.amax', 'np.amax', (['corners_camera_coords[:, 1]'], {}), '(corners_camera_coords[:, 1])\n', (6765, 6794), True, 'import numpy as np\n'), ((6805, 6841), 'numpy.amin', 'np.amin', (['corners_camera_coords[:, 0]'], {}), '(corners_camera_coords[:, 0])\n', (6812, 6841), True, 'import numpy as np\n'), ((6852, 6888), 'numpy.amin', 'np.amin', (['corners_camera_coords[:, 1]'], {}), '(corners_camera_coords[:, 1])\n', (6859, 6888), True, 'import numpy as np\n'), ((10287, 10309), 'bpy.ops.material.new', 'bpy.ops.material.new', ([], {}), '()\n', (10307, 10309), False, 'import bpy\n'), ((790, 801), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (798, 801), False, 'import sys, random, json, os, tempfile\n'), ((3316, 3334), 'mathutils.Vector', 'Vector', (['(0, 0, -1)'], {}), '((0, 0, -1))\n', (3322, 3334), False, 'from mathutils import Vector\n'), ((3388, 3406), 'mathutils.Vector', 'Vector', (['(-1, 0, 0)'], {}), '((-1, 0, 0))\n', (3394, 3406), False, 'from mathutils import Vector\n'), ((3458, 3475), 'mathutils.Vector', 'Vector', (['(0, 1, 0)'], {}), '((0, 1, 0))\n', (3464, 3475), False, 'from mathutils import Vector\n'), ((3539, 3557), 'mathutils.Vector', 'Vector', (['(0, 0, -1)'], {}), '((0, 0, -1))\n', (3545, 3557), False, 'from mathutils import Vector\n'), ((3611, 3629), 'mathutils.Vector', 'Vector', (['(-1, 0, 0)'], {}), '((-1, 0, 0))\n', (3617, 3629), False, 'from mathutils import Vector\n'), ((3681, 3698), 'mathutils.Vector', 'Vector', (['(0, 1, 0)'], {}), '((0, 1, 0))\n', (3687, 3698), False, 'from mathutils import Vector\n'), ((5207, 5246), 'bpy.ops.render.render', 'bpy.ops.render.render', ([], {'write_still': '(True)'}), '(write_still=True)\n', (5228, 5246), False, 'import bpy\n'), ((2840, 2855), 'random.random', 'random.random', ([], {}), '()\n', (2853, 2855), False, 'import sys, random, json, os, tempfile\n'), ((10417, 10432), 'random.random', 'random.random', ([], {}), '()\n', (10430, 10432), False, 'import sys, random, json, os, tempfile\n')]
|
""" Renders the animation into a list of frames
"""
__all__ = ['OpenCvRenderer', 'FileRenderer']
from dataclasses import dataclass
import cv2
import imageio as iio
import numpy as np
from .animation import Frame, Animation
ESC = 27
@dataclass
class Options:
brightness: int = 100
cutoff: int = 0
@dataclass
class RenderedFrame:
buffer: np.ndarray
duration: int
def __init__(self, frame: Frame, options: Options) -> None:
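        # copy the snapshot into a signed 32-bit buffer and reverse the channel order (RGB -> BGR) for OpenCV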
buffer = np.array(frame.snapshot, np.int32)[:, :, ::-1]
if 0 < options.cutoff < 256:
buffer[buffer < options.cutoff] = 0
np.clip(buffer, 0, 255, out=buffer)
if 0 <= options.brightness < 256:
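            # scale every channel by brightness/256: integer multiply followed by a right shift of 8 bits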
buffer *= options.brightness
buffer >>= 8
self.buffer = buffer.astype(np.uint8)
self.duration = frame.duration
def get_pixels(self, scale: int = 1) -> np.ndarray:
        if scale <= 1:
            return self.buffer
h, w, _ = self.buffer.shape
return cv2.resize(self.buffer, (w * scale, h * scale), interpolation=cv2.INTER_NEAREST)
class Renderer(list):
def __init__(self, animation: Animation, *, brightness: int = 256, cutoff: int = 0) -> None:
self.loop_count = animation.loop_count
options = Options(brightness, cutoff)
super().__init__(RenderedFrame(frame, options) for frame in animation)
class OpenCvRenderer(Renderer):
    def show(self, scale: int = 8, title: str = 'imagiCharm Preview'):
cv2.namedWindow(title)
def is_window_visible():
return cv2.getWindowProperty(title, cv2.WND_PROP_VISIBLE) >= 1
stop = False
repeats = 0
while 1:
for frame in self:
cv2.imshow(title, frame.get_pixels(scale))
key = cv2.waitKey(frame.duration)
if key == ESC or not is_window_visible():
stop = True
break
if stop:
break
if self.loop_count == 0:
continue
repeats += 1
if repeats >= self.loop_count:
break
if is_window_visible():
cv2.destroyWindow(title)
class FileRenderer(Renderer):
def save(self, path, scale: int = 8):
lc_filename = path.lower()
if lc_filename.endswith('.png'):
if len(self) == 1:
self.save_first_frame(path, scale)
else:
self.save_each_frame(path, scale)
elif lc_filename.endswith('.gif'):
self.save_animated_gif(path, scale)
else:
raise ValueError('Unknown image format, please save to PNG or GIF')
def save_each_frame(self, path: str, scale: int):
for i, frame in enumerate(self):
cv2.imwrite(f'{path[:-4]}.{i:04d}.png', self[i].get_pixels(scale))
def save_first_frame(self, path: str, scale: int):
cv2.imwrite(path, self[0].get_pixels(scale))
def save_animated_gif(self, path: str, scale: int):
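        # flip BGR back to RGB for imageio and convert frame durations from milliseconds to seconds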
frames = [frame.get_pixels(scale)[:, :, ::-1] for frame in self]
durations = [0.001 * frame.duration for frame in self]
# https://buildmedia.readthedocs.org/media/pdf/imageio/stable/imageio.pdf
iio.mimsave(path, frames, duration=durations, loop=self.loop_count)
|
[
"numpy.clip",
"cv2.destroyWindow",
"numpy.array",
"cv2.getWindowProperty",
"imageio.mimsave",
"cv2.resize",
"cv2.waitKey",
"cv2.namedWindow"
] |
[((609, 644), 'numpy.clip', 'np.clip', (['buffer', '(0)', '(255)'], {'out': 'buffer'}), '(buffer, 0, 255, out=buffer)\n', (616, 644), True, 'import numpy as np\n'), ((1002, 1087), 'cv2.resize', 'cv2.resize', (['self.buffer', '(w * scale, h * scale)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(self.buffer, (w * scale, h * scale), interpolation=cv2.INTER_NEAREST\n )\n', (1012, 1087), False, 'import cv2\n'), ((1492, 1514), 'cv2.namedWindow', 'cv2.namedWindow', (['title'], {}), '(title)\n', (1507, 1514), False, 'import cv2\n'), ((3264, 3331), 'imageio.mimsave', 'iio.mimsave', (['path', 'frames'], {'duration': 'durations', 'loop': 'self.loop_count'}), '(path, frames, duration=durations, loop=self.loop_count)\n', (3275, 3331), True, 'import imageio as iio\n'), ((467, 501), 'numpy.array', 'np.array', (['frame.snapshot', 'np.int32'], {}), '(frame.snapshot, np.int32)\n', (475, 501), True, 'import numpy as np\n'), ((2185, 2209), 'cv2.destroyWindow', 'cv2.destroyWindow', (['title'], {}), '(title)\n', (2202, 2209), False, 'import cv2\n'), ((1568, 1618), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (['title', 'cv2.WND_PROP_VISIBLE'], {}), '(title, cv2.WND_PROP_VISIBLE)\n', (1589, 1618), False, 'import cv2\n'), ((1798, 1825), 'cv2.waitKey', 'cv2.waitKey', (['frame.duration'], {}), '(frame.duration)\n', (1809, 1825), False, 'import cv2\n')]
|
import os
import random
import numpy as np
import cv2
from lxml import etree
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def object_random(objects):
"""
    randomly choose one object from the list
:param objects: ['object1','object2',...]
:return: 'object3'
"""
return random.choice(objects)
def contrast_img(img1, c, b):
'''
:param img1: original image
:param c: > 1 brighter, < 1 darker
:param b: scalar added to each sum
:return: processed image
'''
rows, cols, chunnel = img1.shape
blank = np.zeros([rows, cols, chunnel], img1.dtype) # np.zeros(img1.shape, dtype=uint8)
dst = cv2.addWeighted(img1, c, blank, 1-c, b)
#cv2.imshow("process", dst)
return dst
def rotateMask(mask,angle):
h,w,c = mask.shape
max_h = np.max(mask.shape)+100
mask_ = np.zeros((max_h,max_h,c))
h_, w_ = int(abs(max_h-h)/2), int(abs(max_h-w)/2)
mask_[h_:(h_+h),w_:(w_+w),:] = mask
M = cv2.getRotationMatrix2D((max_h / 2, max_h / 2), angle, 1)
mask_rot = cv2.warpAffine(mask_, M, mask_.shape[::-1][1:])
mask = get_roi(mask_rot)
mask = mask.astype(np.uint8)
return mask
def get_roi(img):
'''
    crop the non-black region of interest from the image
    :param img: image that may contain black padding after rotation
    :return: cropped image containing only the non-black pixels
'''
y_index, x_index = np.where((img != [0, 0, 0]).all(axis=2))
y_min, y_max = np.min(y_index), np.max(y_index)
x_min, x_max = np.min(x_index), np.max(x_index)
img_roi = img[y_min:y_max, x_min:x_max, :]
return img_roi
def occlusion_random():
"""
    randomly decide whether to occlude (True with probability 0.5)
    :return: bool
"""
p=random.random()
if p>0.5:
return True
else:
return False
def point_random(p_left_up, p_right_bottom):
'''
:param p_left_up: (xmin,ymin)
:param p_right_bottom: (xmax,ymax)
    :return: [x, y] where x is the row (height) index and y is the column (width) index, i.e. not the usual (x, y) order
'''
if p_left_up[0]>=p_right_bottom[0]:
y = p_left_up[0]
else:
y = random.randint(p_left_up[0], p_right_bottom[0])
if p_left_up[1]>=p_right_bottom[1]:
x = p_left_up[1]
else:
x = random.randint(p_left_up[1], p_right_bottom[1])
return [x,y]
def img_overlay(image1,image2,point,mask,occlusion):
"""
add image2 to image1 at (point[0],point[1]) with class point[2]
:param image1: background image,(height,width,3)
:param image2: sliding image adding to background image,(height,width,3)
:param point: point[x,y,class,i] indicate where to add and the class of image2
    :param mask: mask image created with class values, (height,width,2); channel 0 is the object class, channel 1 is the object index
:param occlusion: decide whether the sliding image is occluded by background image, bool value
:return: added image,(height,width,3), and the mask is changed
"""
img1=image1.copy()
img2=image2
height,width,rgb=img1.shape
height_r,width_r,rgb_r=img2.shape
# x is height, y is width, but generally x is width, y is height
x=point[0]
y=point[1]
object=point[2]
# print '...',point[3]
if x+height_r>height or y+width_r>width:
return img1
for i in range(height_r):
for j in range(width_r):
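            # pixels of the sliding image that are close to black are treated as transparent background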
if img2[i,j,0]<5 and img2[i,j,1]<5 and img2[i,j,2]<5:
img1[x+i,y+j,:]=img1[x+i,y+j,:]
else:
if mask[x+i,y+j,0]!=0:
img1[x+i,y+j,:]= img1[x+i,y+j,:] if occlusion else img2[i,j,:]
mask[x + i, y + j, 0] =mask[x + i, y + j, 0] if occlusion else object
mask[x + i, y + j, 1] = mask[x + i, y + j, 1] if occlusion else point[3]
else:
img1[x + i, y + j, :] = img2[i, j, :]
mask[x + i, y + j, 0] = object
mask[x + i, y + j, 1] = point[3]
return img1
def occlusion_ratio(mask,image2,point):
"""
    compute the occlusion ratio of the sliding image based on the accumulated mask
:param mask: mask of synthetic image with lots of objects,(height,width,2)
:param image2: sliding image,(height,width,3)
:param point: [x,y,class,i]
:return:
"""
height, width, rgb = mask.shape
height_r, width_r, rgb_r = image2.shape
x=point[0]
y=point[1]
object=point[2]
if x+height_r>height or y+width_r>width:
return 1
total=0
occlusion=0
for i in range(height_r):
for j in range(width_r):
if image2[i,j,0]>4 or image2[i,j,1]>4 or image2[i,j,2]>4:
total=total+1
if mask[x + i, y + j, 0] != object or mask[x+i,y+j,1]!=point[3]:
occlusion = occlusion + 1
# print '...occlusion,total',occlusion,total
return float(occlusion)/float(total)
def pascal_xml(img_syn,mask,imgs_added,objects_added,points,ratio,path,name):
"""
write synthetic images to xml files like Pascal VOC2007
:param img_syn:
:param mask:
:param imgs_added:
:param objects_added:
:param points: [num][x,y,class,i]
:param ratio:
:param path: '/home/robot/Downloads/segmentation/dataset/data_sr300/VOCdevkit'
:param name: '000000'
:return:
"""
annotation_path=os.path.join(path,'VOC2007','Annotations',name+'.xml')
img_path=os.path.join(path,'VOC2007','JPEGImages',name+'.jpg')
if not os.path.exists(os.path.join(path,'VOC2007','JPEGImages')):
os.makedirs(os.path.join(path,'VOC2007','JPEGImages'))
if not os.path.exists(os.path.join(path,'VOC2007','Annotations')):
os.makedirs(os.path.join(path,'VOC2007','Annotations'))
cv2.imwrite(img_path,img_syn)
annotation=etree.Element("annotation")
etree.SubElement(annotation, "folder").text = "VOC2007"
etree.SubElement(annotation, "filename").text = name+'.jpg'
source = etree.SubElement(annotation, "source")
etree.SubElement(source, "database").text = "The VOC2007 Database"
etree.SubElement(source, "annotation").text = "PASCAL VOC2007"
etree.SubElement(source, "image").text = "flickr"
etree.SubElement(source, "flickrid").text = " "
owner = etree.SubElement(annotation, "owner")
etree.SubElement(owner, "flickrid").text = 'sjtu'
etree.SubElement(owner, "name").text = '<NAME>'
size = etree.SubElement(annotation, "size")
etree.SubElement(size, "width").text = '640'
etree.SubElement(size, "height").text = '480'
etree.SubElement(size, "depth").text = '3'
etree.SubElement(annotation, "segmented").text = '0'
for i,img in enumerate(imgs_added):
point=points[i]
# print '....',i,point
height,width,rgb=img.shape
xmin=point[1]
ymin=point[0]
xmax=point[1]+width
ymax=point[0]+height
        ratio_object=occlusion_ratio(mask,img,point)  # 1.0 means the object is fully occluded
if ratio_object<1 and ratio_object>ratio:
key_object = etree.SubElement(annotation, "object")
etree.SubElement(key_object, "name").text = objects_added[i]
etree.SubElement(key_object, "difficult").text = '1'
etree.SubElement(key_object, "occlusion").text = str(ratio_object)
bndbox = etree.SubElement(key_object, "bndbox")
etree.SubElement(bndbox, "xmin").text = str(xmin)
etree.SubElement(bndbox, "ymin").text = str(ymin)
etree.SubElement(bndbox, "xmax").text = str(xmax)
etree.SubElement(bndbox, "ymax").text = str(ymax)
elif ratio_object<=ratio:
key_object = etree.SubElement(annotation, "object")
etree.SubElement(key_object, "name").text = objects_added[i]
etree.SubElement(key_object, "difficult").text = '0'
etree.SubElement(key_object, "occlusion").text = str(ratio_object)
bndbox = etree.SubElement(key_object, "bndbox")
etree.SubElement(bndbox, "xmin").text = str(xmin)
etree.SubElement(bndbox, "ymin").text = str(ymin)
etree.SubElement(bndbox, "xmax").text = str(xmax)
etree.SubElement(bndbox, "ymax").text = str(ymax)
doc = etree.ElementTree(annotation)
doc.write(open(annotation_path, "w"), pretty_print=True)
|
[
"lxml.etree.Element",
"cv2.imwrite",
"os.path.exists",
"random.choice",
"cv2.warpAffine",
"lxml.etree.SubElement",
"lxml.etree.ElementTree",
"os.makedirs",
"os.path.join",
"numpy.max",
"cv2.addWeighted",
"numpy.zeros",
"numpy.min",
"cv2.getRotationMatrix2D",
"random.random",
"random.randint"
] |
[((309, 331), 'random.choice', 'random.choice', (['objects'], {}), '(objects)\n', (322, 331), False, 'import random\n'), ((572, 615), 'numpy.zeros', 'np.zeros', (['[rows, cols, chunnel]', 'img1.dtype'], {}), '([rows, cols, chunnel], img1.dtype)\n', (580, 615), True, 'import numpy as np\n'), ((663, 704), 'cv2.addWeighted', 'cv2.addWeighted', (['img1', 'c', 'blank', '(1 - c)', 'b'], {}), '(img1, c, blank, 1 - c, b)\n', (678, 704), False, 'import cv2\n'), ((850, 877), 'numpy.zeros', 'np.zeros', (['(max_h, max_h, c)'], {}), '((max_h, max_h, c))\n', (858, 877), True, 'import numpy as np\n'), ((980, 1037), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(max_h / 2, max_h / 2)', 'angle', '(1)'], {}), '((max_h / 2, max_h / 2), angle, 1)\n', (1003, 1037), False, 'import cv2\n'), ((1053, 1100), 'cv2.warpAffine', 'cv2.warpAffine', (['mask_', 'M', 'mask_.shape[::-1][1:]'], {}), '(mask_, M, mask_.shape[::-1][1:])\n', (1067, 1100), False, 'import cv2\n'), ((1598, 1613), 'random.random', 'random.random', ([], {}), '()\n', (1611, 1613), False, 'import random\n'), ((5113, 5172), 'os.path.join', 'os.path.join', (['path', '"""VOC2007"""', '"""Annotations"""', "(name + '.xml')"], {}), "(path, 'VOC2007', 'Annotations', name + '.xml')\n", (5125, 5172), False, 'import os\n'), ((5181, 5239), 'os.path.join', 'os.path.join', (['path', '"""VOC2007"""', '"""JPEGImages"""', "(name + '.jpg')"], {}), "(path, 'VOC2007', 'JPEGImages', name + '.jpg')\n", (5193, 5239), False, 'import os\n'), ((5509, 5539), 'cv2.imwrite', 'cv2.imwrite', (['img_path', 'img_syn'], {}), '(img_path, img_syn)\n', (5520, 5539), False, 'import cv2\n'), ((5554, 5581), 'lxml.etree.Element', 'etree.Element', (['"""annotation"""'], {}), "('annotation')\n", (5567, 5581), False, 'from lxml import etree\n'), ((5719, 5757), 'lxml.etree.SubElement', 'etree.SubElement', (['annotation', '"""source"""'], {}), "(annotation, 'source')\n", (5735, 5757), False, 'from lxml import etree\n'), ((6014, 6051), 'lxml.etree.SubElement', 'etree.SubElement', (['annotation', '"""owner"""'], {}), "(annotation, 'owner')\n", (6030, 6051), False, 'from lxml import etree\n'), ((6169, 6205), 'lxml.etree.SubElement', 'etree.SubElement', (['annotation', '"""size"""'], {}), "(annotation, 'size')\n", (6185, 6205), False, 'from lxml import etree\n'), ((7993, 8022), 'lxml.etree.ElementTree', 'etree.ElementTree', (['annotation'], {}), '(annotation)\n', (8010, 8022), False, 'from lxml import etree\n'), ((106, 126), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (120, 126), False, 'import os\n'), ((136, 153), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (147, 153), False, 'import os\n'), ((815, 833), 'numpy.max', 'np.max', (['mask.shape'], {}), '(mask.shape)\n', (821, 833), True, 'import numpy as np\n'), ((1350, 1365), 'numpy.min', 'np.min', (['y_index'], {}), '(y_index)\n', (1356, 1365), True, 'import numpy as np\n'), ((1367, 1382), 'numpy.max', 'np.max', (['y_index'], {}), '(y_index)\n', (1373, 1382), True, 'import numpy as np\n'), ((1402, 1417), 'numpy.min', 'np.min', (['x_index'], {}), '(x_index)\n', (1408, 1417), True, 'import numpy as np\n'), ((1419, 1434), 'numpy.max', 'np.max', (['x_index'], {}), '(x_index)\n', (1425, 1434), True, 'import numpy as np\n'), ((1934, 1981), 'random.randint', 'random.randint', (['p_left_up[0]', 'p_right_bottom[0]'], {}), '(p_left_up[0], p_right_bottom[0])\n', (1948, 1981), False, 'import random\n'), ((2069, 2116), 'random.randint', 'random.randint', (['p_left_up[1]', 'p_right_bottom[1]'], {}), '(p_left_up[1], 
p_right_bottom[1])\n', (2083, 2116), False, 'import random\n'), ((5586, 5624), 'lxml.etree.SubElement', 'etree.SubElement', (['annotation', '"""folder"""'], {}), "(annotation, 'folder')\n", (5602, 5624), False, 'from lxml import etree\n'), ((5646, 5686), 'lxml.etree.SubElement', 'etree.SubElement', (['annotation', '"""filename"""'], {}), "(annotation, 'filename')\n", (5662, 5686), False, 'from lxml import etree\n'), ((5762, 5798), 'lxml.etree.SubElement', 'etree.SubElement', (['source', '"""database"""'], {}), "(source, 'database')\n", (5778, 5798), False, 'from lxml import etree\n'), ((5833, 5871), 'lxml.etree.SubElement', 'etree.SubElement', (['source', '"""annotation"""'], {}), "(source, 'annotation')\n", (5849, 5871), False, 'from lxml import etree\n'), ((5900, 5933), 'lxml.etree.SubElement', 'etree.SubElement', (['source', '"""image"""'], {}), "(source, 'image')\n", (5916, 5933), False, 'from lxml import etree\n'), ((5954, 5990), 'lxml.etree.SubElement', 'etree.SubElement', (['source', '"""flickrid"""'], {}), "(source, 'flickrid')\n", (5970, 5990), False, 'from lxml import etree\n'), ((6056, 6091), 'lxml.etree.SubElement', 'etree.SubElement', (['owner', '"""flickrid"""'], {}), "(owner, 'flickrid')\n", (6072, 6091), False, 'from lxml import etree\n'), ((6110, 6141), 'lxml.etree.SubElement', 'etree.SubElement', (['owner', '"""name"""'], {}), "(owner, 'name')\n", (6126, 6141), False, 'from lxml import etree\n'), ((6210, 6241), 'lxml.etree.SubElement', 'etree.SubElement', (['size', '"""width"""'], {}), "(size, 'width')\n", (6226, 6241), False, 'from lxml import etree\n'), ((6259, 6291), 'lxml.etree.SubElement', 'etree.SubElement', (['size', '"""height"""'], {}), "(size, 'height')\n", (6275, 6291), False, 'from lxml import etree\n'), ((6309, 6340), 'lxml.etree.SubElement', 'etree.SubElement', (['size', '"""depth"""'], {}), "(size, 'depth')\n", (6325, 6340), False, 'from lxml import etree\n'), ((6356, 6397), 'lxml.etree.SubElement', 'etree.SubElement', (['annotation', '"""segmented"""'], {}), "(annotation, 'segmented')\n", (6372, 6397), False, 'from lxml import etree\n'), ((5262, 5305), 'os.path.join', 'os.path.join', (['path', '"""VOC2007"""', '"""JPEGImages"""'], {}), "(path, 'VOC2007', 'JPEGImages')\n", (5274, 5305), False, 'import os\n'), ((5326, 5369), 'os.path.join', 'os.path.join', (['path', '"""VOC2007"""', '"""JPEGImages"""'], {}), "(path, 'VOC2007', 'JPEGImages')\n", (5338, 5369), False, 'import os\n'), ((5395, 5439), 'os.path.join', 'os.path.join', (['path', '"""VOC2007"""', '"""Annotations"""'], {}), "(path, 'VOC2007', 'Annotations')\n", (5407, 5439), False, 'import os\n'), ((5460, 5504), 'os.path.join', 'os.path.join', (['path', '"""VOC2007"""', '"""Annotations"""'], {}), "(path, 'VOC2007', 'Annotations')\n", (5472, 5504), False, 'import os\n'), ((6796, 6834), 'lxml.etree.SubElement', 'etree.SubElement', (['annotation', '"""object"""'], {}), "(annotation, 'object')\n", (6812, 6834), False, 'from lxml import etree\n'), ((7073, 7111), 'lxml.etree.SubElement', 'etree.SubElement', (['key_object', '"""bndbox"""'], {}), "(key_object, 'bndbox')\n", (7089, 7111), False, 'from lxml import etree\n'), ((6847, 6883), 'lxml.etree.SubElement', 'etree.SubElement', (['key_object', '"""name"""'], {}), "(key_object, 'name')\n", (6863, 6883), False, 'from lxml import etree\n'), ((6920, 6961), 'lxml.etree.SubElement', 'etree.SubElement', (['key_object', '"""difficult"""'], {}), "(key_object, 'difficult')\n", (6936, 6961), False, 'from lxml import etree\n'), ((6985, 7026), 'lxml.etree.SubElement', 
'etree.SubElement', (['key_object', '"""occlusion"""'], {}), "(key_object, 'occlusion')\n", (7001, 7026), False, 'from lxml import etree\n'), ((7124, 7156), 'lxml.etree.SubElement', 'etree.SubElement', (['bndbox', '"""xmin"""'], {}), "(bndbox, 'xmin')\n", (7140, 7156), False, 'from lxml import etree\n'), ((7186, 7218), 'lxml.etree.SubElement', 'etree.SubElement', (['bndbox', '"""ymin"""'], {}), "(bndbox, 'ymin')\n", (7202, 7218), False, 'from lxml import etree\n'), ((7248, 7280), 'lxml.etree.SubElement', 'etree.SubElement', (['bndbox', '"""xmax"""'], {}), "(bndbox, 'xmax')\n", (7264, 7280), False, 'from lxml import etree\n'), ((7310, 7342), 'lxml.etree.SubElement', 'etree.SubElement', (['bndbox', '"""ymax"""'], {}), "(bndbox, 'ymax')\n", (7326, 7342), False, 'from lxml import etree\n'), ((7419, 7457), 'lxml.etree.SubElement', 'etree.SubElement', (['annotation', '"""object"""'], {}), "(annotation, 'object')\n", (7435, 7457), False, 'from lxml import etree\n'), ((7696, 7734), 'lxml.etree.SubElement', 'etree.SubElement', (['key_object', '"""bndbox"""'], {}), "(key_object, 'bndbox')\n", (7712, 7734), False, 'from lxml import etree\n'), ((7470, 7506), 'lxml.etree.SubElement', 'etree.SubElement', (['key_object', '"""name"""'], {}), "(key_object, 'name')\n", (7486, 7506), False, 'from lxml import etree\n'), ((7543, 7584), 'lxml.etree.SubElement', 'etree.SubElement', (['key_object', '"""difficult"""'], {}), "(key_object, 'difficult')\n", (7559, 7584), False, 'from lxml import etree\n'), ((7608, 7649), 'lxml.etree.SubElement', 'etree.SubElement', (['key_object', '"""occlusion"""'], {}), "(key_object, 'occlusion')\n", (7624, 7649), False, 'from lxml import etree\n'), ((7747, 7779), 'lxml.etree.SubElement', 'etree.SubElement', (['bndbox', '"""xmin"""'], {}), "(bndbox, 'xmin')\n", (7763, 7779), False, 'from lxml import etree\n'), ((7809, 7841), 'lxml.etree.SubElement', 'etree.SubElement', (['bndbox', '"""ymin"""'], {}), "(bndbox, 'ymin')\n", (7825, 7841), False, 'from lxml import etree\n'), ((7871, 7903), 'lxml.etree.SubElement', 'etree.SubElement', (['bndbox', '"""xmax"""'], {}), "(bndbox, 'xmax')\n", (7887, 7903), False, 'from lxml import etree\n'), ((7933, 7965), 'lxml.etree.SubElement', 'etree.SubElement', (['bndbox', '"""ymax"""'], {}), "(bndbox, 'ymax')\n", (7949, 7965), False, 'from lxml import etree\n')]
|
from __future__ import division
from __future__ import print_function
import os
import glob
import time
import random
import argparse
import numpy as np
import torch
import torchvision.models as models
import torch.autograd.profiler as profiler
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from utils import load_data, accuracy
from models import GAT, SpGAT
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=False, help='Validate during training pass.')
parser.add_argument('--sparse', action='store_true', default=False, help='GAT with sparse version or not.')
parser.add_argument('--epochs', type=int, default=10000, help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.005, help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=8, help='Number of hidden units.')
parser.add_argument('--nb_heads', type=int, default=8, help='Number of head attentions.')
parser.add_argument('--dropout', type=float, default=0.6, help='Dropout rate (1 - keep probability).')
parser.add_argument('--alpha', type=float, default=0.2, help='Alpha for the leaky_relu.')
parser.add_argument('--patience', type=int, default=100, help='Patience')
parser.add_argument('--seed', type=int, default=72, help='Random seed.')
parser.add_argument('--time_file', type=str, default='', help='timing output file')
parser.add_argument('--pkl_file', type=str, default='trained-model.pkl', help='trained model input file (pkl)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data()
# Model and optimizer
if args.sparse:
model = SpGAT(nfeat=features.shape[1],
nhid=args.hidden,
nclass=int(labels.max()) + 1,
dropout=args.dropout,
nheads=args.nb_heads,
alpha=args.alpha)
else:
model = GAT(nfeat=features.shape[1],
nhid=args.hidden,
nclass=int(labels.max()) + 1,
dropout=args.dropout,
nheads=args.nb_heads,
alpha=args.alpha)
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay)
if args.cuda:
model.cuda()
features = features.cuda()
adj = adj.cuda()
labels = labels.cuda()
idx_train = idx_train.cuda()
idx_val = idx_val.cuda()
idx_test = idx_test.cuda()
features, adj, labels = Variable(features), Variable(adj), Variable(labels)
def compute_test():
with profiler.profile(profile_memory=True, record_shapes=True, use_cuda=True) as prof:
with profiler.record_function("model_inference"):
model.eval()
output = model(features, adj)
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.data.item()),
"accuracy= {:.4f}".format(acc_test.data.item()))
#print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
print(prof.key_averages().table(sort_by="cpu_memory_usage", row_limit=10))
def time_model(file):
model.eval()
n_warmup = 50
n_sample = 50
print("=== Running Warmup Passes")
for i in range(0,n_warmup):
output = model(features, adj)
print("=== Collecting Runtime over ", str(n_sample), " Passes")
tic = time.perf_counter()
for i in range(0,n_sample):
output = model(features, adj)
toc = time.perf_counter()
avg_runtime = float(toc - tic)/n_sample
print("average runtime = ", avg_runtime)
# write runtime to file
f = open(file, "w")
f.write(str(avg_runtime)+"\n")
f.close()
if __name__ == "__main__":
map_location=torch.device('cpu')
model.load_state_dict(torch.load(args.pkl_file))
if len(args.time_file) != 0: # time and send time to file
time_model(args.time_file)
compute_test()
|
[
"torch.manual_seed",
"utils.load_data",
"argparse.ArgumentParser",
"torch.nn.functional.nll_loss",
"utils.accuracy",
"torch.load",
"time.perf_counter",
"random.seed",
"torch.autograd.profiler.profile",
"torch.cuda.is_available",
"torch.autograd.profiler.record_function",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.autograd.Variable",
"torch.device"
] |
[((464, 489), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (487, 489), False, 'import argparse\n'), ((1894, 1916), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (1905, 1916), False, 'import random\n'), ((1917, 1942), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1931, 1942), True, 'import numpy as np\n'), ((1943, 1971), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1960, 1971), False, 'import torch\n'), ((2091, 2102), 'utils.load_data', 'load_data', ([], {}), '()\n', (2100, 2102), False, 'from utils import load_data, accuracy\n'), ((1868, 1893), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1891, 1893), False, 'import torch\n'), ((1990, 2023), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2012, 2023), False, 'import torch\n'), ((2986, 3004), 'torch.autograd.Variable', 'Variable', (['features'], {}), '(features)\n', (2994, 3004), False, 'from torch.autograd import Variable\n'), ((3006, 3019), 'torch.autograd.Variable', 'Variable', (['adj'], {}), '(adj)\n', (3014, 3019), False, 'from torch.autograd import Variable\n'), ((3021, 3037), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (3029, 3037), False, 'from torch.autograd import Variable\n'), ((4005, 4024), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4022, 4024), False, 'import time\n'), ((4105, 4124), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4122, 4124), False, 'import time\n'), ((4361, 4380), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4373, 4380), False, 'import torch\n'), ((3068, 3140), 'torch.autograd.profiler.profile', 'profiler.profile', ([], {'profile_memory': '(True)', 'record_shapes': '(True)', 'use_cuda': '(True)'}), '(profile_memory=True, record_shapes=True, use_cuda=True)\n', (3084, 3140), True, 'import torch.autograd.profiler as profiler\n'), ((4407, 4432), 'torch.load', 'torch.load', (['args.pkl_file'], {}), '(args.pkl_file)\n', (4417, 4432), False, 'import torch\n'), ((3163, 3206), 'torch.autograd.profiler.record_function', 'profiler.record_function', (['"""model_inference"""'], {}), "('model_inference')\n", (3187, 3206), True, 'import torch.autograd.profiler as profiler\n'), ((3299, 3345), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output[idx_test]', 'labels[idx_test]'], {}), '(output[idx_test], labels[idx_test])\n', (3309, 3345), True, 'import torch.nn.functional as F\n'), ((3369, 3413), 'utils.accuracy', 'accuracy', (['output[idx_test]', 'labels[idx_test]'], {}), '(output[idx_test], labels[idx_test])\n', (3377, 3413), False, 'from utils import load_data, accuracy\n')]
|
import numpy as np
import logging
import random
def open_stl(filename):
count = 0
with open(filename) as f:
for line in f:
count += 1
logging.info("number of lines {}".format(count))
    tri_count = (count - 2) // 7  # each facet block spans 7 lines in an ASCII STL; integer division keeps the count an int
logging.info("number of triangles {}".format(tri_count))
vert_count = tri_count * 3
logging.info("number of vertices {}".format(vert_count))
x = np.zeros((tri_count, 3))
y = np.zeros((tri_count, 3))
z = np.zeros((tri_count, 3))
i = 0
j = 0
with open(filename) as f:
for line in f:
if "vertex" in line:
tokens = line.split()
x[i][j] = float(tokens[1])
y[i][j] = float(tokens[2])
z[i][j] = float(tokens[3])
j += 1
if j == 3:
j = 0
i += 1
    # keep only every ratio-th triangle when the mesh is very large
    ratio = tri_count // 100000
    if ratio >= 2:
        x = x[::ratio,:]
        y = y[::ratio,:]
        z = z[::ratio,:]
        tri_count = tri_count // ratio
    triangles = [None] * tri_count
    for i in range(tri_count):
v = i * 3
triangles[i] = (v, v+1, v+2)
return x.flatten(), y.flatten(), z.flatten() , triangles
if __name__ == "__main__":
# x, y, z, triangles = open_stl("STL_INSTANCES\\50_cat_3446170_3_d.stl")
x, y, z, triangles = open_stl("20_allison_x4560_1_e.stl")
from mayavi import mlab
s = mlab.triangular_mesh(x, y, z, triangles, color=(random.random() / 2 + 0.5,
random.random() / 2 + 0.5,
random.random() / 2 + 0.5
))
mlab.show()
|
[
"random.random",
"numpy.zeros",
"mayavi.mlab.show"
] |
[((431, 455), 'numpy.zeros', 'np.zeros', (['(tri_count, 3)'], {}), '((tri_count, 3))\n', (439, 455), True, 'import numpy as np\n'), ((465, 489), 'numpy.zeros', 'np.zeros', (['(tri_count, 3)'], {}), '((tri_count, 3))\n', (473, 489), True, 'import numpy as np\n'), ((499, 523), 'numpy.zeros', 'np.zeros', (['(tri_count, 3)'], {}), '((tri_count, 3))\n', (507, 523), True, 'import numpy as np\n'), ((1848, 1859), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (1857, 1859), False, 'from mayavi import mlab\n'), ((1589, 1604), 'random.random', 'random.random', ([], {}), '()\n', (1602, 1604), False, 'import random\n'), ((1673, 1688), 'random.random', 'random.random', ([], {}), '()\n', (1686, 1688), False, 'import random\n'), ((1757, 1772), 'random.random', 'random.random', ([], {}), '()\n', (1770, 1772), False, 'import random\n')]
|
import sys
sys.path.append('../lib')
import exchange
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import norm
def transition_probabilities(chain, offset=1):
states = np.array([s for s in set(chain)])
state_space = {s: i for i, s in enumerate(states)}
transition_matrix = np.zeros((states.shape[0], states.shape[0]))
for i in states:
total_in_state = np.sum(chain == i) - np.sum(chain[-offset:] == i)
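        # positions that lie exactly `offset` steps after an occurrence of state i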
relevant_states = np.concatenate(([False] * offset, (chain == i)[:-offset]))
for j in states:
transition_matrix[state_space[i]][state_space[j]] = np.sum(chain[relevant_states] == j) / total_in_state
return transition_matrix, state_space
def main():
e = exchange.Exchange('../lib/binance.db')
times = [datetime.datetime(2018, 4, 1) + datetime.timedelta(days=i) for i in range(500)]
p_00 = []
p_11 = []
prices = []
for start, end in zip(times[:-1], times[1:]):
print(start)
it = e.get_orders('BTCUSDT', start.timestamp() * 1000, end.timestamp() * 1000)
trade_data = {'price': [], 'time': [], 'side': []}
for order in it:
trade_data['price'].append(order.end_price)
trade_data['time'].append(order.time)
trade_data['side'].append(order.buyer) #True if market order is a buy
trade_data = pd.DataFrame(trade_data)
trade_data['price_change'] = np.log(np.concatenate(([1], trade_data['price'].values[1:] / trade_data['price'].values[:-1])))
movement = np.zeros(trade_data.shape[0])
movement[trade_data['price_change'] > 0] = 1
movement[trade_data['price_change'] < 0] = -1
chain = movement[movement != 0]
P, states = transition_probabilities(chain)
try:
p_11.append(P[states[1]][states[1]])
p_00.append(P[states[-1]][states[-1]])
        except KeyError:
            pass
prices.append(np.mean(trade_data['price']))
#fig, ax1 = plt.subplots()
#ax2 = ax1.twinx()
#ax1.plot(prices, color='blue')
#ax2.plot(p_11, color='green', label='p_11')
#ax2.plot(p_00, color='red', label='p_00')
#ax2.legend()
#ax1.set_xlabel('Day')
#ax1.set_ylabel('BTC Price')
#ax2.set_ylabel('Probability')
plt.figure()
plt.hist(np.diff(p_00), 50, density=True)
loc, scale = norm.fit(np.diff(p_00))
x = np.linspace(np.min(np.diff(p_00)), np.max(np.diff(p_00)), 100)
plt.plot(x, norm.pdf(x, loc=loc, scale=scale))
plt.figure()
plt.hist(np.diff(p_11), 50, density=True)
x = np.linspace(np.min(np.diff(p_11)), np.max(np.diff(p_11)), 100)
loc, scale = norm.fit(np.diff(p_11))
plt.plot(x, norm.pdf(x, loc=loc, scale=scale))
plt.show()
if __name__ == '__main__':
main()
|
[
"datetime.datetime",
"numpy.mean",
"numpy.diff",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"exchange.Exchange",
"scipy.stats.norm.pdf",
"numpy.concatenate",
"pandas.DataFrame",
"datetime.timedelta",
"sys.path.append",
"matplotlib.pyplot.show"
] |
[((11, 36), 'sys.path.append', 'sys.path.append', (['"""../lib"""'], {}), "('../lib')\n", (26, 36), False, 'import sys\n'), ((351, 395), 'numpy.zeros', 'np.zeros', (['(states.shape[0], states.shape[0])'], {}), '((states.shape[0], states.shape[0]))\n', (359, 395), True, 'import numpy as np\n'), ((789, 827), 'exchange.Exchange', 'exchange.Exchange', (['"""../lib/binance.db"""'], {}), "('../lib/binance.db')\n", (806, 827), False, 'import exchange\n'), ((2483, 2495), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2493, 2495), True, 'import matplotlib.pyplot as plt\n'), ((2730, 2742), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2740, 2742), True, 'import matplotlib.pyplot as plt\n'), ((2977, 2987), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2985, 2987), True, 'import matplotlib.pyplot as plt\n'), ((518, 576), 'numpy.concatenate', 'np.concatenate', (['([False] * offset, (chain == i)[:-offset])'], {}), '(([False] * offset, (chain == i)[:-offset]))\n', (532, 576), True, 'import numpy as np\n'), ((1476, 1500), 'pandas.DataFrame', 'pd.DataFrame', (['trade_data'], {}), '(trade_data)\n', (1488, 1500), True, 'import pandas as pd\n'), ((1663, 1692), 'numpy.zeros', 'np.zeros', (['trade_data.shape[0]'], {}), '(trade_data.shape[0])\n', (1671, 1692), True, 'import numpy as np\n'), ((2513, 2526), 'numpy.diff', 'np.diff', (['p_00'], {}), '(p_00)\n', (2520, 2526), True, 'import numpy as np\n'), ((2576, 2589), 'numpy.diff', 'np.diff', (['p_00'], {}), '(p_00)\n', (2583, 2589), True, 'import numpy as np\n'), ((2686, 2719), 'scipy.stats.norm.pdf', 'norm.pdf', (['x'], {'loc': 'loc', 'scale': 'scale'}), '(x, loc=loc, scale=scale)\n', (2694, 2719), False, 'from scipy.stats import norm\n'), ((2760, 2773), 'numpy.diff', 'np.diff', (['p_11'], {}), '(p_11)\n', (2767, 2773), True, 'import numpy as np\n'), ((2898, 2911), 'numpy.diff', 'np.diff', (['p_11'], {}), '(p_11)\n', (2905, 2911), True, 'import numpy as np\n'), ((2933, 2966), 'scipy.stats.norm.pdf', 'norm.pdf', (['x'], {'loc': 'loc', 'scale': 'scale'}), '(x, loc=loc, scale=scale)\n', (2941, 2966), False, 'from scipy.stats import norm\n'), ((442, 460), 'numpy.sum', 'np.sum', (['(chain == i)'], {}), '(chain == i)\n', (448, 460), True, 'import numpy as np\n'), ((463, 491), 'numpy.sum', 'np.sum', (['(chain[-offset:] == i)'], {}), '(chain[-offset:] == i)\n', (469, 491), True, 'import numpy as np\n'), ((846, 875), 'datetime.datetime', 'datetime.datetime', (['(2018)', '(4)', '(1)'], {}), '(2018, 4, 1)\n', (863, 875), False, 'import datetime\n'), ((878, 904), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'i'}), '(days=i)\n', (896, 904), False, 'import datetime\n'), ((1550, 1642), 'numpy.concatenate', 'np.concatenate', (["([1], trade_data['price'].values[1:] / trade_data['price'].values[:-1])"], {}), "(([1], trade_data['price'].values[1:] / trade_data['price'].\n values[:-1]))\n", (1564, 1642), True, 'import numpy as np\n'), ((2105, 2133), 'numpy.mean', 'np.mean', (["trade_data['price']"], {}), "(trade_data['price'])\n", (2112, 2133), True, 'import numpy as np\n'), ((2622, 2635), 'numpy.diff', 'np.diff', (['p_00'], {}), '(p_00)\n', (2629, 2635), True, 'import numpy as np\n'), ((2645, 2658), 'numpy.diff', 'np.diff', (['p_00'], {}), '(p_00)\n', (2652, 2658), True, 'import numpy as np\n'), ((2824, 2837), 'numpy.diff', 'np.diff', (['p_11'], {}), '(p_11)\n', (2831, 2837), True, 'import numpy as np\n'), ((2847, 2860), 'numpy.diff', 'np.diff', (['p_11'], {}), '(p_11)\n', (2854, 2860), True, 'import numpy as np\n'), ((666, 
701), 'numpy.sum', 'np.sum', (['(chain[relevant_states] == j)'], {}), '(chain[relevant_states] == j)\n', (672, 701), True, 'import numpy as np\n')]
|
import numpy as np
import math
from pyspark.sql import Row
"""
Implementation of Lorentz vector
"""
class LorentzVector(object):
def __init__(self, *args):
if len(args)>0:
self.x = args[0]
self.y = args[1]
self.z = args[2]
self.t = args[3]
def SetPtEtaPhiM(self, pt, eta, phi, mass):
pt = abs(pt)
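        # convert (pt, eta, phi, m) to Cartesian components: px = pt*cos(phi), py = pt*sin(phi), pz = pt*sinh(eta)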
self.SetXYZM(pt*math.cos(phi), pt*math.sin(phi), pt*math.sinh(eta), mass)
def SetXYZM(self, x, y, z, m):
        self.x = x
self.y = y
self.z = z
if (m>=0):
self.t = math.sqrt(x*x + y*y + z*z + m*m)
else:
self.t = math.sqrt(max(x*x + y*y + z*z - m*m, 0))
def E(self):
return self.t
def Px(self):
return self.x
def Py(self):
return self.y
def Pz(self):
return self.z
def Pt(self):
return math.sqrt(self.x*self.x + self.y*self.y)
def Eta(self):
        cosTheta = self.CosTheta()
        if cosTheta*cosTheta<1:
            return -0.5*math.log((1.0 - cosTheta)/(1.0 + cosTheta))
        if self.z == 0: return 0
        # |cosTheta| >= 1 with non-zero z: pseudorapidity diverges along the beam axis
        return float('inf') if self.z > 0 else float('-inf')
def mag(self):
return math.sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
def CosTheta(self):
return 1.0 if self.mag()==0.0 else self.z/self.mag()
def Phi(self):
return math.atan2(self.y, self.x)
def DeltaR(self, other):
deta = self.Eta() - other.Eta()
dphi = self.Phi() - other.Phi()
pi = math.pi
while dphi > pi: dphi -= 2*pi
while dphi < -pi: dphi += 2*pi
return math.sqrt(deta*deta + dphi*dphi)
"""
Functions used to return the Pt map of selected tracks, neutrals and photons
"""
def ChPtMapp(DR, event):
pTmap = []
for h in event.EFlowTrack:
if h.PT<= 0.5: continue
pTmap.append([h.Eta, h.Phi, h.PT])
return np.asarray(pTmap)
def NeuPtMapp(DR, event):
pTmap = []
for h in event.EFlowNeutralHadron:
if h.ET<= 1.0: continue
pTmap.append([h.Eta, h.Phi, h.ET])
return np.asarray(pTmap)
def PhotonPtMapp(DR, event):
pTmap = []
for h in event.EFlowPhoton:
if h.ET<= 1.0: continue
pTmap.append([h.Eta, h.Phi, h.ET])
return np.asarray(pTmap)
"""
Functions used to return the Pt map of selected tracks, neutrals and photons
Versions used for the optimized filtering with Spark SQL and HOF
"""
# get the selected tracks
def ChPtMapp2(Tracks):
#pTmap = []
pTmap = np.zeros((len(Tracks), 3))
for i, h in enumerate(Tracks):
pTmap[i] = [h["Eta"], h["Phi"], h["PT"]]
return pTmap
# get the selected neutrals
def NeuPtMapp2(NeutralHadrons):
pTmap = np.zeros((len(NeutralHadrons), 3))
for i, h in enumerate(NeutralHadrons):
pTmap[i] = [h["Eta"], h["Phi"], h["ET"]]
return pTmap
# get the selected photons
def PhotonPtMapp2(Photons):
pTmap = np.zeros((len(Photons), 3))
for i, h in enumerate(Photons):
pTmap[i] = [h["Eta"], h["Phi"], h["ET"]]
return pTmap
"""
Get the particle ISO
"""
def PFIso(p, DR, PtMap, subtractPt):
if p.Pt() <= 0.: return 0.
DeltaEta = PtMap[:,0] - p.Eta()
DeltaPhi = PtMap[:,1] - p.Phi()
twopi = 2.* math.pi
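    # fold DeltaPhi values that fall outside [-2*pi, 2*pi] back into range before the cone test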
DeltaPhi = DeltaPhi - twopi*(DeltaPhi > twopi) + twopi*(DeltaPhi < -1.*twopi)
isInCone = DeltaPhi*DeltaPhi + DeltaEta*DeltaEta < DR*DR
Iso = PtMap[isInCone, 2].sum()/p.Pt()
if subtractPt: Iso = Iso -1
return float(Iso)
|
[
"math.sqrt",
"numpy.asarray",
"math.log",
"math.cos",
"math.atan2",
"math.sin",
"math.sinh"
] |
[((1956, 1973), 'numpy.asarray', 'np.asarray', (['pTmap'], {}), '(pTmap)\n', (1966, 1973), True, 'import numpy as np\n'), ((2141, 2158), 'numpy.asarray', 'np.asarray', (['pTmap'], {}), '(pTmap)\n', (2151, 2158), True, 'import numpy as np\n'), ((2322, 2339), 'numpy.asarray', 'np.asarray', (['pTmap'], {}), '(pTmap)\n', (2332, 2339), True, 'import numpy as np\n'), ((935, 979), 'math.sqrt', 'math.sqrt', (['(self.x * self.x + self.y * self.y)'], {}), '(self.x * self.x + self.y * self.y)\n', (944, 979), False, 'import math\n'), ((1207, 1269), 'math.sqrt', 'math.sqrt', (['(self.x * self.x + self.y * self.y + self.z * self.z)'], {}), '(self.x * self.x + self.y * self.y + self.z * self.z)\n', (1216, 1269), False, 'import math\n'), ((1393, 1419), 'math.atan2', 'math.atan2', (['self.y', 'self.x'], {}), '(self.y, self.x)\n', (1403, 1419), False, 'import math\n'), ((1666, 1702), 'math.sqrt', 'math.sqrt', (['(deta * deta + dphi * dphi)'], {}), '(deta * deta + dphi * dphi)\n', (1675, 1702), False, 'import math\n'), ((600, 640), 'math.sqrt', 'math.sqrt', (['(x * x + y * y + z * z + m * m)'], {}), '(x * x + y * y + z * z + m * m)\n', (609, 640), False, 'import math\n'), ((400, 413), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (408, 413), False, 'import math\n'), ((418, 431), 'math.sin', 'math.sin', (['phi'], {}), '(phi)\n', (426, 431), False, 'import math\n'), ((436, 450), 'math.sinh', 'math.sinh', (['eta'], {}), '(eta)\n', (445, 450), False, 'import math\n'), ((1091, 1136), 'math.log', 'math.log', (['((1.0 - cosTheta) / (1.0 + cosTheta))'], {}), '((1.0 - cosTheta) / (1.0 + cosTheta))\n', (1099, 1136), False, 'import math\n')]
|
import pytest, warnings, numpy as np
from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology
from ....support import assert_equal, assert_all_equal, assert_distribution
# Set seed for reproducible randomness
seed = 0
np.random.seed(seed)
rng = np.random.RandomState(seed)
# ========= #
# _Topology #
# ========= #
# --------------------------- #
# _Topology.uniform_initial() #
# --------------------------- #
def test_uniform_initial_min():
"""Generate a uniform initial state distribution with the minimum number of states"""
topology = _Topology(n_states=1, random_state=rng)
initial = topology.uniform_initial()
assert_distribution(initial)
assert_equal(initial, np.array([
1.
]))
def test_uniform_initial_small():
"""Generate a uniform initial state distribution with a few states"""
topology = _Topology(n_states=2, random_state=rng)
initial = topology.uniform_initial()
assert_distribution(initial)
assert_equal(initial, np.array([
0.5, 0.5
]))
def test_uniform_initial_many():
"""Generate a uniform initial state distribution with many states"""
topology = _Topology(n_states=5, random_state=rng)
initial = topology.uniform_initial()
assert_distribution(initial)
assert_equal(initial, np.array([
0.2, 0.2, 0.2, 0.2, 0.2
]))
# -------------------------- #
# _Topology.random_initial() #
# -------------------------- #
def test_random_initial_min():
"""Generate a random initial state distribution with minimal states"""
topology = _Topology(n_states=1, random_state=rng)
initial = topology.random_initial()
assert_distribution(initial)
assert_equal(initial, np.array([
1.
]))
def test_random_initial_small():
"""Generate a random initial state distribution with few states"""
topology = _Topology(n_states=2, random_state=rng)
initial = topology.random_initial()
assert_distribution(initial)
assert_equal(initial, np.array([
0.57633871, 0.42366129
]))
def test_random_initial_many():
"""Generate a random initial state distribution with many states"""
topology = _Topology(n_states=5, random_state=rng)
initial = topology.random_initial()
assert_distribution(initial)
assert_equal(initial, np.array([
0.15210286, 0.10647349, 0.20059295, 0.11120171, 0.42962898
]))
# ================== #
# _LeftRightTopology #
# ================== #
# ---------------------------------------- #
# _LeftRightTopology.uniform_transitions() #
# ---------------------------------------- #
def test_left_right_uniform_transitions_min():
"""Generate a uniform left-right transition matrix with minimal states"""
topology = _LeftRightTopology(n_states=1, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[1.]
]))
def test_left_right_uniform_transitions_small():
"""Generate a uniform left-right transition matrix with few states"""
topology = _LeftRightTopology(n_states=2, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.5, 0.5],
[0. , 1. ]
]))
def test_left_right_uniform_transitions_many():
"""Generate a uniform left-right transition matrix with many states"""
topology = _LeftRightTopology(n_states=5, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.2, 0.2 , 0.2 , 0.2 , 0.2 ],
[0. , 0.25, 0.25 , 0.25 , 0.25 ],
[0. , 0. , 0.33333333, 0.33333333, 0.33333333],
[0. , 0. , 0. , 0.5 , 0.5 ] ,
[0. , 0. , 0. , 0. , 1. ]
]))
# --------------------------------------- #
# _LeftRightTopology.random_transitions() #
# --------------------------------------- #
def test_left_right_random_transitions_min():
"""Generate a random left-right transition matrix with minimal states"""
topology = _LeftRightTopology(n_states=1, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[1.]
]))
def test_left_right_random_transitions_small():
"""Generate a random left-right transition matrix with few states"""
topology = _LeftRightTopology(n_states=2, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.23561633, 0.76438367],
[0. , 1. ]
]))
def test_left_right_random_transitions_many():
"""Generate a random left-right transition matrix with many states"""
topology = _LeftRightTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.23169814, 0.71716356, 0.02033845, 0.02516204, 0.00563782],
[0. , 0.19474072, 0.16405008, 0.22228532, 0.41892388],
[0. , 0. , 0.42912755, 0.16545797, 0.40541448],
[0. , 0. , 0. , 0.109713 , 0.890287 ],
[0. , 0. , 0. , 0. , 1. ]
]))
# ----------------------------------------- #
# _LeftRightTopology.validate_transitions() #
# ----------------------------------------- #
def test_left_right_validate_transitions_invalid():
"""Validate an invalid left-right transition matrix"""
topology = _LeftRightTopology(n_states=5, random_state=rng)
transitions = _ErgodicTopology(n_states=5, random_state=rng).random_transitions()
with pytest.raises(ValueError) as e:
topology.validate_transitions(transitions)
assert str(e.value) == 'Left-right transition matrix must be upper-triangular'
def test_left_right_validate_transitions_valid():
"""Validate a valid left-right transition matrix"""
topology = _LeftRightTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
topology.validate_transitions(transitions)
# -------------------------------------- #
# _ErgodicTopology.uniform_transitions() #
# -------------------------------------- #
def test_ergodic_uniform_transitions_min():
"""Generate a uniform ergodic transition matrix with minimal states"""
topology = _ErgodicTopology(n_states=1, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[1.]
]))
def test_ergodic_uniform_transitions_small():
"""Generate a uniform ergodic transition matrix with few states"""
topology = _ErgodicTopology(n_states=2, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.5, 0.5],
[0.5, 0.5]
]))
def test_ergodic_uniform_transitions_many():
"""Generate a uniform ergodic transition matrix with many states"""
topology = _ErgodicTopology(n_states=5, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.2, 0.2, 0.2, 0.2, 0.2]
]))
# ------------------------------------- #
# _ErgodicTopology.random_transitions() #
# ------------------------------------- #
def test_ergodic_random_transitions_min():
"""Generate a random ergodic transition matrix with minimal states"""
topology = _ErgodicTopology(n_states=1, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[1.]
]))
def test_ergodic_random_transitions_small():
"""Generate a random ergodic transition matrix with few states"""
topology = _ErgodicTopology(n_states=2, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.9474011 , 0.0525989 ],
[0.85567599, 0.14432401]
]))
def test_ergodic_random_transitions_many():
"""Generate a random ergodic transition matrix with many states"""
topology = _ErgodicTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.58715548, 0.14491542, 0.20980762, 0.00623944, 0.05188205],
[0.0840705 , 0.23055049, 0.08297536, 0.25124688, 0.35115677],
[0.02117615, 0.37664662, 0.26705912, 0.09851123, 0.23660688],
[0.01938041, 0.16853843, 0.52046123, 0.07535256, 0.21626737],
[0.04996846, 0.44545843, 0.12079423, 0.07154241, 0.31223646]
]))
# --------------------------------------- #
# _ErgodicTopology.validate_transitions() #
# --------------------------------------- #
def test_ergodic_validate_transitions_invalid():
"""Validate an invalid ergodic transition matrix"""
topology = _ErgodicTopology(n_states=5, random_state=rng)
transitions = _LeftRightTopology(n_states=5, random_state=rng).random_transitions()
with pytest.warns(UserWarning):
topology.validate_transitions(transitions)
def test_ergodic_validate_transitions_valid():
"""Validate a valid ergodic transition matrix"""
topology = _ErgodicTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
topology.validate_transitions(transitions)
# =============== #
# _LinearTopology #
# =============== #
# ------------------------------------- #
# _LinearTopology.uniform_transitions() #
# ------------------------------------- #
def test_linear_uniform_transitions_min():
"""Generate a uniform linear transition matrix with minimal states"""
topology = _LinearTopology(n_states=1, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[1.]
]))
def test_linear_uniform_transitions_small():
"""Generate a uniform linear transition matrix with few states"""
topology = _LinearTopology(n_states=2, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.5, 0.5],
[0. , 1. ]
]))
def test_linear_uniform_transitions_many():
"""Generate a uniform linear transition matrix with many states"""
topology = _LinearTopology(n_states=5, random_state=rng)
transitions = topology.uniform_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.5, 0.5, 0. , 0. , 0. ],
[0. , 0.5, 0.5, 0. , 0. ],
[0. , 0. , 0.5, 0.5, 0. ],
[0. , 0. , 0. , 0.5, 0.5],
[0. , 0. , 0. , 0. , 1. ]
]))
# ------------------------------------ #
# _LinearTopology.random_transitions() #
# ------------------------------------ #
def test_linear_random_transitions_min():
"""Generate a random linear transition matrix with minimal states"""
topology = _LinearTopology(n_states=1, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[1.]
]))
def test_linear_random_transitions_small():
"""Generate a random linear transition matrix with few states"""
topology = _LinearTopology(n_states=2, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.65157396, 0.34842604],
[0. , 1. ]
]))
def test_linear_random_transitions_many():
"""Generate a random linear transition matrix with many states"""
topology = _LinearTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
assert_distribution(transitions)
assert_equal(transitions, np.array([
[0.44455421, 0.55544579, 0. , 0. , 0. ],
[0. , 0.57553614, 0.42446386, 0. , 0. ],
[0. , 0. , 0.92014965, 0.07985035, 0. ],
[0. , 0. , 0. , 0.66790982, 0.33209018],
[0. , 0. , 0. , 0. , 1. ]
]))
# -------------------------------------- #
# _LinearTopology.validate_transitions() #
# -------------------------------------- #
def test_linear_validate_transitions_invalid():
"""Validate an invalid linear transition matrix"""
topology = _LinearTopology(n_states=5, random_state=rng)
transitions = _ErgodicTopology(n_states=5, random_state=rng).random_transitions()
with pytest.raises(ValueError) as e:
topology.validate_transitions(transitions)
assert str(e.value) == 'Left-right transition matrix must be upper-triangular'
def test_linear_validate_transitions_valid():
"""Validate a valid linear transition matrix"""
topology = _LinearTopology(n_states=5, random_state=rng)
transitions = topology.random_transitions()
topology.validate_transitions(transitions)
|
[
"sequentia.classifiers._Topology",
"sequentia.classifiers._LinearTopology",
"pytest.warns",
"sequentia.classifiers._LeftRightTopology",
"numpy.array",
"pytest.raises",
"numpy.random.seed",
"numpy.random.RandomState",
"sequentia.classifiers._ErgodicTopology"
] |
[((261, 281), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (275, 281), True, 'import pytest, warnings, numpy as np\n'), ((288, 315), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (309, 315), True, 'import pytest, warnings, numpy as np\n'), ((594, 633), 'sequentia.classifiers._Topology', '_Topology', ([], {'n_states': '(1)', 'random_state': 'rng'}), '(n_states=1, random_state=rng)\n', (603, 633), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((888, 927), 'sequentia.classifiers._Topology', '_Topology', ([], {'n_states': '(2)', 'random_state': 'rng'}), '(n_states=2, random_state=rng)\n', (897, 927), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((1186, 1225), 'sequentia.classifiers._Topology', '_Topology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (1195, 1225), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((1593, 1632), 'sequentia.classifiers._Topology', '_Topology', ([], {'n_states': '(1)', 'random_state': 'rng'}), '(n_states=1, random_state=rng)\n', (1602, 1632), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((1882, 1921), 'sequentia.classifiers._Topology', '_Topology', ([], {'n_states': '(2)', 'random_state': 'rng'}), '(n_states=2, random_state=rng)\n', (1891, 1921), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((2191, 2230), 'sequentia.classifiers._Topology', '_Topology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (2200, 2230), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((2763, 2811), 'sequentia.classifiers._LeftRightTopology', '_LeftRightTopology', ([], {'n_states': '(1)', 'random_state': 'rng'}), '(n_states=1, random_state=rng)\n', (2781, 2811), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((3099, 3147), 'sequentia.classifiers._LeftRightTopology', '_LeftRightTopology', ([], {'n_states': '(2)', 'random_state': 'rng'}), '(n_states=2, random_state=rng)\n', (3117, 3147), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((3461, 3509), 'sequentia.classifiers._LeftRightTopology', '_LeftRightTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (3479, 3509), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((4202, 4250), 'sequentia.classifiers._LeftRightTopology', '_LeftRightTopology', ([], {'n_states': '(1)', 'random_state': 'rng'}), '(n_states=1, random_state=rng)\n', (4220, 4250), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((4535, 4583), 'sequentia.classifiers._LeftRightTopology', '_LeftRightTopology', ([], {'n_states': '(2)', 'random_state': 'rng'}), '(n_states=2, random_state=rng)\n', (4553, 4583), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((4922, 4970), 'sequentia.classifiers._LeftRightTopology', '_LeftRightTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, 
random_state=rng)\n', (4940, 4970), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((5720, 5768), 'sequentia.classifiers._LeftRightTopology', '_LeftRightTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (5738, 5768), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((6152, 6200), 'sequentia.classifiers._LeftRightTopology', '_LeftRightTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (6170, 6200), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((6561, 6607), 'sequentia.classifiers._ErgodicTopology', '_ErgodicTopology', ([], {'n_states': '(1)', 'random_state': 'rng'}), '(n_states=1, random_state=rng)\n', (6577, 6607), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((6889, 6935), 'sequentia.classifiers._ErgodicTopology', '_ErgodicTopology', ([], {'n_states': '(2)', 'random_state': 'rng'}), '(n_states=2, random_state=rng)\n', (6905, 6935), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((7243, 7289), 'sequentia.classifiers._ErgodicTopology', '_ErgodicTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (7259, 7289), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((7859, 7905), 'sequentia.classifiers._ErgodicTopology', '_ErgodicTopology', ([], {'n_states': '(1)', 'random_state': 'rng'}), '(n_states=1, random_state=rng)\n', (7875, 7905), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((8184, 8230), 'sequentia.classifiers._ErgodicTopology', '_ErgodicTopology', ([], {'n_states': '(2)', 'random_state': 'rng'}), '(n_states=2, random_state=rng)\n', (8200, 8230), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((8563, 8609), 'sequentia.classifiers._ErgodicTopology', '_ErgodicTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (8579, 8609), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((9347, 9393), 'sequentia.classifiers._ErgodicTopology', '_ErgodicTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (9363, 9393), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((9685, 9731), 'sequentia.classifiers._ErgodicTopology', '_ErgodicTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (9701, 9731), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((10148, 10193), 'sequentia.classifiers._LinearTopology', '_LinearTopology', ([], {'n_states': '(1)', 'random_state': 'rng'}), '(n_states=1, random_state=rng)\n', (10163, 10193), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((10473, 10518), 'sequentia.classifiers._LinearTopology', '_LinearTopology', ([], {'n_states': '(2)', 'random_state': 'rng'}), '(n_states=2, random_state=rng)\n', (10488, 10518), False, 'from sequentia.classifiers 
import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((10824, 10869), 'sequentia.classifiers._LinearTopology', '_LinearTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (10839, 10869), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((11434, 11479), 'sequentia.classifiers._LinearTopology', '_LinearTopology', ([], {'n_states': '(1)', 'random_state': 'rng'}), '(n_states=1, random_state=rng)\n', (11449, 11479), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((11756, 11801), 'sequentia.classifiers._LinearTopology', '_LinearTopology', ([], {'n_states': '(2)', 'random_state': 'rng'}), '(n_states=2, random_state=rng)\n', (11771, 11801), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((12132, 12177), 'sequentia.classifiers._LinearTopology', '_LinearTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (12147, 12177), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((12910, 12955), 'sequentia.classifiers._LinearTopology', '_LinearTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (12925, 12955), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((13331, 13376), 'sequentia.classifiers._LinearTopology', '_LinearTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (13346, 13376), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((734, 749), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (742, 749), True, 'import pytest, warnings, numpy as np\n'), ((1028, 1048), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (1036, 1048), True, 'import pytest, warnings, numpy as np\n'), ((1326, 1361), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2, 0.2, 0.2])\n', (1334, 1361), True, 'import pytest, warnings, numpy as np\n'), ((1732, 1747), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1740, 1747), True, 'import pytest, warnings, numpy as np\n'), ((2021, 2055), 'numpy.array', 'np.array', (['[0.57633871, 0.42366129]'], {}), '([0.57633871, 0.42366129])\n', (2029, 2055), True, 'import pytest, warnings, numpy as np\n'), ((2330, 2400), 'numpy.array', 'np.array', (['[0.15210286, 0.10647349, 0.20059295, 0.11120171, 0.42962898]'], {}), '([0.15210286, 0.10647349, 0.20059295, 0.11120171, 0.42962898])\n', (2338, 2400), True, 'import pytest, warnings, numpy as np\n'), ((2928, 2945), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (2936, 2945), True, 'import pytest, warnings, numpy as np\n'), ((3264, 3298), 'numpy.array', 'np.array', (['[[0.5, 0.5], [0.0, 1.0]]'], {}), '([[0.5, 0.5], [0.0, 1.0]])\n', (3272, 3298), True, 'import pytest, warnings, numpy as np\n'), ((3626, 3806), 'numpy.array', 'np.array', (['[[0.2, 0.2, 0.2, 0.2, 0.2], [0.0, 0.25, 0.25, 0.25, 0.25], [0.0, 0.0, \n 0.33333333, 0.33333333, 0.33333333], [0.0, 0.0, 0.0, 0.5, 0.5], [0.0, \n 0.0, 0.0, 0.0, 1.0]]'], {}), '([[0.2, 0.2, 0.2, 0.2, 0.2], [0.0, 0.25, 0.25, 0.25, 0.25], [0.0, \n 0.0, 0.33333333, 0.33333333, 0.33333333], [0.0, 0.0, 0.0, 0.5, 0.5], [\n 0.0, 0.0, 0.0, 0.0, 1.0]])\n', (3634, 3806), True, 
'import pytest, warnings, numpy as np\n'), ((4366, 4383), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (4374, 4383), True, 'import pytest, warnings, numpy as np\n'), ((4699, 4747), 'numpy.array', 'np.array', (['[[0.23561633, 0.76438367], [0.0, 1.0]]'], {}), '([[0.23561633, 0.76438367], [0.0, 1.0]])\n', (4707, 4747), True, 'import pytest, warnings, numpy as np\n'), ((5086, 5340), 'numpy.array', 'np.array', (['[[0.23169814, 0.71716356, 0.02033845, 0.02516204, 0.00563782], [0.0, \n 0.19474072, 0.16405008, 0.22228532, 0.41892388], [0.0, 0.0, 0.42912755,\n 0.16545797, 0.40541448], [0.0, 0.0, 0.0, 0.109713, 0.890287], [0.0, 0.0,\n 0.0, 0.0, 1.0]]'], {}), '([[0.23169814, 0.71716356, 0.02033845, 0.02516204, 0.00563782], [\n 0.0, 0.19474072, 0.16405008, 0.22228532, 0.41892388], [0.0, 0.0, \n 0.42912755, 0.16545797, 0.40541448], [0.0, 0.0, 0.0, 0.109713, 0.890287\n ], [0.0, 0.0, 0.0, 0.0, 1.0]])\n', (5094, 5340), True, 'import pytest, warnings, numpy as np\n'), ((5864, 5889), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5877, 5889), False, 'import pytest, warnings, numpy as np\n'), ((6724, 6741), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (6732, 6741), True, 'import pytest, warnings, numpy as np\n'), ((7052, 7086), 'numpy.array', 'np.array', (['[[0.5, 0.5], [0.5, 0.5]]'], {}), '([[0.5, 0.5], [0.5, 0.5]])\n', (7060, 7086), True, 'import pytest, warnings, numpy as np\n'), ((7406, 7556), 'numpy.array', 'np.array', (['[[0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2,\n 0.2], [0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2]]'], {}), '([[0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, \n 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2, 0.2]])\n', (7414, 7556), True, 'import pytest, warnings, numpy as np\n'), ((8021, 8038), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (8029, 8038), True, 'import pytest, warnings, numpy as np\n'), ((8346, 8406), 'numpy.array', 'np.array', (['[[0.9474011, 0.0525989], [0.85567599, 0.14432401]]'], {}), '([[0.9474011, 0.0525989], [0.85567599, 0.14432401]])\n', (8354, 8406), True, 'import pytest, warnings, numpy as np\n'), ((8725, 9063), 'numpy.array', 'np.array', (['[[0.58715548, 0.14491542, 0.20980762, 0.00623944, 0.05188205], [0.0840705, \n 0.23055049, 0.08297536, 0.25124688, 0.35115677], [0.02117615, \n 0.37664662, 0.26705912, 0.09851123, 0.23660688], [0.01938041, \n 0.16853843, 0.52046123, 0.07535256, 0.21626737], [0.04996846, \n 0.44545843, 0.12079423, 0.07154241, 0.31223646]]'], {}), '([[0.58715548, 0.14491542, 0.20980762, 0.00623944, 0.05188205], [\n 0.0840705, 0.23055049, 0.08297536, 0.25124688, 0.35115677], [0.02117615,\n 0.37664662, 0.26705912, 0.09851123, 0.23660688], [0.01938041, \n 0.16853843, 0.52046123, 0.07535256, 0.21626737], [0.04996846, \n 0.44545843, 0.12079423, 0.07154241, 0.31223646]])\n', (8733, 9063), True, 'import pytest, warnings, numpy as np\n'), ((9491, 9516), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (9503, 9516), False, 'import pytest, warnings, numpy as np\n'), ((10310, 10327), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (10318, 10327), True, 'import pytest, warnings, numpy as np\n'), ((10635, 10669), 'numpy.array', 'np.array', (['[[0.5, 0.5], [0.0, 1.0]]'], {}), '([[0.5, 0.5], [0.0, 1.0]])\n', (10643, 10669), True, 'import pytest, warnings, numpy as np\n'), ((10986, 11136), 'numpy.array', 'np.array', (['[[0.5, 0.5, 0.0, 0.0, 0.0], [0.0, 0.5, 0.5, 0.0, 0.0], 
[0.0, 0.0, 0.5, 0.5,\n 0.0], [0.0, 0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.0, 0.0, 1.0]]'], {}), '([[0.5, 0.5, 0.0, 0.0, 0.0], [0.0, 0.5, 0.5, 0.0, 0.0], [0.0, 0.0, \n 0.5, 0.5, 0.0], [0.0, 0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.0, 0.0, 1.0]])\n', (10994, 11136), True, 'import pytest, warnings, numpy as np\n'), ((11595, 11612), 'numpy.array', 'np.array', (['[[1.0]]'], {}), '([[1.0]])\n', (11603, 11612), True, 'import pytest, warnings, numpy as np\n'), ((11917, 11965), 'numpy.array', 'np.array', (['[[0.65157396, 0.34842604], [0.0, 1.0]]'], {}), '([[0.65157396, 0.34842604], [0.0, 1.0]])\n', (11925, 11965), True, 'import pytest, warnings, numpy as np\n'), ((12293, 12504), 'numpy.array', 'np.array', (['[[0.44455421, 0.55544579, 0.0, 0.0, 0.0], [0.0, 0.57553614, 0.42446386, 0.0,\n 0.0], [0.0, 0.0, 0.92014965, 0.07985035, 0.0], [0.0, 0.0, 0.0, \n 0.66790982, 0.33209018], [0.0, 0.0, 0.0, 0.0, 1.0]]'], {}), '([[0.44455421, 0.55544579, 0.0, 0.0, 0.0], [0.0, 0.57553614, \n 0.42446386, 0.0, 0.0], [0.0, 0.0, 0.92014965, 0.07985035, 0.0], [0.0, \n 0.0, 0.0, 0.66790982, 0.33209018], [0.0, 0.0, 0.0, 0.0, 1.0]])\n', (12301, 12504), True, 'import pytest, warnings, numpy as np\n'), ((13051, 13076), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13064, 13076), False, 'import pytest, warnings, numpy as np\n'), ((5787, 5833), 'sequentia.classifiers._ErgodicTopology', '_ErgodicTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (5803, 5833), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((9412, 9460), 'sequentia.classifiers._LeftRightTopology', '_LeftRightTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (9430, 9460), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n'), ((12974, 13020), 'sequentia.classifiers._ErgodicTopology', '_ErgodicTopology', ([], {'n_states': '(5)', 'random_state': 'rng'}), '(n_states=5, random_state=rng)\n', (12990, 13020), False, 'from sequentia.classifiers import _Topology, _LeftRightTopology, _ErgodicTopology, _LinearTopology\n')]
|
# https://arxiv.org/pdf/1703.02910.pdf, Deep Bayesian Active Learning with Image Data
import numpy as np
from .baseline import Strategy
from ..helpers.time import timeit
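# The strategies below score unlabelled samples from `nb_forward` stochastic
# ("bayesian") forward passes: `evaluate_dataset` stacks the per-pass class
# probabilities, and each subclass turns that stack into an acquisition score
# (KL divergence to the consensus, predictive entropy, or BALD).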
class BayesianActiveLearning(Strategy):
def __init__(self, nb_forward=10, **kwargs):
super(BayesianActiveLearning, self).__init__()
self.nb_forward = nb_forward
@timeit
def evaluate_dataset(self, dataset, learner, log_time={}):
return np.stack([learner.inference(dataset, bayesian=True)['class_probabilities'] for _ in range(self.nb_forward)])
@timeit
def score_dataset(self, dataset, learner, log_time={}):
raise NotImplementedError
def return_top_indices(self, dataset, learner, top, log_time={}):
scores = self.score_dataset(dataset, learner, log_time=log_time)
sorted_idx = np.argsort(scores)
return sorted_idx[-top:]
class BayesianKLDivergence(BayesianActiveLearning):
@timeit
def score_dataset(self, dataset, learner, log_time={}):
stacked_probabilities = self.evaluate_dataset(
dataset, learner, log_time=log_time)
C, N, _ = stacked_probabilities.shape
consensus_probabilities = np.mean(stacked_probabilities, axis=0)
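        # `consensus_probabilities` is the mean predictive distribution over the
        # nb_forward passes; each divergences[i, c] below is KL(pass c || consensus)
        # for sample i, and the score is the average divergence across passes.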
divergences = np.zeros((N, C))
for i in range(N):
for c in range(C):
probabilities_ic = stacked_probabilities[c, i]
probabilities_i = consensus_probabilities[i]
divergences[i, c] = np.sum(
probabilities_ic * np.log(probabilities_ic/probabilities_i))
return np.mean(divergences, axis=1)
class BayesianEntropyStrategy(BayesianActiveLearning):
@timeit
def score_dataset(self, dataset, learner, log_time={}):
stacked_probabilities = self.evaluate_dataset(
dataset, learner, log_time=log_time)
probabilities = np.mean(stacked_probabilities, axis=0)
assert len(probabilities) == len(dataset)
entropies = -np.sum(probabilities * np.log(probabilities), axis=1)
return entropies
class BayesianBALDStrategy(BayesianActiveLearning):
@timeit
def score_dataset(self, dataset, learner, log_time={}):
inference_result = learner.inference(dataset)
model_probabilities = inference_result['class_probabilities']
model_entropies = - \
np.sum(model_probabilities * np.log(model_probabilities), axis=1)
stacked_probabilities = self.evaluate_dataset(
dataset, learner, log_time=log_time)
average_entropies = - np.mean(
np.sum(stacked_probabilities * np.log(stacked_probabilities), axis=2), axis=0)
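        # BALD score: entropy of the deterministic model prediction minus the mean
        # entropy of the stochastic passes, an approximation of the mutual
        # information between the prediction and the sampled model parameters.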
return model_entropies - average_entropies
|
[
"numpy.argsort",
"numpy.mean",
"numpy.zeros",
"numpy.log"
] |
[((828, 846), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (838, 846), True, 'import numpy as np\n'), ((1190, 1228), 'numpy.mean', 'np.mean', (['stacked_probabilities'], {'axis': '(0)'}), '(stacked_probabilities, axis=0)\n', (1197, 1228), True, 'import numpy as np\n'), ((1251, 1267), 'numpy.zeros', 'np.zeros', (['(N, C)'], {}), '((N, C))\n', (1259, 1267), True, 'import numpy as np\n'), ((1590, 1618), 'numpy.mean', 'np.mean', (['divergences'], {'axis': '(1)'}), '(divergences, axis=1)\n', (1597, 1618), True, 'import numpy as np\n'), ((1876, 1914), 'numpy.mean', 'np.mean', (['stacked_probabilities'], {'axis': '(0)'}), '(stacked_probabilities, axis=0)\n', (1883, 1914), True, 'import numpy as np\n'), ((2009, 2030), 'numpy.log', 'np.log', (['probabilities'], {}), '(probabilities)\n', (2015, 2030), True, 'import numpy as np\n'), ((2386, 2413), 'numpy.log', 'np.log', (['model_probabilities'], {}), '(model_probabilities)\n', (2392, 2413), True, 'import numpy as np\n'), ((1533, 1575), 'numpy.log', 'np.log', (['(probabilities_ic / probabilities_i)'], {}), '(probabilities_ic / probabilities_i)\n', (1539, 1575), True, 'import numpy as np\n'), ((2609, 2638), 'numpy.log', 'np.log', (['stacked_probabilities'], {}), '(stacked_probabilities)\n', (2615, 2638), True, 'import numpy as np\n')]
|
import torch
from torchvision.utils import make_grid
import numpy as np
from base import BaseTrainer
from models import Generator, Discriminator
from losses import *
from data_loaders import CartoonDataLoader
from utils import MetricTracker
class ExpnameTrainer(BaseTrainer):
def __init__(self, config):
super(ExpnameTrainer, self).__init__(config)
self.logger.info("Creating data loaders...")
self.train_dataloader, self.valid_dataloader = self._build_dataloader()
self.log_step = int(np.sqrt(self.train_dataloader.batch_size))
self.logger.info("Creating model architecture...")
gen, disc = self._build_model()
# resume
if self.config.resume is not None:
self._resume_checkpoint(config.resume)
# move to device
self.gen = gen.to(self.device)
self.disc = disc.to(self.device)
if len(self.device_ids) > 1:
self.gen = torch.nn.DataParallel(self.gen, device_ids=self.device_ids)
self.disc = torch.nn.DataParallel(self.disc, device_ids=self.device_ids)
self.logger.info("Creating optimizers...")
self.gen_optim, self.disc_optim = self._build_optimizer(self.gen, self.disc)
# build loss
self.logger.info("Creating losses...")
self._build_criterion()
self.logger.info("Creating metric trackers...")
self._build_metrics()
def _build_dataloader(self):
train_dataloader = CartoonDataLoader(
data_dir=self.config.data_dir,
src_style='real',
tar_style=self.config.tar_style,
batch_size=self.config.batch_size,
image_size=self.config.image_size,
num_workers=self.config.num_workers)
valid_dataloader = train_dataloader.split_validation()
return train_dataloader, valid_dataloader
def _build_model(self):
""" build generator and discriminator model """
gen = Generator(self.config.image_size, self.config.down_size, self.config.num_res, self.config.skip_conn)
disc = Discriminator(self.config.image_size, self.config.down_size)
return gen, disc
def _build_optimizer(self, gen, disc):
""" build generator and discriminator optimizers """
gen_optim = torch.optim.AdamW(
gen.parameters(),
lr=self.config.g_lr,
weight_decay=self.config.weight_decay,
betas=(0.5, 0.999))
disc_optim = torch.optim.AdamW(
disc.parameters(),
lr=self.config.d_lr,
weight_decay=self.config.weight_decay,
betas=(0.5, 0.999))
return gen_optim, disc_optim
def _build_criterion(self):
self.adv_criterion = eval('{}Loss'.format(self.config.adv_criterion))()
# TODO add extra criterion you need here
def _build_metrics(self):
# TODO: add the loss you want to log here
self.metric_names = ['disc', 'gen']
self.train_metrics = MetricTracker(*[metric for metric in self.metric_names], writer=self.writer)
self.valid_metrics = MetricTracker(*[metric for metric in self.metric_names], writer=self.writer)
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains average loss and metric in this epoch.
"""
self.gen.train()
self.disc.train()
self.train_metrics.reset()
for batch_idx, (src_imgs, tar_imgs) in enumerate(self.train_dataloader):
src_imgs, tar_imgs = src_imgs.to(self.device), tar_imgs.to(self.device)
self.gen_optim.zero_grad()
self.disc_optim.zero_grad()
raise NotImplementedError
# ============ Generation ============ #
# ============ train D ============ #
# ============ train G ============ #
# ============ log ============ #
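            # One typical way to fill in the steps above (a sketch, not necessarily the
            # intended solution): fake_tar_imgs = self.gen(src_imgs); score tar_imgs and
            # fake_tar_imgs.detach() with self.disc and combine them via self.adv_criterion
            # into disc_loss, then backward + self.disc_optim.step(); score fake_tar_imgs
            # again for gen_loss, then backward + self.gen_optim.step().
            # The logging below assumes disc_loss and gen_loss have been computed.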
self.writer.set_step((epoch - 1) * len(self.train_dataloader) + batch_idx)
# TODO: add the loss you want to log here
if batch_idx % self.log_step == 0:
self.logger.info('Train Epoch: {:d} {:d} Disc. Loss: {:.4f} Gen. Loss {:.4f}'.format(
epoch,
self._progress(batch_idx),
disc_loss.item(),
gen_loss.item()))
log = self.train_metrics.result()
val_log = self._valid_epoch(epoch)
log.update(**{'val_'+k : v for k, v in val_log.items()})
# shuffle data loader
self.train_dataloader.shuffle()
return log
def _valid_epoch(self, epoch):
"""
Validate after training an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains information about validation
"""
self.gen.eval()
self.disc.eval()
disc_losses = []
gen_losses = []
self.valid_metrics.reset()
with torch.no_grad():
for batch_idx, (src_imgs, tar_imgs) in enumerate(self.valid_dataloader):
src_imgs, tar_imgs = src_imgs.to(self.device), tar_imgs.to(self.device)
# TODO similar to train but not optimizer.step()
raise NotImplementedError
# ============ Generation ============ #
# ============ D Loss ============ #
# ============ G Loss ============ #
# log losses
self.writer.set_step(epoch)
self.valid_metrics.update('disc', np.mean(disc_losses))
self.valid_metrics.update('gen', np.mean(gen_losses))
# log images
src_tar_imgs = torch.cat([src_imgs.cpu(), fake_tar_imgs.cpu()], dim=-1)
self.writer.add_image('src2tar', make_grid(src_tar_imgs.cpu(), nrow=1, normalize=True))
return self.valid_metrics.result()
def _save_checkpoint(self, epoch):
"""
Saving checkpoints
:param epoch: current epoch number
"""
state = {
'epoch': epoch,
'gen_state_dict': self.gen.state_dict() if len(self.device_ids) <= 1 else self.gen.module.state_dict(),
'disc_state_dict': self.disc.state_dict() if len(self.device_ids) <= 1 else self.disc.module.state_dict(),
'gen_optim': self.gen_optim.state_dict(),
'disc_optim': self.disc_optim.state_dict()
}
filename = str(self.config.checkpoint_dir + 'current.pth')
torch.save(state, filename)
self.logger.info("Saving checkpoint: {} ...".format(filename))
if epoch % self.save_period == 0:
filename = str(self.config.checkpoint_dir + 'epoch{}.pth'.format(epoch))
torch.save(state, filename)
self.logger.info("Saving checkpoint: {} ...".format(filename))
def _resume_checkpoint(self, resume_path):
"""
Resume from saved checkpoints
:param resume_path: Checkpoint path to be resumed
"""
resume_path = str(resume_path)
self.logger.info("Loading checkpoint: {} ...".format(resume_path))
checkpoint = torch.load(resume_path)
self.start_epoch = checkpoint['epoch'] + 1
# load architecture params from checkpoint.
self.gen.load_state_dict(checkpoint['gen_state_dict'])
self.disc.load_state_dict(checkpoint['disc_state_dict'])
# load optimizer state from checkpoint only when optimizer type is not changed.
self.gen_optim.load_state_dict(checkpoint['gen_optim'])
self.disc_optim.load_state_dict(checkpoint['disc_optim'])
self.logger.info("Checkpoint loaded. Resume training from epoch {}".format(self.start_epoch))
|
[
"numpy.mean",
"numpy.sqrt",
"models.Generator",
"torch.load",
"data_loaders.CartoonDataLoader",
"torch.nn.DataParallel",
"models.Discriminator",
"utils.MetricTracker",
"torch.save",
"torch.no_grad"
] |
[((1475, 1689), 'data_loaders.CartoonDataLoader', 'CartoonDataLoader', ([], {'data_dir': 'self.config.data_dir', 'src_style': '"""real"""', 'tar_style': 'self.config.tar_style', 'batch_size': 'self.config.batch_size', 'image_size': 'self.config.image_size', 'num_workers': 'self.config.num_workers'}), "(data_dir=self.config.data_dir, src_style='real',\n tar_style=self.config.tar_style, batch_size=self.config.batch_size,\n image_size=self.config.image_size, num_workers=self.config.num_workers)\n", (1492, 1689), False, 'from data_loaders import CartoonDataLoader\n'), ((1967, 2072), 'models.Generator', 'Generator', (['self.config.image_size', 'self.config.down_size', 'self.config.num_res', 'self.config.skip_conn'], {}), '(self.config.image_size, self.config.down_size, self.config.\n num_res, self.config.skip_conn)\n', (1976, 2072), False, 'from models import Generator, Discriminator\n'), ((2083, 2143), 'models.Discriminator', 'Discriminator', (['self.config.image_size', 'self.config.down_size'], {}), '(self.config.image_size, self.config.down_size)\n', (2096, 2143), False, 'from models import Generator, Discriminator\n'), ((2999, 3075), 'utils.MetricTracker', 'MetricTracker', (['*[metric for metric in self.metric_names]'], {'writer': 'self.writer'}), '(*[metric for metric in self.metric_names], writer=self.writer)\n', (3012, 3075), False, 'from utils import MetricTracker\n'), ((3105, 3181), 'utils.MetricTracker', 'MetricTracker', (['*[metric for metric in self.metric_names]'], {'writer': 'self.writer'}), '(*[metric for metric in self.metric_names], writer=self.writer)\n', (3118, 3181), False, 'from utils import MetricTracker\n'), ((6700, 6727), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (6710, 6727), False, 'import torch\n'), ((7346, 7369), 'torch.load', 'torch.load', (['resume_path'], {}), '(resume_path)\n', (7356, 7369), False, 'import torch\n'), ((525, 566), 'numpy.sqrt', 'np.sqrt', (['self.train_dataloader.batch_size'], {}), '(self.train_dataloader.batch_size)\n', (532, 566), True, 'import numpy as np\n'), ((944, 1003), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['self.gen'], {'device_ids': 'self.device_ids'}), '(self.gen, device_ids=self.device_ids)\n', (965, 1003), False, 'import torch\n'), ((1028, 1088), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['self.disc'], {'device_ids': 'self.device_ids'}), '(self.disc, device_ids=self.device_ids)\n', (1049, 1088), False, 'import torch\n'), ((5035, 5050), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5048, 5050), False, 'import torch\n'), ((6939, 6966), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (6949, 6966), False, 'import torch\n'), ((5612, 5632), 'numpy.mean', 'np.mean', (['disc_losses'], {}), '(disc_losses)\n', (5619, 5632), True, 'import numpy as np\n'), ((5679, 5698), 'numpy.mean', 'np.mean', (['gen_losses'], {}), '(gen_losses)\n', (5686, 5698), True, 'import numpy as np\n')]
|
# Copyright 2013-2021 The Salish Sea MEOPAR contributors
# and The University of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Produce a figure that shows colour contours of a tracer on a vertical slice
along a section of the domain thalweg,
and on the surface for a section of the domain that excludes Puget Sound
in the south and Johnstone Strait in the north.
Testing notebook for this module is
https://nbviewer.jupyter.org/github/SalishSeaCast/SalishSeaNowcast/blob/main/notebooks/figures/research/TestTracerThalwegAndSurfaceHourly.ipynb
"""
from types import SimpleNamespace
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import gridspec
from salishsea_tools import visualisations as vis
from salishsea_tools import viz_tools
import nowcast.figures.website_theme
def make_figure(
hr,
tracer_var,
bathy,
mesh_mask,
clevels_thalweg,
clevels_surface,
cmap,
depth_integrated,
figsize=(16, 9),
theme=nowcast.figures.website_theme,
):
"""Plot colour contours of tracer on a vertical slice along a section of
the domain thalweg,
and on the surface for the Strait of Georgia and Juan de Fuca Strait
regions of the domain.
:param hr: UTC time in hours
:type hr: :class: str
:param tracer_var: Hourly average tracer results from NEMO run.
:type tracer_var: :py:class:`netCDF4.Variable`
:param bathy: Salish Sea NEMO model bathymetry data.
:type bathy: :class:`netCDF4.Dataset`
:param mesh_mask: NEMO-generated mesh mask for run that produced tracer_var.
:type mesh_mask: :class:`netCDF4.Dataset`
:param clevels_thalweg: Colour bar contour intervals for thalweg plot.
:type clevels_thalweg: :class:`numpy.ndarray`
:param clevels_surface: Colour bar contour intervals for surface plot.
:type clevels_surface: :class:`numpy.ndarray`
:param cmap: Colour map to use for tracer_var contour plots.
:type cmap: :py:class:`matplotlib.colors.LinearSegmentedColormap`
:param boolean depth_integrated: Integrate the tracer over the water column
depth when :py:obj:`True`.
:param 2-tuple figsize: Figure size (width, height) in inches.
:param theme: Module-like object that defines the style elements for the
figure. See :py:mod:`nowcast.figures.website_theme` for an
example.
:returns: :py:class:`matplotlib.figure.Figure`
"""
plot_data = _prep_plot_data(hr, tracer_var, mesh_mask, depth_integrated)
fig, (ax_thalweg, ax_surface) = _prep_fig_axes(figsize, theme)
cbar_thalweg = _plot_tracer_thalweg(
ax_thalweg, plot_data, bathy, mesh_mask, cmap, clevels_thalweg
)
_thalweg_axes_labels(ax_thalweg, plot_data, clevels_thalweg, cbar_thalweg, theme)
cbar_surface = _plot_tracer_surface(ax_surface, plot_data, cmap, clevels_surface)
_surface_axes_labels(
ax_surface, tracer_var, depth_integrated, clevels_surface, cbar_surface, theme
)
return fig
def clevels(tracer_var, mesh_mask, depth_integrated):
"""Calculate the colour bar contour intervals for the thalweg and surface
plot axes based on the tracer variable values at hr=0.
:param tracer_var: Hourly average tracer results from NEMO run.
:type tracer_var: :py:class:`netCDF4.Variable`
:param mesh_mask: NEMO-generated mesh mask for run that produced tracer_var.
:type mesh_mask: :class:`netCDF4.Dataset`
:param boolean depth_integrated: Integrate the tracer over the water column
depth when :py:obj:`True`.
:returns: Colour bar contour intervals for thalweg and surface plot axes.
:rtype: 2-tuple of :class:`numpy.ndarray` objects
"""
plot_data = _prep_plot_data(0, tracer_var, mesh_mask, depth_integrated)
clevels_thalweg, clevels_surface = _calc_clevels(plot_data)
return clevels_thalweg, clevels_surface
def _prep_plot_data(hr, tracer_var, mesh_mask, depth_integrated):
sj, ej = 200, 800
si, ei = 20, 395
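    # Index window for the surface panel: it trims the domain to the Strait of
    # Georgia / Juan de Fuca region, excluding Puget Sound in the south and
    # Johnstone Strait in the north (see module docstring).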
tracer_hr = tracer_var[hr]
masked_tracer_hr = np.ma.masked_where(mesh_mask["tmask"][0, ...] == 0, tracer_hr)
surface_hr = masked_tracer_hr[0, sj:ej, si:ei]
if depth_integrated:
grid_heights = mesh_mask.variables["e3t_1d"][:][0].reshape(
tracer_hr.shape[0], 1, 1
)
height_weighted = masked_tracer_hr[:, sj:ej, si:ei] * grid_heights
surface_hr = height_weighted.sum(axis=0)
return SimpleNamespace(
tracer_var=tracer_var,
tracer_hr=tracer_hr,
surface_hr=surface_hr,
surface_j_limits=(sj, ej),
surface_i_limits=(si, ei),
thalweg_depth_limits=(0, 450),
thalweg_length_limits=(0, 632),
)
def _prep_fig_axes(figsize, theme):
fig = plt.figure(figsize=figsize, facecolor=theme.COLOURS["figure"]["facecolor"])
gs = gridspec.GridSpec(1, 2, width_ratios=[1.4, 1])
ax_thalweg = fig.add_subplot(gs[0])
ax_thalweg.set_facecolor(theme.COLOURS["axes"]["background"])
ax_surface = fig.add_subplot(gs[1])
ax_surface.set_facecolor(theme.COLOURS["axes"]["background"])
return fig, (ax_thalweg, ax_surface)
def _calc_clevels(plot_data):
"""Calculate contour levels for the thalweg and surface plot axes."""
percent_98_thalweg = np.percentile(
np.ma.masked_values(plot_data.tracer_hr, 0).compressed(), 98
)
percent_2_thalweg = np.percentile(
np.ma.masked_values(plot_data.tracer_hr, 0).compressed(), 2
)
percent_98_surf = np.percentile(plot_data.surface_hr.compressed(), 98)
percent_2_surf = np.percentile(plot_data.surface_hr.compressed(), 2)
clevels_thalweg = np.arange(
percent_2_thalweg,
percent_98_thalweg,
(percent_98_thalweg - percent_2_thalweg) / 20.0,
)
clevels_surface = np.arange(
percent_2_surf, percent_98_surf, (percent_98_surf - percent_2_surf) / 20.0
)
return clevels_thalweg, clevels_surface
def _plot_tracer_thalweg(ax, plot_data, bathy, mesh_mask, cmap, clevels):
cbar = vis.contour_thalweg(
ax,
plot_data.tracer_hr,
bathy,
mesh_mask,
clevels=clevels,
cmap=cmap,
## TODO: Can this path be moved into nowcast.yaml config file?
thalweg_file="/SalishSeaCast/tools/bathymetry/thalweg_working" ".txt",
cbar_args={"fraction": 0.030, "pad": 0.04, "aspect": 45},
)
return cbar
def _thalweg_axes_labels(ax, plot_data, clevels, cbar, theme):
ax.set_xlim(plot_data.thalweg_length_limits)
ax.set_ylim(plot_data.thalweg_depth_limits[1], plot_data.thalweg_depth_limits[0])
label = f"{plot_data.tracer_var.long_name} [{plot_data.tracer_var.units}]"
_cbar_labels(cbar, clevels[::2], theme, label)
ax.set_xlabel(
"Distance along thalweg [km]",
color=theme.COLOURS["text"]["axis"],
fontproperties=theme.FONTS["axis"],
)
ax.set_ylabel(
"Depth [m]",
color=theme.COLOURS["text"]["axis"],
fontproperties=theme.FONTS["axis"],
)
theme.set_axis_colors(ax)
def _cbar_labels(cbar, contour_intervals, theme, label):
cbar.set_ticks(contour_intervals)
cbar.ax.axes.tick_params(labelcolor=theme.COLOURS["cbar"]["tick labels"])
cbar.set_label(
label, fontproperties=theme.FONTS["axis"], color=theme.COLOURS["text"]["axis"]
)
def _plot_tracer_surface(ax, plot_data, cmap, clevels):
x, y = np.meshgrid(
np.arange(*plot_data.surface_i_limits, dtype=int),
np.arange(*plot_data.surface_j_limits, dtype=int),
)
mesh = ax.contourf(
x, y, plot_data.surface_hr, levels=clevels, cmap=cmap, extend="both"
)
cbar = plt.colorbar(mesh, ax=ax, fraction=0.034, pad=0.04, aspect=45)
return cbar
def _surface_axes_labels(ax, tracer_var, depth_integrated, clevels, cbar, theme):
cbar_units = f"{tracer_var.units}*m" if depth_integrated else f"{tracer_var.units}"
cbar_label = f"{tracer_var.long_name} [{cbar_units}]"
_cbar_labels(cbar, clevels[::2], theme, cbar_label)
ax.set_xlabel(
"Grid x",
color=theme.COLOURS["text"]["axis"],
fontproperties=theme.FONTS["axis"],
)
ax.set_ylabel(
"Grid y",
color=theme.COLOURS["text"]["axis"],
fontproperties=theme.FONTS["axis"],
)
ax.set_facecolor("burlywood")
viz_tools.set_aspect(ax)
theme.set_axis_colors(ax)
|
[
"numpy.ma.masked_values",
"types.SimpleNamespace",
"matplotlib.pyplot.colorbar",
"numpy.ma.masked_where",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"salishsea_tools.visualisations.contour_thalweg",
"salishsea_tools.viz_tools.set_aspect",
"numpy.arange"
] |
[((4625, 4687), 'numpy.ma.masked_where', 'np.ma.masked_where', (["(mesh_mask['tmask'][0, ...] == 0)", 'tracer_hr'], {}), "(mesh_mask['tmask'][0, ...] == 0, tracer_hr)\n", (4643, 4687), True, 'import numpy as np\n'), ((5016, 5224), 'types.SimpleNamespace', 'SimpleNamespace', ([], {'tracer_var': 'tracer_var', 'tracer_hr': 'tracer_hr', 'surface_hr': 'surface_hr', 'surface_j_limits': '(sj, ej)', 'surface_i_limits': '(si, ei)', 'thalweg_depth_limits': '(0, 450)', 'thalweg_length_limits': '(0, 632)'}), '(tracer_var=tracer_var, tracer_hr=tracer_hr, surface_hr=\n surface_hr, surface_j_limits=(sj, ej), surface_i_limits=(si, ei),\n thalweg_depth_limits=(0, 450), thalweg_length_limits=(0, 632))\n', (5031, 5224), False, 'from types import SimpleNamespace\n'), ((5327, 5402), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'facecolor': "theme.COLOURS['figure']['facecolor']"}), "(figsize=figsize, facecolor=theme.COLOURS['figure']['facecolor'])\n", (5337, 5402), True, 'import matplotlib.pyplot as plt\n'), ((5413, 5459), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)'], {'width_ratios': '[1.4, 1]'}), '(1, 2, width_ratios=[1.4, 1])\n', (5430, 5459), False, 'from matplotlib import gridspec\n'), ((6220, 6321), 'numpy.arange', 'np.arange', (['percent_2_thalweg', 'percent_98_thalweg', '((percent_98_thalweg - percent_2_thalweg) / 20.0)'], {}), '(percent_2_thalweg, percent_98_thalweg, (percent_98_thalweg -\n percent_2_thalweg) / 20.0)\n', (6229, 6321), True, 'import numpy as np\n'), ((6371, 6460), 'numpy.arange', 'np.arange', (['percent_2_surf', 'percent_98_surf', '((percent_98_surf - percent_2_surf) / 20.0)'], {}), '(percent_2_surf, percent_98_surf, (percent_98_surf -\n percent_2_surf) / 20.0)\n', (6380, 6460), True, 'import numpy as np\n'), ((6602, 6832), 'salishsea_tools.visualisations.contour_thalweg', 'vis.contour_thalweg', (['ax', 'plot_data.tracer_hr', 'bathy', 'mesh_mask'], {'clevels': 'clevels', 'cmap': 'cmap', 'thalweg_file': '"""/SalishSeaCast/tools/bathymetry/thalweg_working.txt"""', 'cbar_args': "{'fraction': 0.03, 'pad': 0.04, 'aspect': 45}"}), "(ax, plot_data.tracer_hr, bathy, mesh_mask, clevels=\n clevels, cmap=cmap, thalweg_file=\n '/SalishSeaCast/tools/bathymetry/thalweg_working.txt', cbar_args={\n 'fraction': 0.03, 'pad': 0.04, 'aspect': 45})\n", (6621, 6832), True, 'from salishsea_tools import visualisations as vis\n'), ((8240, 8302), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mesh'], {'ax': 'ax', 'fraction': '(0.034)', 'pad': '(0.04)', 'aspect': '(45)'}), '(mesh, ax=ax, fraction=0.034, pad=0.04, aspect=45)\n', (8252, 8302), True, 'import matplotlib.pyplot as plt\n'), ((8907, 8931), 'salishsea_tools.viz_tools.set_aspect', 'viz_tools.set_aspect', (['ax'], {}), '(ax)\n', (8927, 8931), False, 'from salishsea_tools import viz_tools\n'), ((8006, 8055), 'numpy.arange', 'np.arange', (['*plot_data.surface_i_limits'], {'dtype': 'int'}), '(*plot_data.surface_i_limits, dtype=int)\n', (8015, 8055), True, 'import numpy as np\n'), ((8065, 8114), 'numpy.arange', 'np.arange', (['*plot_data.surface_j_limits'], {'dtype': 'int'}), '(*plot_data.surface_j_limits, dtype=int)\n', (8074, 8114), True, 'import numpy as np\n'), ((5870, 5913), 'numpy.ma.masked_values', 'np.ma.masked_values', (['plot_data.tracer_hr', '(0)'], {}), '(plot_data.tracer_hr, 0)\n', (5889, 5913), True, 'import numpy as np\n'), ((5984, 6027), 'numpy.ma.masked_values', 'np.ma.masked_values', (['plot_data.tracer_hr', '(0)'], {}), '(plot_data.tracer_hr, 0)\n', (6003, 6027), True, 'import numpy as 
np\n')]
|
#!/usr/bin/env python 3.6
# -*- coding: utf-8 -*-
"""
Created on Saturday Sep 16 16:58:58 2017
@author: Hans - Clément - Ali
"""
#----------Import_module-----------
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn import neighbors
try:
import ConfigParser as conf
except:
import configparser as conf
# KNN => we look at which individuals resemble each other and base the decision on them
#-------------------Fontion------------------------------------
def plot(genes) :
plt.pcolor(genes[np.arange(100)])
plt.colorbar()
plt.title('Example of gene expressions')
plt.ylabel('samples')
plt.xlabel('genes')
plt.show()
def geneHightCorrelation(G,Y) :
ncol = G.shape[1]
rho = np.zeros(ncol)
for k in range (ncol) :
#print (len(G[1:,k]))
#print (len(Y))
#print (G[1:,k], Y)
c = np.corrcoef(G[1:,k].astype(float), Y.astype(float))
rho[k] = c [0,1]
#print (rho)
    w = np.nonzero(abs(rho)>.1)[0] # Keep only the genes whose correlation
                                    # coefficient exceeds 0.1 in absolute value
#print (len(w))
#print (w)
return (rho, w)
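# knn(): estimate the mean misclassification error of k-nearest-neighbour
# classifiers for k = 1..6 over b = 100 random 75/25 train/test splits,
# using only the genes pre-selected by geneHightCorrelation().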
def knn (G,Y) :
w = geneHightCorrelation(G,Y)[1]
    n = len (X[0]) # Number of samples
    Xw = X[w] # Get the expression values of the genes with a coefficient > 0.1
#print (n)
Xw = Xw[1:]
b=100
n_neighbors = np.arange(1,7)
ErrClassif = np.zeros([len(n_neighbors),b])
#print (ErrClassif)
for i in range (b) :
itrain, itest = train_test_split(range(0, n-1), test_size = 0.25)
Xtrain = Xw.iloc[itrain]
ytrain = Y[np.asarray(itrain)] # because itrain is a list
# and y is indexed from 6 to ...
ytest = Y[np.asarray(itest)] # because itest is a list
for j in n_neighbors:
clf = neighbors.KNeighborsClassifier(j)
clf.fit(Xtrain, ytrain)
yhat = clf.predict(Xw.iloc[itest])
#print (yhat)
            ErrClassif[j-1,i] = np.mean(ytest!=yhat)
#print (ErrClassif)
return (ErrClassif, n_neighbors)
"""
# Best result for 1 neighbor
ibest = 1
ntest = 10 # 10 because len(itest) = 10
y_score = np.zeros([ntest,B]) # 10 because len(itest) = 10
y_test = np.zeros([ntest,B]) # 10 because len(itest) = 10
for b in range(B):
itrain,itest=train_test_split(range(0,n-1),test_size=0.25)
Xtrain = Xw.iloc[itrain]
ytrain = Y[np.asarray(itrain)] # because itrain is a list
# and y is indexed from 6 to ...
ytest = Y[np.asarray(itest)] # because itest is a list
y_test[:,b] = ytest
clf = neighbors.KNeighborsClassifier(ibest)
clf.fit(Xtrain, ytrain)
y_score[:,b] = clf.predict_proba(Xw.iloc[itest])[:,1]
ROC(y_test,y_score,"kNN, 1 neighbor")
"""
#----------------Menu Principale----------------------------------
config = conf.ConfigParser()
config.readfp(open('../configuration.ini','r'))
xtrain= config.get('Data', 'xtrain')
path_xtrain = str(xtrain)
gene = pd.read_table("../data/xtrain.txt", header=None)
labels = pd.read_table("../data/ytrain.txt", header=None)
ncol = gene.shape[1]
X = gene.T
Y = np.array(labels).reshape(184)
G = np.array(X)
geneHightCorrelation(G,Y)
ErrClassif , n_neighbors = knn (G,Y)
#plt.boxplot(ErrClassif.T,labels=n_neighbors)
plt.plot(ErrClassif.T)
plt.ylim(0,1)
plt.ylabel('Mean classification error')
plt.xlabel('nb of neighbors')
#plt.plot(rho)
plt.show()
|
[
"numpy.mean",
"matplotlib.pyplot.title",
"configparser.ConfigParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"numpy.asarray",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.array",
"numpy.zeros",
"pandas.read_table",
"matplotlib.pyplot.ylim",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((2982, 3001), 'configparser.ConfigParser', 'conf.ConfigParser', ([], {}), '()\n', (2999, 3001), True, 'import configparser as conf\n'), ((3126, 3174), 'pandas.read_table', 'pd.read_table', (['"""../data/xtrain.txt"""'], {'header': 'None'}), "('../data/xtrain.txt', header=None)\n", (3139, 3174), True, 'import pandas as pd\n'), ((3185, 3233), 'pandas.read_table', 'pd.read_table', (['"""../data/ytrain.txt"""'], {'header': 'None'}), "('../data/ytrain.txt', header=None)\n", (3198, 3233), True, 'import pandas as pd\n'), ((3312, 3323), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3320, 3323), True, 'import numpy as np\n'), ((3443, 3465), 'matplotlib.pyplot.plot', 'plt.plot', (['ErrClassif.T'], {}), '(ErrClassif.T)\n', (3451, 3465), True, 'import matplotlib.pyplot as plt\n'), ((3467, 3481), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (3475, 3481), True, 'import matplotlib.pyplot as plt\n'), ((3482, 3521), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean classification error"""'], {}), "('Mean classification error')\n", (3492, 3521), True, 'import matplotlib.pyplot as plt\n'), ((3523, 3552), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""nb of neighbors"""'], {}), "('nb of neighbors')\n", (3533, 3552), True, 'import matplotlib.pyplot as plt\n'), ((3570, 3580), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3578, 3580), True, 'import matplotlib.pyplot as plt\n'), ((721, 735), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (733, 735), True, 'import matplotlib.pyplot as plt\n'), ((741, 781), 'matplotlib.pyplot.title', 'plt.title', (['"""Example of gene expressions"""'], {}), "('Example of gene expressions')\n", (750, 781), True, 'import matplotlib.pyplot as plt\n'), ((787, 808), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""samples"""'], {}), "('samples')\n", (797, 808), True, 'import matplotlib.pyplot as plt\n'), ((814, 833), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""genes"""'], {}), "('genes')\n", (824, 833), True, 'import matplotlib.pyplot as plt\n'), ((839, 849), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (847, 849), True, 'import matplotlib.pyplot as plt\n'), ((913, 927), 'numpy.zeros', 'np.zeros', (['ncol'], {}), '(ncol)\n', (921, 927), True, 'import numpy as np\n'), ((1515, 1530), 'numpy.arange', 'np.arange', (['(1)', '(7)'], {}), '(1, 7)\n', (1524, 1530), True, 'import numpy as np\n'), ((3277, 3293), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3285, 3293), True, 'import numpy as np\n'), ((699, 713), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (708, 713), True, 'import numpy as np\n'), ((1736, 1754), 'numpy.asarray', 'np.asarray', (['itrain'], {}), '(itrain)\n', (1746, 1754), True, 'import numpy as np\n'), ((1857, 1874), 'numpy.asarray', 'np.asarray', (['itest'], {}), '(itest)\n', (1867, 1874), True, 'import numpy as np\n'), ((1939, 1972), 'sklearn.neighbors.KNeighborsClassifier', 'neighbors.KNeighborsClassifier', (['j'], {}), '(j)\n', (1969, 1972), False, 'from sklearn import neighbors\n'), ((2087, 2109), 'numpy.mean', 'np.mean', (['(ytest != yhat)'], {}), '(ytest != yhat)\n', (2094, 2109), True, 'import numpy as np\n')]
|
import sox
import numpy as np
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="input file name")
args = parser.parse_args()
print(args.input)
np1 = np.arange(start=-1.0, stop=1.1, step=0.10)
np2 = np.arange(start=0.9, stop=1.11, step=0.01)
np3 = np.arange(start=0.9, stop=1.11, step=0.01)
np4 = np.arange(start=-5.0, stop=5.5, step=0.5)
np.random.shuffle(np1)
np.random.shuffle(np2)
np.random.shuffle(np3)
np.random.shuffle(np4)
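# Each of the 21 output files below gets a pitch shift (roughly -1..+1 semitone),
# a tempo factor (0.9..1.1) and a gain offset (-5..+5 dB) drawn from the
# shuffled grids above; np3 is generated but never used.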
x = 0
command = 'mv ' + args.input + ' temp.wav'
os.system(command)
while x < 21:
tfm1 = sox.Transformer()
pitch_offset = round(np1[x],1)
tempo_offset = round(np2[x],1)
gain_offset = round(np4[x],1)
tfm1.pitch(pitch_offset)
tfm1.gain(gain_offset, False)
tfm1.tempo(tempo_offset, 's')
tfm1.build_file('temp.wav', 'pp' + str(x) + '-' + args.input)
x = x + 1
|
[
"sox.Transformer",
"argparse.ArgumentParser",
"os.system",
"numpy.arange",
"numpy.random.shuffle"
] |
[((66, 91), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (89, 91), False, 'import argparse\n'), ((203, 244), 'numpy.arange', 'np.arange', ([], {'start': '(-1.0)', 'stop': '(1.1)', 'step': '(0.1)'}), '(start=-1.0, stop=1.1, step=0.1)\n', (212, 244), True, 'import numpy as np\n'), ((252, 294), 'numpy.arange', 'np.arange', ([], {'start': '(0.9)', 'stop': '(1.11)', 'step': '(0.01)'}), '(start=0.9, stop=1.11, step=0.01)\n', (261, 294), True, 'import numpy as np\n'), ((301, 343), 'numpy.arange', 'np.arange', ([], {'start': '(0.9)', 'stop': '(1.11)', 'step': '(0.01)'}), '(start=0.9, stop=1.11, step=0.01)\n', (310, 343), True, 'import numpy as np\n'), ((350, 391), 'numpy.arange', 'np.arange', ([], {'start': '(-5.0)', 'stop': '(5.5)', 'step': '(0.5)'}), '(start=-5.0, stop=5.5, step=0.5)\n', (359, 391), True, 'import numpy as np\n'), ((393, 415), 'numpy.random.shuffle', 'np.random.shuffle', (['np1'], {}), '(np1)\n', (410, 415), True, 'import numpy as np\n'), ((416, 438), 'numpy.random.shuffle', 'np.random.shuffle', (['np2'], {}), '(np2)\n', (433, 438), True, 'import numpy as np\n'), ((439, 461), 'numpy.random.shuffle', 'np.random.shuffle', (['np3'], {}), '(np3)\n', (456, 461), True, 'import numpy as np\n'), ((462, 484), 'numpy.random.shuffle', 'np.random.shuffle', (['np4'], {}), '(np4)\n', (479, 484), True, 'import numpy as np\n'), ((534, 552), 'os.system', 'os.system', (['command'], {}), '(command)\n', (543, 552), False, 'import os\n'), ((577, 594), 'sox.Transformer', 'sox.Transformer', ([], {}), '()\n', (592, 594), False, 'import sox\n')]
|
""" Overall test for the PYGA framework"""
from src.ga import GA
import numpy as np
TEST_CONFIGURATION = {
"generation_size": 100,
"iterate_evolution": True,
"max_fitness": 0.99,
"display_info": False,
}
def give_score(weights) -> float:
""" Higher weights give higher fitness """
return np.mean(weights)
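# Fitness is simply the mean of the weight vector, so evolution should push all
# weights towards 1.0 and the max_fitness threshold of 0.99 is reachable.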
LOCAL_GA = GA(_num_weights=5, fitness_function=give_score)
LOCAL_GA.configure(TEST_CONFIGURATION)
for iteration in LOCAL_GA.evolve():
mean_value = np.mean(iteration)
print("Average fitness this generation:", mean_value)
assert mean_value >= 0.99
"""
had iterate_evolution been set to False,
instead of looping through LOCAL_GA.evolve()
we would've simply said
fittest_weights = LOCAL_GA.evolve()
"""
|
[
"numpy.mean",
"src.ga.GA"
] |
[((346, 393), 'src.ga.GA', 'GA', ([], {'_num_weights': '(5)', 'fitness_function': 'give_score'}), '(_num_weights=5, fitness_function=give_score)\n', (348, 393), False, 'from src.ga import GA\n'), ((316, 332), 'numpy.mean', 'np.mean', (['weights'], {}), '(weights)\n', (323, 332), True, 'import numpy as np\n'), ((487, 505), 'numpy.mean', 'np.mean', (['iteration'], {}), '(iteration)\n', (494, 505), True, 'import numpy as np\n')]
|
import numpy
import torch
from torch_rl.algos.base import BaseAlgo
class I2Algorithm(BaseAlgo):
def __init__(self, environment_class, n_processes=16, seed=1, acmodel=None, num_frames_per_proc=None, discount=0.99,
lr=7e-4, gae_lambda=0.95, entropy_coef=0.01, value_loss_coef=0.5, max_grad_norm=0.5, recurrence=1,
rmsprop_alpha=0.99, rmsprop_eps=1e-5, preprocess_obss=None, reshape_reward=None):
num_frames_per_proc = num_frames_per_proc or 8
super().__init__(environment_class, acmodel, num_frames_per_proc, discount, lr, gae_lambda, entropy_coef,
value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward, n_processes, seed)
# self.optimizer = torch.optim.RMSprop(self.acmodel.parameters(), lr, alpha=rmsprop_alpha, eps=rmsprop_eps)
self.agent_optimizer = None
self.imagination_policy_optimizer = None
def load_acmodel(self, acmodel):
super().load_acmodel(acmodel)
self.agent_optimizer = torch.optim.RMSprop(self.acmodel.parameters(), self.lr, alpha=0.99, eps=1e-5)
self.imagination_policy_optimizer = torch.optim.Adam(self.acmodel.imagination_policy.parameters(), lr=self.lr)
def update_parameters(self):
# Collect experiences
exps, logs = self.collect_experiences()
# Initialize update values
update_entropy = 0
update_value = 0
update_policy_loss = 0
update_value_loss = 0
update_loss = 0
# Compute loss
dist, value = self.acmodel(exps.obs)
entropy = dist.entropy().mean()
policy_loss = -(dist.log_prob(exps.action) * exps.advantage).mean()
value_loss = (value - exps.returnn).pow(2).mean()
loss = policy_loss - self.entropy_coef * entropy + self.value_loss_coef * value_loss
# Update batch values
update_entropy += entropy.item()
update_value += value.mean().item()
update_policy_loss += policy_loss.item()
update_value_loss += value_loss.item()
update_loss += loss
# Update update values
update_entropy /= self.recurrence
update_value /= self.recurrence
update_policy_loss /= self.recurrence
update_value_loss /= self.recurrence
update_loss /= self.recurrence
# Update actor-critic
self.agent_optimizer.zero_grad()
update_loss.backward()
update_grad_norm = sum(p.grad.data.norm(2) ** 2 for p in self.acmodel.parameters()) ** 0.5
torch.nn.utils.clip_grad_norm_(self.acmodel.parameters(), self.max_grad_norm)
self.agent_optimizer.step()
self.imagination_policy_optimizer.zero_grad()
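        # Policy distillation: minimise the cross-entropy between the imagination
        # (rollout) policy's logits and the detached action distribution of the
        # main model, so the rollout policy tracks the agent's current behaviour.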
distilled_distributions, _, _ = self.acmodel.imagination_policy(exps.obs, None)
distillation_loss = (-1 * distilled_distributions.logits * dist.probs.detach()).sum(dim=1).mean()
distillation_loss.backward()
self.imagination_policy_optimizer.step()
# Log some values
logs["entropy"] = update_entropy
logs["value"] = update_value
logs["policy_loss"] = update_policy_loss
logs["value_loss"] = update_value_loss
logs["grad_norm"] = update_grad_norm
logs["distillation_loss"] = distillation_loss.item()
return logs
def _get_starting_indexes(self):
"""Gives the indexes of the observations given to the model and the
experiences used to compute the loss at first.
The indexes are the integers from 0 to `self.num_frames` with a step of
`self.recurrence`. If the model is not recurrent, they are all the
integers from 0 to `self.num_frames`.
Returns
-------
starting_indexes : list of int
the indexes of the experiences to be used at first
"""
starting_indexes = numpy.arange(0, self.num_frames, self.recurrence)
return starting_indexes
|
[
"numpy.arange"
] |
[((3872, 3921), 'numpy.arange', 'numpy.arange', (['(0)', 'self.num_frames', 'self.recurrence'], {}), '(0, self.num_frames, self.recurrence)\n', (3884, 3921), False, 'import numpy\n')]
|
import pygame
from pygame.locals import *
from sys import exit
from PIL import Image
from PID import execute_PID
import numpy as np
# Global Variables
larg = 1000.0
alt = 640.0
global position_, angle_, velocidade
position_ = 0
angle_ = 0
velocidade = 0
class Screen:
def __init__(self, larg, alt, bg_image):
pygame.init()
# Set window's name
        pygame.display.set_caption("2D Drone Simulator")
# Load background image
self.bg_image = bg_image
self.background = pygame.image.load('Imagens/Imagem_fundo_resized.jpg')
# Window's size
self.larg = larg
self.alt = alt
self.screen = pygame.display.set_mode((self.larg, self.alt))
def resize_screen_image(self):
# Resizing background image to match the screen size
image = Image.open(bg_image)
image = image.resize((self.larg, self.alt))
image.save('Imagens/Imagem_fundo_resized.jpg')
def plot(self, x, y):
self.screen.blit(x, y)
def update_screen(self):
# Screen configuration
self.screen.fill((0, 0, 0)) # Clean the last screen to update the frames
self.screen.blit(self.background, (0, 0)) # Load the bg at the (0, 0) position of the screen
        # Font
        fonte = pygame.font.SysFont('arial', 15, True, True)
        # Destination
        texto = f'Drone destination: ({mx_real:.2f}, {my_real:.2f})'
        texto_formatado = fonte.render(texto, True, (255, 255, 255))
        self.screen.blit(texto_formatado, (10, 10))
        # Current position
        texto = f'Current position: ({position_})'
        texto_formatado = fonte.render(texto, True, (255, 255, 255))
        self.screen.blit(texto_formatado, (10, 30))
        # Current velocity
        texto = f'Current velocity: ({velocidade})'
        texto_formatado = fonte.render(texto, True, (255, 255, 255))
        self.screen.blit(texto_formatado, (10, 50))
        # Current angle
        texto = f'Angle: {angle_:.2f}'
        texto_formatado = fonte.render(texto, True, (255, 255, 255))
        self.screen.blit(texto_formatado, (10, 70))
class Drone:
def __init__(self, position, angle, vel, drone_image):
# Drone's position, angle and velocity
self.position = position
self.posH = self.position[0]
self.posV = self.position[1]
self.angle = angle
self.vel = vel
# Load drone image
self.drone_image = drone_image
self.drone = pygame.image.load('Imagens/drone_resized.png')
self.tamX = self.drone.get_size()[0]
        self.tamY = self.drone.get_size()[1]
self.height = 0, 0
# Get screen class
self.screen = Screen(larg, alt, None)
self.drone_rotated = self.drone
self.drone_rotated_pos = self.position
def resize_drone_image(self):
# Resizing player image
image = Image.open(drone_image)
image = image.resize((100, 50))
image.save('Imagens/drone_resized.png')
def drone_rotate(self, position, angle):
# Rotate drone
self.drone_rotated = pygame.transform.rotate(self.drone, angle)
# correcting drone's rotated position to the center of the drone's image
self.height = self.drone_rotated.get_height() / 2
self.drone_rotated_pos = (position[0] - self.drone_rotated.get_width() / 2, position[1] - self.height)
def drone_update(self, position, angle):
# Rotating drone
self.drone_rotate(position, angle)
# spawn drone
self.screen.plot(self.drone_rotated, self.drone_rotated_pos)
class Drone_Control:
def __init__(self, drone_image):
# Movement, position and rotation parameters
self.position = [500, 540]
self.posH = self.position[0]
self.posV = self.position[1]
self.vel = 10
self.angle = 0
self.drone = Drone(self.position, self.angle, self.vel, drone_image)
self.drone_rotated = self.drone.drone_rotated
# Screen to Real coordinates
self.real_pos = {'x': -(larg / 2 - self.posH), 'y': alt - 100 - self.posV}
# Screen limits (The screen size minus the player size)
self.xlim = larg - self.drone.tamX / 2
self.ylim = alt - self.drone.tamY / 2
self.keys = 0
# Initializing control parameters
self.w1 = 0
self.w2 = 0
self.v1 = 0
self.v2 = 0
self.ang_vel = 0
self.x = np.array([self.w1, self.w2,
self.real_pos['x'], self.real_pos['y'],
self.v1, self.v2,
self.angle * np.pi / 180.,
self.ang_vel * np.pi / 180.])
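        # State vector passed to the PID controller:
        # x = [w1, w2, x, y, vx, vy, phi, phi_dot], presumably the two rotor
        # speeds, planar position, linear velocities, tilt angle (rad) and
        # angular velocity (rad/s).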
self.eP = np.array([1, 1]) # Position error
self.ePhi = 2 # angle error
def key_control(self):
self.keys = pygame.key.get_pressed()
self.real_pos = {'x': -(larg / 2 - self.posH), 'y': alt - 100 - self.posV}
destiny_x, destiny_y = self.real_pos['x'], self.real_pos['y']
if self.keys[pygame.K_LEFT] or self.keys[pygame.K_a]:
destiny_x = self.real_pos['x'] - 100.0
if self.keys[pygame.K_RIGHT] or self.keys[pygame.K_d]:
destiny_x = self.real_pos['x'] + 100.0
if self.keys[pygame.K_UP] or self.keys[pygame.K_w]:
destiny_y = self.real_pos['y'] + 100.0
if self.keys[pygame.K_DOWN] or self.keys[pygame.K_s]:
destiny_y = self.real_pos['y'] - 100.0
self.pid_control(destiny_x, destiny_y)
def mouse_control(self, destiny_x, destiny_y):
return self.pid_control(destiny_x, destiny_y)
def pid_control(self, destiny_x, destiny_y):
self.real_pos = {'x': -(larg / 2 - self.posH), 'y': alt - 100 - self.posV}
self.eP = np.array([destiny_x - self.real_pos['x'], destiny_y - self.real_pos['y']])
if np.abs(self.eP[0]) > 0.2 or np.abs(self.eP[1]) > 0.2 or np.abs(self.ePhi) > 0.1:
self.x, self.eP, self.ePhi = execute_PID(self.x, [destiny_x, destiny_y], t)
# Converting from real coordinate to screen coordinate
self.posH, self.posV = self.x[2] + larg / 2, alt - 100 - self.x[3]
# Updating state vector
self.angle = self.x[6]*180/np.pi
self.v1, self.v2 = self.x[4], self.x[5]
self.w1, self.w2 = self.x[0], self.x[1]
self.ang_vel = self.x[7]
# Updating drone's pixel position and angle
self.position = [self.posH, self.posV]
self.drone.drone_update(self.position, self.angle)
################ Printing drone's status
global position_, angle_, velocidade
position_ = (round(self.x[2], 2), round(self.x[3], 2))
angle_ = self.angle
velocidade = (round(self.v1, 2), round(self.v2, 2))
return True
else:
self.real_pos = {'x': -(larg / 2 - self.posH), 'y': alt - 100 - self.posV}
self.posH, self.posV = self.x[2] + larg / 2, alt - 100 - self.x[3]
self.eP = np.array([destiny_x - self.real_pos['x'], destiny_y - self.real_pos['y']])
self.drone.drone_update(self.position, self.angle)
return False
class Game:
def __init__(self, larg, alt, bg_image, drone_image):
self.screen = Screen(larg, alt, bg_image)
self.control = Drone_Control(drone_image)
self.clock = pygame.time.Clock()
self.ticks = 60
self.exit = False
def run(self):
global t, FPS
FPS = 600
auto_move = False
global mx_real, my_real
mx_real, my_real = 0, 0
while True:
self.clock.tick(FPS) # Game FPS
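            # Time since the last frame in seconds; passed to execute_PID as the integration step.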
t = self.clock.get_time() / 1000
self.screen.update_screen()
for event in pygame.event.get():
# To quit the game
if event.type == QUIT:
pygame.quit()
exit()
if event.type == pygame.MOUSEBUTTONDOWN:
auto_move = True
                    # Get the target (destination) position from the mouse click
mx, my = pygame.mouse.get_pos()
# Transform the mouse click point in real coordinates
mx_real, my_real = -(larg / 2 - mx), alt - 100 - my
# print(mx_real, my_real)
if auto_move:
auto_move = self.control.mouse_control(mx_real, my_real)
else:
self.control.key_control()
pygame.display.update()
if __name__ == '__main__':
bg_image = 'Imagens/ghibli_background.jpg'
drone_image = 'Imagens/drone.png'
game = Game(larg, alt, bg_image, drone_image)
game.run()
|
[
"numpy.abs",
"PIL.Image.open",
"sys.exit",
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.mouse.get_pos",
"pygame.time.Clock",
"pygame.transform.rotate",
"numpy.array",
"pygame.key.get_pressed",
"pygame.display.set_caption",
"pygame.image.load",
"pygame.display.update",
"pygame.font.SysFont",
"PID.execute_PID"
] |
[((315, 328), 'pygame.init', 'pygame.init', ([], {}), '()\n', (326, 328), False, 'import pygame\n'), ((353, 404), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Simulador 2D de Drone"""'], {}), "('Simulador 2D de Drone')\n", (379, 404), False, 'import pygame\n'), ((478, 531), 'pygame.image.load', 'pygame.image.load', (['"""Imagens/Imagem_fundo_resized.jpg"""'], {}), "('Imagens/Imagem_fundo_resized.jpg')\n", (495, 531), False, 'import pygame\n'), ((602, 648), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(self.larg, self.alt)'], {}), '((self.larg, self.alt))\n', (625, 648), False, 'import pygame\n'), ((747, 767), 'PIL.Image.open', 'Image.open', (['bg_image'], {}), '(bg_image)\n', (757, 767), False, 'from PIL import Image\n'), ((1164, 1208), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""arial"""', '(15)', '(True)', '(True)'], {}), "('arial', 15, True, True)\n", (1183, 1208), False, 'import pygame\n'), ((2222, 2268), 'pygame.image.load', 'pygame.image.load', (['"""Imagens/drone_resized.png"""'], {}), "('Imagens/drone_resized.png')\n", (2239, 2268), False, 'import pygame\n'), ((2574, 2597), 'PIL.Image.open', 'Image.open', (['drone_image'], {}), '(drone_image)\n', (2584, 2597), False, 'from PIL import Image\n'), ((2757, 2799), 'pygame.transform.rotate', 'pygame.transform.rotate', (['self.drone', 'angle'], {}), '(self.drone, angle)\n', (2780, 2799), False, 'import pygame\n'), ((3946, 4094), 'numpy.array', 'np.array', (["[self.w1, self.w2, self.real_pos['x'], self.real_pos['y'], self.v1, self.v2,\n self.angle * np.pi / 180.0, self.ang_vel * np.pi / 180.0]"], {}), "([self.w1, self.w2, self.real_pos['x'], self.real_pos['y'], self.v1,\n self.v2, self.angle * np.pi / 180.0, self.ang_vel * np.pi / 180.0])\n", (3954, 4094), True, 'import numpy as np\n'), ((4129, 4145), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (4137, 4145), True, 'import numpy as np\n'), ((4234, 4258), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (4256, 4258), False, 'import pygame\n'), ((5067, 5141), 'numpy.array', 'np.array', (["[destiny_x - self.real_pos['x'], destiny_y - self.real_pos['y']]"], {}), "([destiny_x - self.real_pos['x'], destiny_y - self.real_pos['y']])\n", (5075, 5141), True, 'import numpy as np\n'), ((6482, 6501), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (6499, 6501), False, 'import pygame\n'), ((5260, 5306), 'PID.execute_PID', 'execute_PID', (['self.x', '[destiny_x, destiny_y]', 't'], {}), '(self.x, [destiny_x, destiny_y], t)\n', (5271, 5306), False, 'from PID import execute_PID\n'), ((6163, 6237), 'numpy.array', 'np.array', (["[destiny_x - self.real_pos['x'], destiny_y - self.real_pos['y']]"], {}), "([destiny_x - self.real_pos['x'], destiny_y - self.real_pos['y']])\n", (6171, 6237), True, 'import numpy as np\n'), ((6793, 6811), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6809, 6811), False, 'import pygame\n'), ((7325, 7348), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (7346, 7348), False, 'import pygame\n'), ((5147, 5165), 'numpy.abs', 'np.abs', (['self.eP[0]'], {}), '(self.eP[0])\n', (5153, 5165), True, 'import numpy as np\n'), ((5175, 5193), 'numpy.abs', 'np.abs', (['self.eP[1]'], {}), '(self.eP[1])\n', (5181, 5193), True, 'import numpy as np\n'), ((5203, 5220), 'numpy.abs', 'np.abs', (['self.ePhi'], {}), '(self.ePhi)\n', (5209, 5220), True, 'import numpy as np\n'), ((6868, 6881), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (6879, 6881), False, 'import pygame\n'), ((6887, 
6893), 'sys.exit', 'exit', ([], {}), '()\n', (6891, 6893), False, 'from sys import exit\n'), ((7032, 7054), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (7052, 7054), False, 'import pygame\n')]
|
import os
import math
import numpy as np
from skimage import transform, io
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
root_path = r'X:\test_image\output'
image_name = 'mask.tiff'
output_name = 'new_heatmap.tiff'
img_path = os.path.join(root_path, image_name)
output_path = os.path.join(root_path, output_name)
image = io.imread(img_path)[::2, ::2].astype('uint8')
heat_map = np.zeros(image.shape).astype('uint16')
h, w = image.shape
r = 20
index = 255 // (r + 1)
offset = 10
# for i in range(h):
# for j in range(w):
# if image[i, j] != 0:
# for m in range(-r, r):
# for n in range(-r, r):
# if 0 <= j + n < w and 0 <= i + m < h:
# distant = int((n ** 2 + m ** 2) ** 0.5)
# if distant <= r:
# distant = distant * index + offset
# if distant != 0:
# heat_map[i + m, j + n] += image[i, j] // distant
# else:
# heat_map[i, j] += image[i, j]
step = 50
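# Coarse alternative to the per-pixel kernel above: fill each step x step block with that block's sum.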
for i in range(0, h, step):
for j in range(0, w, step):
heat_map[i:i + step, j:j + step] = image[i:i + step, j:j + step].sum()
if i % 1000 == 0:
print(i)
norm1 = heat_map / np.linalg.norm(heat_map)  # normalize by the Frobenius norm
norm1 = (norm1 / norm1.max() * 255).astype('uint8')  # rescale to 0-255 before casting (assumed intent)
g_layer = np.zeros(image.shape).astype('uint8')
b_layer = np.zeros(image.shape).astype('uint8')
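# Note: np.stack with axis=0 gives shape (3, H, W); skimage treats the last axis as channels,
# so axis=-1 (i.e. (H, W, 3)) may be the intended layout for an RGB image.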
result = np.stack([norm1, g_layer, b_layer], axis=0).astype('uint8')
io.imsave(output_path, result)
print(result.shape)
|
[
"os.path.join",
"numpy.stack",
"numpy.zeros",
"skimage.io.imread",
"skimage.io.imsave",
"numpy.linalg.norm"
] |
[((244, 279), 'os.path.join', 'os.path.join', (['root_path', 'image_name'], {}), '(root_path, image_name)\n', (256, 279), False, 'import os\n'), ((294, 330), 'os.path.join', 'os.path.join', (['root_path', 'output_name'], {}), '(root_path, output_name)\n', (306, 330), False, 'import os\n'), ((1533, 1563), 'skimage.io.imsave', 'io.imsave', (['output_path', 'result'], {}), '(output_path, result)\n', (1542, 1563), False, 'from skimage import transform, io\n'), ((398, 419), 'numpy.zeros', 'np.zeros', (['image.shape'], {}), '(image.shape)\n', (406, 419), True, 'import numpy as np\n'), ((1378, 1399), 'numpy.zeros', 'np.zeros', (['image.shape'], {}), '(image.shape)\n', (1386, 1399), True, 'import numpy as np\n'), ((1426, 1447), 'numpy.zeros', 'np.zeros', (['image.shape'], {}), '(image.shape)\n', (1434, 1447), True, 'import numpy as np\n'), ((1473, 1516), 'numpy.stack', 'np.stack', (['[norm1, g_layer, b_layer]'], {'axis': '(0)'}), '([norm1, g_layer, b_layer], axis=0)\n', (1481, 1516), True, 'import numpy as np\n'), ((340, 359), 'skimage.io.imread', 'io.imread', (['img_path'], {}), '(img_path)\n', (349, 359), False, 'from skimage import transform, io\n'), ((1327, 1351), 'numpy.linalg.norm', 'np.linalg.norm', (['heat_map'], {}), '(heat_map)\n', (1341, 1351), True, 'import numpy as np\n')]
|
import gsum as gm
import numpy as np
from numpy import ndarray
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
import matplotlib.patches as mpatches
from matplotlib.patches import Ellipse
from matplotlib.legend_handler import HandlerPatch
from matplotlib.legend import Legend
from matplotlib.ticker import MultipleLocator, AutoMinorLocator, MaxNLocator
import docrep
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel
import seaborn as sns
from seaborn import utils
import pandas as pd
from .matter import nuclear_density, fermi_momentum, ratio_kf
from .graphs import confidence_ellipse, confidence_ellipse_mean_cov
from os.path import join
from scipy import stats
from copy import deepcopy
from os import path
docstrings = docrep.DocstringProcessor()
docstrings.get_sections(str(gm.ConjugateGaussianProcess.__doc__), 'ConjugateGaussianProcess')
black = 'k'
softblack = 'k'
gray = '0.75'
darkgray = '0.5'
text_bbox = dict(boxstyle='round', fc=(1, 1, 1, 0.6), ec=black, lw=0.8)
class HandlerEllipse(HandlerPatch):
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
p = mpatches.Ellipse(xy=center, width=width + xdescent,
height=height + ydescent)
self.update_prop(p, orig_handle, legend)
p.set_transform(trans)
return [p]
Legend.update_default_handler_map({Ellipse: HandlerEllipse()})
def compute_breakdown_posterior(model, X, data, orders, max_idx, logprior, breakdowns, lengths=None):
"""Put this in the specific class?
Parameters
----------
model : gm.TruncationGP
X :
data
orders
max_idx
logprior
breakdowns
lengths
Returns
-------
pdf : ndarray, shape = (N,)
"""
model.fit(X, data[:, :max_idx+1], orders=orders[:max_idx+1])
if lengths is None:
log_ell = model.coeffs_process.kernel_.theta
lengths = np.exp(log_ell)
else:
log_ell = np.log(lengths)
log_like = np.array([[model.log_marginal_likelihood([t], breakdown=lb) for lb in breakdowns] for t in log_ell])
log_like += logprior
posterior_2d = np.exp(log_like - np.max(log_like))
breakdown_pdf = np.trapz(posterior_2d, x=lengths, axis=0)
breakdown_pdf /= np.trapz(breakdown_pdf, x=breakdowns) # Normalize
return breakdown_pdf
def compute_pdf_median_and_bounds(x, pdf, cred):
R"""Computes the median and credible intervals for a 1d pdf
Parameters
----------
x : 1d array
The input variable
pdf : 1d array
The normalized pdf
cred : Iterable
The credible intervals in the range (0, 1)
Returns
-------
median : float
bounds : ndarray, shape = (len(cred), 2)
"""
bounds = np.zeros((len(cred), 2))
for i, p in enumerate(cred):
bounds[i] = gm.hpd_pdf(pdf=pdf, alpha=p, x=x)
median = gm.median_pdf(pdf=pdf, x=x)
return median, bounds
def draw_summary_statistics(bounds68, bounds95, median, height=0., linewidth=1., ax=None):
if ax is None:
ax = plt.gca()
ax.plot(bounds68, [height, height], c=darkgray, lw=3*linewidth, solid_capstyle='round')
ax.plot(bounds95, [height, height], c=darkgray, lw=linewidth, solid_capstyle='round')
ax.plot([median], [height], c='white', marker='o', zorder=10, markersize=1.5*linewidth)
return ax
def offset_xlabel(ax):
ax.set_xticks([0])
ax.set_xticklabels(labels=[0], fontdict=dict(color='w'))
ax.tick_params(axis='x', length=0)
return ax
def joint_plot(ratio=1, height=3.):
"""Taken from Seaborn JointGrid"""
fig = plt.figure(figsize=(height, height))
gsp = plt.GridSpec(ratio+1, ratio+1)
ax_joint = fig.add_subplot(gsp[1:, :-1])
ax_marg_x = fig.add_subplot(gsp[0, :-1], sharex=ax_joint)
ax_marg_y = fig.add_subplot(gsp[1:, -1], sharey=ax_joint)
# Turn off tick visibility for the measure axis on the marginal plots
plt.setp(ax_marg_x.get_xticklabels(), visible=False)
plt.setp(ax_marg_y.get_yticklabels(), visible=False)
# Turn off the ticks on the density axis for the marginal plots
plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_x.get_yticklabels(), visible=False)
plt.setp(ax_marg_y.get_xticklabels(), visible=False)
ax_marg_x.yaxis.grid(False)
ax_marg_y.xaxis.grid(False)
# Make the grid look nice
# utils.despine(fig)
utils.despine(ax=ax_marg_x, left=True)
utils.despine(ax=ax_marg_y, bottom=True)
fig.tight_layout(h_pad=0, w_pad=0)
ax_marg_y.tick_params(axis='y', which='major', direction='out')
ax_marg_x.tick_params(axis='x', which='major', direction='out')
ax_marg_y.tick_params(axis='y', which='minor', direction='out')
ax_marg_x.tick_params(axis='x', which='minor', direction='out')
ax_marg_y.margins(x=0.1, y=0.)
fig.subplots_adjust(hspace=0, wspace=0)
return fig, ax_joint, ax_marg_x, ax_marg_y
def compute_2d_posterior(model, X, data, orders, breakdown, ls=None, logprior=None, max_idx=None):
R"""
Parameters
----------
model : gm.TruncationGP
X : ndarray, shape = (N,None)
data : ndarray, shape = (N,[n_curves])
orders : ndarray, shape = (n_curves,)
max_idx : ndarray, shape = (n_orders,)
breakdown : ndarray, shape = (n_breakdown,)
ls : ndarray, shape = (n_ls,)
logprior : ndarray, optional, shape = (n_ls, n_breakdown)
Returns
-------
joint_pdf : ndarray
ratio_pdf : ndarray
ls_pdf : ndarray
"""
if max_idx is not None:
data = data[:, :max_idx + 1]
orders = orders[:max_idx + 1]
model.fit(X, data, orders=orders)
if ls is None:
ls = np.exp(model.coeffs_process.kernel_.theta)
print('Setting ls to', ls)
ls = np.atleast_1d(ls)
# log_like = np.array([
# [model.log_marginal_likelihood(theta=[np.log(ls_), ], breakdown=lb) for lb in breakdown] for ls_ in ls
# ])
from joblib import Parallel, delayed
import multiprocessing
num_cores = multiprocessing.cpu_count()
log_like = np.array(
Parallel(n_jobs=num_cores, prefer='processes')(
delayed(model.log_marginal_likelihood)(theta=[np.log(ls_), ], breakdown=lb)
for ls_ in ls for lb in breakdown
)
).reshape(len(ls), len(breakdown))
if logprior is not None:
log_like += logprior
joint_pdf = np.exp(log_like - np.max(log_like))
if len(ls) > 1:
ratio_pdf = np.trapz(joint_pdf, x=ls, axis=0)
else:
ratio_pdf = np.squeeze(joint_pdf)
ls_pdf = np.trapz(joint_pdf, x=breakdown, axis=-1)
# Normalize them
ratio_pdf /= np.trapz(ratio_pdf, x=breakdown, axis=0)
if len(ls) > 1:
ls_pdf /= np.trapz(ls_pdf, x=ls, axis=0)
return joint_pdf, ratio_pdf, ls_pdf
def plot_2d_joint(ls_vals, Lb_vals, like_2d, like_ls, like_Lb, data_str=r'\vec{\mathbf{y}}_k)',
xlabel=None, ylabel=None):
if data_str is None:
data_str = r'\vec{\mathbf{y}}_k)'
from matplotlib.cm import get_cmap
with plt.rc_context({"text.usetex": True, "text.latex.preview": True}):
cmap_name = 'Blues'
cmap = get_cmap(cmap_name)
# Setup axes
fig, ax_joint, ax_marg_x, ax_marg_y = joint_plot(ratio=5, height=3.4)
# Plot contour
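        # Contour levels exp(-r**2/2) correspond to the r-sigma contours of a peak-normalized Gaussian.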
ax_joint.contour(ls_vals, Lb_vals, like_2d.T,
levels=[np.exp(-0.5*r**2) for r in np.arange(9, 0, -0.5)] + [0.999],
cmap=cmap_name, vmin=-0.05, vmax=0.8, zorder=1)
# Now plot the marginal distributions
ax_marg_y.plot(like_Lb, Lb_vals, c=cmap(0.8), lw=1)
ax_marg_y.fill_betweenx(Lb_vals, np.zeros_like(like_Lb),
like_Lb, facecolor=cmap(0.2), lw=1)
ax_marg_x.plot(ls_vals, like_ls, c=cmap(0.8), lw=1)
ax_marg_x.fill_between(ls_vals, np.zeros_like(ls_vals),
like_ls, facecolor=cmap(0.2), lw=1)
# Formatting
ax_joint.set_xlabel(xlabel)
ax_joint.set_ylabel(ylabel)
ax_joint.margins(x=0, y=0.)
ax_marg_x.set_ylim(bottom=0)
ax_marg_y.set_xlim(left=0)
ax_joint.text(
0.95, 0.95, rf'pr$(\ell, \Lambda_b \,|\, {data_str}$)', ha='right', va='top',
transform=ax_joint.transAxes,
bbox=text_bbox
)
ax_joint.tick_params(direction='in')
plt.show()
return fig
def pdfplot(
x, y, pdf, data, hue=None, order=None, hue_order=None, cut=1e-2, linewidth=None,
palette=None, saturation=1., ax=None, margin=None, legend_title=None, loc='best'
):
R"""Like seaborn's violinplot, but takes PDF values rather than tabular data.
Parameters
----------
x : str
The column of the DataFrame to use as the x axis. The pdfs are a function of this variable.
y : str
The column of the DataFrame to use as the y axis. A pdf will be drawn for each unique value in data[y].
pdf : str
The column of the DataFrame to use as the pdf values.
data : pd.DataFrame
The DataFrame containing the pdf data
hue : str, optional
Splits data[y] up by the value of hue, and plots each pdf separately as a specific color.
order : list, optional
The order in which to plot the y values, from top to bottom
hue_order : list, optional
The order in which to plot the hue values, from top to bottom.
cut : float, optional
The value below which the pdfs will not be shown. This is taken as a fraction of the total height of each pdf.
linewidth : float, optional
The linewidth of the pdf lines
palette : str, list, optional
The color palette to fill underneath the curves
saturation : float, optional
The level of saturation for the color palette. Only works if the palette is a string recognized by
sns.color_palette
ax : matplotlib.axes.Axes
The axis on which to draw the plot
margin : float, optional
The vertical margin between each pdf.
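    Examples
    --------
    Illustrative only; assumes a DataFrame with the columns produced by
    MatterConvergenceAnalysis.setup_posteriors:
    >>> ax = pdfplot(x=r'$\Lambda_b$ [MeV]', y='Order', pdf='pdf', data=df_breakdown, hue='Body')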
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(3.4, 3.4))
y_vals = data[y].unique()
if order is not None:
y_vals = order
legend_vals = y_vals
hue_vals = [None]
n_colors = len(y_vals)
if hue is not None:
hue_vals = data[hue].unique()
if hue_order is not None:
hue_vals = hue_order
legend_vals = hue_vals
n_colors = len(hue_vals)
if isinstance(palette, str) or palette is None:
colors = sns.color_palette(palette, n_colors=n_colors, desat=saturation)
elif isinstance(palette, list):
colors = palette
else:
raise ValueError('palette must be str or list')
if margin is None:
_, margin = plt.margins()
offset = 1.
minor_ticks = []
major_ticks = []
for i, y_val in enumerate(y_vals):
max_height_hue = offset - margin
for j, hue_val in enumerate(hue_vals):
mask = data[y] == y_val
if hue is not None:
mask = mask & (data[hue] == hue_val)
color = colors[j]
else:
color = colors[i]
df = data[mask]
x_vals = df[x].values
pdf_vals = df[pdf].values.copy()
pdf_vals /= np.trapz(pdf_vals, x_vals)
# Assumes normalized
median, bounds = compute_pdf_median_and_bounds(
x=x_vals, pdf=pdf_vals, cred=[0.68, 0.95]
)
pdf_vals /= (1. * np.max(pdf_vals)) # Scale so they're all the same height
# Make the lines taper off
x_vals = x_vals[pdf_vals > cut]
pdf_vals = pdf_vals[pdf_vals > cut]
offset -= (1 + margin)
# Plot and fill posterior, and add summary statistics
ax.plot(x_vals, pdf_vals + offset, c=darkgray, lw=linewidth)
ax.fill_between(x_vals, offset, pdf_vals + offset, facecolor=color)
draw_summary_statistics(*bounds, median, ax=ax, height=offset, linewidth=1.5*linewidth)
min_height_hue = offset
minor_ticks.append(offset - margin/2.)
major_ticks.append((max_height_hue + min_height_hue) / 2.)
minor_ticks = minor_ticks[:-1]
# Plot formatting
ax.set_yticks(major_ticks, minor=False)
ax.set_yticks(minor_ticks, minor=True)
ax.set_yticklabels(y_vals, fontdict=dict(verticalalignment='center'))
ax.tick_params(axis='both', which='both', direction='in')
ax.tick_params(which='major', length=0)
ax.tick_params(which='minor', length=7, right=True)
ax.set_xlabel(x)
ax.set_axisbelow(True)
if hue is not None:
legend_elements = [
Patch(facecolor=color, edgecolor=darkgray, label=leg_val) for color, leg_val in zip(colors, legend_vals)
]
ax.legend(handles=legend_elements, loc=loc, title=legend_title)
return ax
def joint2dplot(ls_df, breakdown_df, joint_df, system, order, data_str=None):
ls_df = ls_df[(ls_df['system'] == system) & (ls_df['Order'] == order)]
breakdown_df = breakdown_df[(breakdown_df['system'] == system) & (breakdown_df['Order'] == order)]
joint_df = joint_df[(joint_df['system'] == system) & (joint_df['Order'] == order)]
ls = ls_df[r'$\ell$ [fm$^{-1}$]']
breakdown = breakdown_df[r'$\Lambda_b$ [MeV]']
joint = joint_df['pdf'].values.reshape(len(ls), len(breakdown))
fig = plot_2d_joint(
ls_vals=ls, Lb_vals=breakdown, like_2d=joint,
like_ls=ls_df['pdf'].values, like_Lb=breakdown_df['pdf'].values,
data_str=data_str, xlabel=r'$\ell$ [fm$^{-1}$]', ylabel=r'$\Lambda_b$ [MeV]',
)
return fig
def minimum_samples(mean, cov, n=5000, x=None):
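    """Draw n samples from N(mean, cov) and return the location (mapped through x, if given) and value of each sample's minimum."""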
gp = stats.multivariate_normal(mean=mean, cov=cov)
samples = gp.rvs(n)
min_idxs = np.argmin(samples, axis=1)
min_y = np.min(samples, axis=1)
if x is not None:
min_x = x[min_idxs]
return min_x, min_y
return min_idxs, min_y
# def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):
# """
# Create a plot of the covariance confidence ellipse of *x* and *y*.
#
# Parameters
# ----------
# x, y : array-like, shape (n, )
# Input data.
# ax : matplotlib.axes.Axes
# The axes object to draw the ellipse into.
# n_std : float
# The number of standard deviations to determine the ellipse's radii.
# facecolor : str
# The color of the ellipse
#
# Returns
# -------
# matplotlib.patches.Ellipse
#
# Other parameters
# ----------------
# kwargs : `~matplotlib.patches.Patch` properties
# """
# import matplotlib.transforms as transforms
# if x.size != y.size:
# raise ValueError("x and y must be the same size")
#
# cov = np.cov(x, y)
# pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])
# # Using a special case to obtain the eigenvalues of this
# # two-dimensional dataset.
# ell_radius_x = np.sqrt(1 + pearson)
# ell_radius_y = np.sqrt(1 - pearson)
# ellipse = Ellipse(
# (0, 0),
# width=ell_radius_x * 2,
# height=ell_radius_y * 2,
# facecolor=facecolor,
# **kwargs
# )
#
# # Calculating the standard deviation of x from
# # the square root of the variance and multiplying
# # with the given number of standard deviations.
# scale_x = np.sqrt(cov[0, 0]) * n_std
# mean_x = np.mean(x)
#
# # calculating the standard deviation of y ...
# scale_y = np.sqrt(cov[1, 1]) * n_std
# mean_y = np.mean(y)
#
# trans = transforms.Affine2D() \
# .rotate_deg(45) \
# .scale(scale_x, scale_y) \
# .translate(mean_x, mean_y)
#
# ellipse.set_transform(trans + ax.transData)
# # sns.kdeplot(x, y, ax=ax)
# scat_color = darken_color(facecolor, 0.5)
# ax.plot(x, y, ls='', marker='.', markersize=0.6, color=scat_color)
# ax.add_patch(ellipse)
# return ellipse
def lighten_color(color, amount=0.5):
"""
Lightens the given color by multiplying (1-luminosity) by the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
Examples:
>> lighten_color('g', 0.3)
>> lighten_color('#F034A3', 0.6)
>> lighten_color((.3,.55,.1), 0.5)
"""
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
    except (KeyError, TypeError):
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def darken_color(color, amount=0.5):
"""
Darken the given color by multiplying (1-luminosity) by the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
Examples:
>> darken_color('g', 0.3)
>> darken_color('#F034A3', 0.6)
>> darken_color((.3,.55,.1), 0.5)
"""
return lighten_color(color, 1./amount)
def cov_no_centering(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
"""Copied from numpy.cov, but commented out the centering. Why isn't this toggleable with an argument?
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N - 1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True,
then normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
.. versionadded:: 1.5
fweights : array_like, int, optional
1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
.. versionadded:: 1.10
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
.. versionadded:: 1.10
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> m = np.arange(10, dtype=np.float64)
>>> f = np.arange(10) * 2
>>> a = np.arange(10) ** 2.
>>> ddof = 9 # N - 1
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=None, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.stack((x, y), axis=0)
>>> np.cov(X)
array([[11.71 , -4.286 ], # may vary
[-4.286 , 2.144133]])
>>> np.cov(x, y)
array([[11.71 , -4.286 ], # may vary
[-4.286 , 2.144133]])
>>> np.cov(x)
array(11.71)
"""
from numpy import array, average, dot
import warnings
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions")
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = np.asarray(y)
if y.ndim > 2:
raise ValueError("y has more than 2 dimensions")
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if not rowvar and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=dtype)
if not rowvar and y.shape[0] != 1:
y = y.T
X = np.concatenate((X, y), axis=0)
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = X.shape[1] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof * sum(w * aweights) / w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice",
RuntimeWarning, stacklevel=3)
fact = 0.0
# X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X * w).T
c = dot(X, X_T.conj())
c *= np.true_divide(1, fact)
return c.squeeze()
def create_rbf_cross_covariance(X1, X2, std1, std2, ls1, ls2, rho=None):
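    # Cross-covariance block between two RBF processes. If rho is not supplied, use the overlap
    # of the two kernels: off-diagonal length scale sqrt((ls1**2 + ls2**2)/2) and correlation
    # factor sqrt(2*ls1*ls2 / (ls1**2 + ls2**2)); otherwise keep ls1 and the supplied rho.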
if rho is None:
ls_off = np.sqrt((ls1 ** 2 + ls2 ** 2) / 2)
rho = np.sqrt(2 * ls1 * ls2 / (ls1**2 + ls2**2))
else:
ls_off = ls1
k_off = ConstantKernel(std1 * std2) * RBF(ls_off)
K_off = k_off(X1, X2)
K_off *= rho
return K_off
def create_rbf_multi_covariance(X1, X2, std1, std2, ls1, ls2, nugget=0, rho=None):
k1 = ConstantKernel(std1 ** 2) * RBF(ls1)
if rho is None:
k2 = ConstantKernel(std2 ** 2) * RBF(ls2)
else:
k2 = ConstantKernel(std2 ** 2) * RBF(ls1)
K1 = k1(X1)
K2 = k2(X2)
K_off = create_rbf_cross_covariance(X1, X2, std1, std2, ls1, ls2, rho=rho)
K = np.block([
[K1, K_off],
[K_off.T, K2]
])
K[np.diag_indices_from(K)] += nugget
return K
def create_truncation_cross_covariance(X1, X2, std1, std2, ls1, ls2, ref1, ref2, Q1, Q2, kmin=0, kmax=None, rho=None):
K_off = create_rbf_cross_covariance(X1, X2, std1, std2, ls1, ls2, rho=rho)
ref1 = np.atleast_1d(ref1)
ref2 = np.atleast_1d(ref2)
Q_num1 = Q1 ** kmin
Q_num2 = Q2 ** kmin
if kmax is not None:
Q_num1 -= Q1 ** (kmax + 1)
Q_num2 -= Q2 ** (kmax + 1)
Q_sum1 = Q_num1 / np.sqrt(1 - Q1 ** 2)
Q_sum2 = Q_num2 / np.sqrt(1 - Q2 ** 2)
K_off = (ref1 * Q_sum1)[:, None] * (ref2 * Q_sum2) * K_off
return K_off
def create_truncation_multi_covariance(
X1, X2, std1, std2, ls1, ls2, ref1, ref2, Q1, Q2, kmin=0, kmax=None, nugget=0, rho=None):
ref1 = np.atleast_1d(ref1)
ref2 = np.atleast_1d(ref2)
# Must square now, take square root after subtracting
Q_num1 = Q1 ** (2 * kmin)
Q_num2 = Q2 ** (2 * kmin)
if kmax is not None:
Q_num1 -= Q1 ** (2 * (kmax + 1))
Q_num2 -= Q2 ** (2 * (kmax + 1))
Q_sum1 = np.sqrt(Q_num1) / np.sqrt(1 - Q1 ** 2)
Q_sum2 = np.sqrt(Q_num2) / np.sqrt(1 - Q2 ** 2)
k1 = ConstantKernel(std1 ** 2) * RBF(ls1)
if rho is None:
k2 = ConstantKernel(std2 ** 2) * RBF(ls2)
else:
k2 = ConstantKernel(std2 ** 2) * RBF(ls1)
K1 = k1(X1)
K2 = k2(X2)
K1 = (ref1 * Q_sum1)[:, None] * (ref1 * Q_sum1) * K1
K2 = (ref2 * Q_sum2)[:, None] * (ref2 * Q_sum2) * K2
K_off = create_truncation_cross_covariance(
X1, X2, std1, std2, ls1, ls2, ref1, ref2, Q1, Q2, kmin, kmax, rho=rho
)
K = np.block([
[K1, K_off],
[K_off.T, K2]
])
K[np.diag_indices_from(K)] += nugget
return K
def create_sym_energy_rbf_covariance(density, std_n, std_s, ls_n, ls_s, nugget=0, rho=None):
Kf_n = fermi_momentum(density, 2)[:, None]
Kf_s = fermi_momentum(density, 4)[:, None]
# Convert symmetric matter kf and ell to neutron matter
# The scaling is irrelevant for the kernel as long as it is consistent, but we must ensure that
# points *at the same density* are the most correlated in the off-diagonal block.
# therefore the conventions must be consistent. Else points at high density will
# be less correlated than points at low density.
factor = 2. ** (1 / 3.)
Kf_s = Kf_s * factor
if rho is None:
ls_s = ls_s * factor
# print(Kf_n - Kf_s)
cov = create_rbf_multi_covariance(
X1=Kf_n, X2=Kf_s, std1=std_n, std2=std_s, ls1=ls_n, ls2=ls_s, nugget=nugget, rho=rho
)
N = len(density)
cov_n = cov[:N, :N]
cov_s = cov[N:, N:]
cov_ns = cov[:N, N:]
cov_sn = cov[N:, :N]
return cov_n + cov_s - cov_ns - cov_sn
def create_sym_energy_truncation_covariance(
density, std_n, std_s, ls_n, ls_s, ref_n, ref_s, Q_n, Q_s, kmin=0, kmax=None, nugget=0, rho=None,
ignore_corr=False
):
Kf_n = fermi_momentum(density, 2)[:, None]
Kf_s = fermi_momentum(density, 4)[:, None]
# Convert symmetric matter kf and ell to neutron matter
# The scaling is irrelevant for the kernel as long as it is consistent, but we must ensure that
# points *at the same density* are the most correlated in the off-diagonal block.
# therefore the conventions must be consistent. Else points at high density will
# be less correlated than points at low density.
factor = 2. ** (1/3.)
Kf_s = Kf_s * factor
if rho is None:
ls_s = ls_s * factor
# print(Kf_n - Kf_s)
cov = create_truncation_multi_covariance(
X1=Kf_n, X2=Kf_s, std1=std_n, std2=std_s, ls1=ls_n, ls2=ls_s,
ref1=ref_n, ref2=ref_s, Q1=Q_n, Q2=Q_s, kmin=kmin, kmax=kmax, nugget=nugget, rho=rho
)
N = len(density)
cov_n = cov[:N, :N]
cov_s = cov[N:, N:]
cov_ns = cov[:N, N:]
cov_sn = cov[N:, :N]
if ignore_corr:
return cov_n + cov_s
return cov_n + cov_s - cov_ns - cov_sn
@docstrings.get_sectionsf('ConvergenceAnalysis')
@docstrings.dedent
class ConvergenceAnalysis:
R"""A generic class for studying convergence of observables.
This is meant to provide the framework for particular analyses, which should subclass this class.
Parameters
----------
X : ndarray, shape = (N,p)
The feature matrix
y : ndarray, shape = (N, n_curves)
The response curves
orders : ndarray, shape = (n_orders,)
train : ndarray, shape = (N,)
A boolean array that is `True` if that point is to be used to train the convergence model.
valid : ndarray, shape = (N,)
A boolean array that is `True` if that point is to be used to validate the convergence model.
ref : float or callable
The reference scale
ratio : float or callable
The ratio Q
excluded : ndarray, optional
The orders for which the coefficients should not be used in training the convergence model.
colors : ndarray, optional
Colors for plotting orders and their diagnostics.
Other Parameters
----------------
%(ConjugateGaussianProcess.parameters)s
"""
def __init__(self, X, y2, y3, orders, train, valid, ref2, ref3, ratio, body, *, excluded=None, colors=None, **kwargs):
self.X = X
self.orders_original = np.atleast_1d(orders)
marker_list = ['^', 'X', 'o', 's']
markerfillstyle_2bf = 'full'
markerfillstyle_3bf = 'left'
linestyle_2bf = '-'
linestyle_3bf = '--'
colors_original = colors
if body == 'Appended':
print('Appending 2bf and 3bf predictions...')
try:
ref3_vals = ref3(X)
except TypeError:
ref3_vals = ref3
try:
ref2_vals = ref2(X)
except TypeError:
ref2_vals = ref2
try:
ratio_vals = ratio(X)
except TypeError:
ratio_vals = ratio
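            # Extract coefficients separately for the NN part (y2, 2-body reference) and the
            # 3N correction (y3 - y2, 3-body reference); they are interleaved by order below
            # and the partial sums are rebuilt with the 2-body reference.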
c2 = gm.coefficients(y2, ratio_vals, ref2_vals, orders)
c3 = gm.coefficients(y3-y2, ratio_vals, ref3_vals, orders)
c = []
colors_all = []
orders_all = []
markers = []
markerfillstyles = []
linestyles = []
n_bodies = []
for i, n in enumerate(orders):
c.append(c2[:, i])
orders_all.append(n)
colors_all.append(colors[i])
markers.append(marker_list[i])
markerfillstyles.append(markerfillstyle_2bf)
linestyles.append(linestyle_2bf)
n_bodies.append('2')
if n > 2: # Has 3-body forces
c.append(c3[:, i])
orders_all.append(n)
colors_all.append(colors[i])
markers.append(marker_list[i])
markerfillstyles.append(markerfillstyle_3bf)
linestyles.append(linestyle_3bf)
n_bodies.append('3')
c = np.array(c).T
orders_all = np.array(orders_all)
            print(f'Resetting orders to be {orders_all}')
y = gm.partials(c, ratio_vals, ref2_vals, orders_all)
self.y = y
self.orders = orders_all
self.ref = ref2
elif body == 'NN-only':
self.y = y2
self.orders = orders
self.ref = ref2
colors_all = colors
markerfillstyles = [markerfillstyle_2bf] * len(orders)
linestyles = [linestyle_2bf] * len(orders)
n_bodies = ['2'] * len(orders)
markers = marker_list
elif body == 'NN+3N':
self.y = y3
self.orders = orders
self.ref = ref2
colors_all = colors
markerfillstyles = [markerfillstyle_2bf] * len(orders)
linestyles = [linestyle_2bf] * len(orders)
# n_bodies = ['2+3'] * len(orders)
n_bodies = [None] * len(orders)
markers = marker_list
elif body == '3N':
self.y = y3 - y2
self.orders = orders
self.ref = ref3
colors_all = colors
markerfillstyles = [markerfillstyle_3bf] * len(orders)
linestyles = [linestyle_3bf] * len(orders)
n_bodies = ['3'] * len(orders)
markers = marker_list
else:
raise ValueError('body not in allowed values')
self.train = train
self.valid = valid
self.X_train = X[train]
self.X_valid = X[valid]
self.y2 = y2
if body != '3N':
self.y2_train = y2[train]
self.y2_valid = y2[valid]
else:
self.y2_train = None
            self.y2_valid = None
self.y3 = y3
if body != 'NN-only':
self.y3_train = y3[train]
self.y3_valid = y3[valid]
else:
self.y3_train = None
            self.y3_valid = None
self.y_train = self.y[train]
self.y_valid = self.y[valid]
self.n_bodies = n_bodies
self.ratio = ratio
# self.ref = ref
self.ref2 = ref2
self.ref3 = ref3
self.excluded = excluded
if excluded is None:
excluded_mask = np.ones_like(self.orders, dtype=bool)
else:
excluded_mask = ~np.isin(self.orders, excluded)
self.excluded_mask = excluded_mask
self.orders_not_excluded = self.orders[excluded_mask]
if excluded is None:
excluded_mask_original = np.ones_like(orders, dtype=bool)
else:
excluded_mask_original = ~np.isin(orders, excluded)
self.excluded_mask_original = excluded_mask_original
colors_all = np.atleast_1d(colors_all)
self.colors_not_excluded = colors_all[excluded_mask]
self.colors = colors_all
self.colors_original = colors_original = np.atleast_1d(colors_original)
self.colors_original_not_excluded = colors_original[excluded_mask_original]
self.orders_original_not_excluded = self.orders_original[excluded_mask_original]
self.markers = markers = np.atleast_1d(markers)
self.markers_not_excluded = markers[excluded_mask]
self.markerfillstyles = markerfillstyles = np.atleast_1d(markerfillstyles)
self.markerfillstyles_not_excluded = markerfillstyles[excluded_mask]
self.linestyles = linestyles = np.atleast_1d(linestyles)
self.linestyles_not_excluded = linestyles[excluded_mask]
self.kwargs = kwargs
def compute_coefficients(self, show_excluded=False, **kwargs):
ratio = self.ratio(self.X, **kwargs)
try:
ref = self.ref(self.X)
except TypeError:
ref = self.ref
c = gm.coefficients(self.y, ratio, ref, self.orders)
if not show_excluded:
c = c[:, self.excluded_mask]
return c
def plot_coefficients(self, *args, **kwargs):
raise NotImplementedError
def plot_pchol(self):
pass
def plot_md_squared(self):
pass
@docstrings.dedent
class MatterConvergenceAnalysis(ConvergenceAnalysis):
"""A convenience class to compute quantities related to nuclear matter convergence
Parameters
----------
%(ConvergenceAnalysis.parameters)s
density : ndarray
system : str
The physical system to consider. Can be 'neutron', 'symmetric', or 'difference'. Affects how to convert
between kf and density, and also the way that files are named.
fit_n2lo : str
The fit number for the NN+3N N2LO potential. Used for naming files.
fit_n3lo : str
The fit number for the NN+3N N3LO potential. Used for naming files.
Lambda : int
The Lambda regulator for the potential. Used for naming files.
body : str
Either 'NN-only' or 'NN+3N'
savefigs : bool, optional
Whether to save figures when plot_* is called. Defaults to `False`
Other Parameters
----------------
%(ConvergenceAnalysis.other_parameters)s
"""
system_strings = dict(
neutron='neutron',
symmetric='symmetric',
difference='difference',
)
system_strings_short = dict(
neutron='n',
symmetric='s',
difference='d',
)
system_math_strings = dict(
neutron='E/N',
symmetric='E/A',
difference='S_2',
)
ratio_map = dict(
kf=ratio_kf
)
MD_label = r'\mathrm{D}_{\mathrm{MD}}^2'
PC_label = r'\mathrm{D}_{\mathrm{PC}}'
CI_label = r'\mathrm{D}_{\mathrm{CI}}'
def __init__(self, X, y2, y3, orders, train, valid, ref2, ref3, ratio, density, *, system='neutron',
fit_n2lo=None, fit_n3lo=None, Lambda=None, body=None, savefigs=False,
fig_path='new_figures', **kwargs):
self.ratio_str = ratio
ratio = self.ratio_map[ratio]
color_list = ['Oranges', 'Greens', 'Blues', 'Reds', 'Purples', 'Greys']
cmaps = [plt.get_cmap(name) for name in color_list[:len(orders)]]
colors = [cmap(0.55 - 0.1 * (i == 0)) for i, cmap in enumerate(cmaps)]
body_vals = ['NN-only', 'NN+3N', '3N', 'Appended']
if body not in body_vals:
raise ValueError(f'body must be in {body_vals}')
# TODO: allow `excluded` to work properly in plots, etc.
super().__init__(
X, y2, y3, orders, train, valid, ref2, ref3, ratio, body=body, colors=colors, **kwargs)
self.system = system
self.fit_n2lo = fit_n2lo
self.fit_n3lo = fit_n3lo
self.Lambda = Lambda
self.body = body
self.savefigs = savefigs
self.fig_path = fig_path
self.system_math_string = self.system_math_strings[system]
self.density = density
self.df_joint = None
self.df_breakdown = None
self.df_ls = None
self.breakdown = None
self.breakdown_min, self.breakdown_max, self.breakdown_num = None, None, None
self.ls_min, self.ls_max, self.ls_num = None, None, None
self._breakdown_map = None
self._ls_map = None
self.ls = None
self.max_idx = None
self.logprior = None
def compute_density(self, kf):
degeneracy = None
if self.system == 'neutron':
degeneracy = 2
elif self.system == 'symmetric':
# print('warning: assuming neutron matter for testing')
# degeneracy = 2
degeneracy = 4
elif self.system == 'difference':
raise ValueError('not sure what to do for symmetry energy')
return nuclear_density(kf, degeneracy)
def compute_momentum(self, density):
degeneracy = None
if self.system == 'neutron':
degeneracy = 2
elif self.system == 'symmetric':
# print('warning: assuming neutron matter for testing')
# degeneracy = 2
degeneracy = 4
elif self.system == 'difference':
raise ValueError('not sure what to do for symmetry energy')
return fermi_momentum(density, degeneracy)
def setup_posteriors(self, max_idx, breakdown_min, breakdown_max, breakdown_num, ls_min, ls_max, ls_num,
logprior=None, max_idx_labels=None):
R"""Computes and stores the values for the breakdown and length scale posteriors.
This must be run before running functions that depend on these posteriors.
Parameters
----------
max_idx : List[int], int
All orders up to self.orders[:max_idx+1] are kept and used to compute posteriors. If a list is provided,
then the posterior is computed for each of the max_indices in the list.
breakdown_min : float
The minimum value for the breakdown scale. Will be used to compute
`np.linspace(breakdown_min, breakdown_max, breakdown_num)`.
breakdown_max : float
The maximum value for the breakdown scale. Will be used to compute
`np.linspace(breakdown_min, breakdown_max, breakdown_num)`.
breakdown_num : int
The number of breakdown scale values to use in the posterior. Will be used to compute
`np.linspace(breakdown_min, breakdown_max, breakdown_num)`.
ls_min : float
The minimum value for the length scale. Will be used to compute
`np.linspace(ls_min, ls_max, ls_num)`. if `ls_min`, `ls_max`, and `ls_num` are all `None`, then
the MAP value of the length scale will be used for the breakdown posterior. No length scale posterior
will be computed in this case.
ls_max : float
The maximum value for the length scale. Will be used to compute
`np.linspace(ls_min, ls_max, ls_num)`. if `ls_min`, `ls_max`, and `ls_num` are all `None`, then
the MAP value of the length scale will be used for the breakdown posterior. No length scale posterior
will be computed in this case.
ls_num : int
The number of length scales to use in the posterior. Will be used to compute
`np.linspace(ls_min, ls_max, ls_num)`. if `ls_min`, `ls_max`, and `ls_num` are all `None`, then
the MAP value of the length scale will be used for the breakdown posterior. No length scale posterior
will be computed in this case.
logprior : ndarray, optional, shape = (ls_num, breakdown_num)
The prior pr(breakdown, ls). If `None`, then a flat prior is used.
        Returns
        -------
        df_joint : pd.DataFrame
            The joint pr(ell, Lambda_b) pdf for each max_idx.
        df_breakdown : pd.DataFrame
            The marginal breakdown-scale pdf for each max_idx.
        df_ls : pd.DataFrame or None
            The marginal length-scale pdf, or None if no length-scale grid was given.
        """
dfs_breakdown = []
dfs_ls = []
dfs_joint = []
self.breakdown_min, self.breakdown_max, self.breakdown_num = breakdown_min, breakdown_max, breakdown_num
self.ls_min, self.ls_max, self.ls_num = ls_min, ls_max, ls_num
breakdown = np.linspace(breakdown_min, breakdown_max, breakdown_num)
if ls_min is None and ls_max is None and ls_num is None:
ls = None
else:
ls = np.linspace(ls_min, ls_max, ls_num)
breakdown_maps = []
ls_maps = []
max_idx = np.atleast_1d(max_idx)
if max_idx_labels is None:
max_idx_labels = max_idx
for idx, idx_label in zip(max_idx, max_idx_labels):
joint_pdf, breakdown_pdf, ls_pdf = self.compute_breakdown_ls_posterior(
breakdown, ls, max_idx=idx, logprior=logprior)
df_breakdown = pd.DataFrame(np.array([breakdown, breakdown_pdf]).T, columns=[r'$\Lambda_b$ [MeV]', 'pdf'])
df_breakdown['Order'] = fr'N$^{idx_label}$LO'
df_breakdown['Order Index'] = idx
df_breakdown['system'] = fr'${self.system_math_string}$'
df_breakdown['Body'] = self.body
dfs_breakdown.append(df_breakdown)
if ls is not None:
df_ls = pd.DataFrame(np.array([ls, ls_pdf]).T, columns=[r'$\ell$ [fm$^{-1}$]', 'pdf'])
df_ls['Order'] = fr'N$^{idx_label}$LO'
df_ls['Order Index'] = idx
df_ls['system'] = fr'${self.system_math_string}$'
df_ls['Body'] = self.body
dfs_ls.append(df_ls)
X = gm.cartesian(ls, breakdown)
df_joint = pd.DataFrame(X, columns=[r'$\ell$ [fm$^{-1}$]', r'$\Lambda_b$ [MeV]'])
df_joint['pdf'] = joint_pdf.ravel()
df_joint['Order'] = fr'N$^{idx_label}$LO'
df_joint['Order Index'] = idx
df_joint['system'] = fr'${self.system_math_string}$'
df_joint['Body'] = self.body
dfs_joint.append(df_joint)
map_idx = np.argmax(joint_pdf)
map_idx = np.unravel_index(map_idx, joint_pdf.shape)
breakdown_maps.append(breakdown[map_idx[1]])
if ls is not None:
ls_maps.append(ls[map_idx[0]])
df_breakdown = pd.concat(dfs_breakdown, ignore_index=True)
df_ls = None
if ls is not None:
df_ls = pd.concat(dfs_ls, ignore_index=True)
df_joint = pd.concat(dfs_joint, ignore_index=True)
self.breakdown = breakdown
self.ls = ls
self.logprior = logprior
self.max_idx = max_idx
self.max_idx_labels = max_idx_labels
self.df_joint = df_joint
self.df_breakdown = df_breakdown
self.df_ls = df_ls
self._breakdown_map = breakdown_maps
self._ls_map = ls_maps
return df_joint, df_breakdown, df_ls
@property
def breakdown_map(self):
return self._breakdown_map
@property
def ls_map(self):
return self._ls_map
def compute_underlying_graphical_diagnostic(self, breakdown, show_excluded=False, interp=False, kernel=None):
coeffs = coeffs_not_excluded = self.compute_coefficients(
breakdown=breakdown, show_excluded=show_excluded
)
colors = self.colors
markerfillstyles = self.markerfillstyles
markers = self.markers
if not show_excluded:
colors = self.colors_not_excluded
markerfillstyles = self.markerfillstyles_not_excluded
markers = self.markers_not_excluded
coeffs_not_excluded = self.compute_coefficients(breakdown=breakdown, show_excluded=False)
gp_kwargs = self.kwargs.copy()
if kernel is not None:
gp_kwargs['kernel'] = kernel
process = gm.ConjugateGaussianProcess(**gp_kwargs)
process.fit(self.X_train, coeffs_not_excluded[self.train]) # in either case, only fit to non-excluded coeffs
if interp:
mean, cov = process.predict(self.X_valid, return_cov=True, pred_noise=True)
# print(mean.shape, mean)
# print(cov)
data = coeffs[self.valid] - mean
mean = np.zeros(len(mean))
else:
mean = process.mean(self.X_valid)
cov = process.cov(self.X_valid)
data = coeffs[self.valid]
# print(mean.shape, mean)
# print(data)
# But it may be useful to visualize the diagnostics off all coefficients
graph = gm.GraphicalDiagnostic(
data, mean, cov, colors=colors, gray=gray, black=softblack,
markerfillstyles=markerfillstyles, markers=markers
)
return graph
def compute_breakdown_ls_posterior(self, breakdown, ls, max_idx=None, logprior=None):
# orders = self.orders[:max_idx + 1]
orders = self.orders
model = gm.TruncationGP(ref=self.ref, ratio=self.ratio, excluded=self.excluded, **self.kwargs)
X = self.X_train
data = self.y_train
joint_pdf, Lb_pdf, ls_pdf = compute_2d_posterior(
model, X, data, orders, breakdown, ls, logprior=logprior, max_idx=max_idx,
)
return joint_pdf, Lb_pdf, ls_pdf
def compute_best_length_scale_for_breakdown(self, breakdown, max_idx):
ord = rf'N$^{max_idx}$LO'
df_best = self.df_joint[
(self.df_joint[r'$\Lambda_b$ [MeV]'] == breakdown) &
(self.df_joint['Order'] == ord)
]
ls_max_idx = df_best['pdf'].idxmax()
return df_best.loc[ls_max_idx][r'$\ell$ [fm$^{-1}$]']
def order_index(self, order):
return np.squeeze(np.argwhere(self.orders == order))
def setup_and_fit_truncation_process(self, breakdown):
model = gm.TruncationGP(
ratio=self.ratio, ref=self.ref, excluded=self.excluded,
ratio_kws=dict(breakdown=breakdown), **self.kwargs
)
# Only update hyperparameters based on train
model.fit(self.X_train, y=self.y_train, orders=self.orders)
return model
def compute_minimum(self, order, n_samples, breakdown=None, X=None, nugget=0, cond=None):
if X is None:
X = self.X
if breakdown is None:
breakdown = self.breakdown_map[-1]
if cond is None:
cond = self.train
x = X.ravel()
# ord = self.orders == order
orders = self.orders_original
# colors = self.colors_original
if self.body == 'NN-only':
y = self.y2
elif self.body == 'NN+3N':
y = self.y3
elif self.body == 'Appended':
y = self.y3
elif self.body == '3N':
y = self.y3
else:
raise ValueError('body not in allowed values')
ord = np.squeeze(np.argwhere(orders == order))
if ord.ndim > 0:
raise ValueError('Found multiple orders that match order')
model = gm.TruncationGP(
ratio=self.ratio, ref=self.ref, excluded=self.excluded,
ratio_kws=dict(breakdown=breakdown), **self.kwargs
)
# Only update hyperparameters based on train
model.fit(self.X_train, y=self.y_train, orders=self.orders)
print(model.coeffs_process.kernel_)
# But then condition on `cond` X, y points to get a good interpolant
pred, cov = model.predict(X, order=order, return_cov=True, Xc=self.X[cond], y=y[cond, ord], kind='both')
if self.body == 'Appended':
try:
ref3_vals = self.ref3(X)
except TypeError:
ref3_vals = self.ref3
try:
ref2_vals = self.ref2(X)
except TypeError:
ref2_vals = self.ref2
ref2_vals = np.atleast_1d(ref2_vals)
ref3_vals = np.atleast_1d(ref3_vals)
# For appended, the standard reference is the 2-body one. So swap for the 3-body ref
cov_3bf = cov * (ref3_vals[:, None] * ref3_vals) / (ref2_vals[:, None] * ref2_vals)
cov = cov + cov_3bf
# pred, cov = model.predict(X, order=order, return_cov=True, kind='both')
# pred += self.y[:, ord]
# cov += np.diag(cov) * nugget * np.eye(cov.shape[0])
x_min, y_min = minimum_samples(pred, (cov + nugget * np.eye(cov.shape[0])), n=n_samples, x=x)
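        # Drop draws whose minimum falls on the last grid point, i.e. no interior minimum was found.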
is_endpoint = x_min == X[-1].ravel()
x_min = x_min[~is_endpoint]
y_min = y_min[~is_endpoint]
# Don't interpolate
# min_idx = np.argmin(self.y[:, ord])
# x_min_no_trunc, y_min_no_trunc = self.X.ravel()[min_idx], self.y[min_idx][ord]
# Do interpolate
min_idx = np.argmin(pred)
x_min_no_trunc, y_min_no_trunc = X.ravel()[min_idx], pred[min_idx]
return x_min_no_trunc, y_min_no_trunc, x_min, y_min, pred, cov
def figure_name(self, prefix, breakdown=None, ls=None, max_idx=None, include_system=True):
body = self.body
fit_n2lo = self.fit_n2lo
fit_n3lo = self.fit_n3lo
Lambda = self.Lambda
ref = self.ref
if not include_system:
system = 'x'
else:
system = self.system_strings_short[self.system]
full_name = prefix + f'sys-{system}_{body}'
if body == 'NN+3N' or body == '3N':
full_name += f'_fit-{fit_n2lo}-{fit_n3lo}'
else:
full_name += f'_fit-0-0'
full_name += f'_Lamb-{Lambda:.0f}_Q-{self.ratio_str}'
if isinstance(breakdown, tuple):
full_name += f'_Lb-{breakdown[0]:.0f}-{breakdown[1]:.0f}-{breakdown[2]:.0f}'
elif breakdown is not None:
full_name += f'_Lb-{breakdown:.0f}'
else:
full_name += f'_Lb-x'
if isinstance(ls, tuple):
full_name += f'_ls-{ls[0]:.0f}-{ls[1]:.0f}-{ls[2]:.0f}'
elif ls is not None:
full_name += f'_ls-{ls:.0f}'
else:
full_name += f'_ls-x'
try:
full_name += f'_ref-{ref:.0f}'
except TypeError: # If it's a function
pass
if max_idx is not None:
full_name += f'_midx-{max_idx}'
else:
full_name += f'_midx-x'
center = str(self.kwargs.get('center', 0)).replace('.', 'p')
disp = str(self.kwargs.get('disp', 1)).replace('.', 'p')
df = str(self.kwargs.get('df', 1)).replace('.', 'p')
scale = str(self.kwargs.get('scale', 1)).replace('.', 'p')
full_name += f'_hyp-{center}-{disp}-{df}-{scale}'
full_name = join(self.fig_path, full_name)
return full_name
def model_info(self, breakdown=None, ls=None, max_idx=None):
if breakdown is None:
breakdown = np.NaN
if ls is None:
ls = np.NaN
if max_idx is None:
max_idx = np.NaN
info = dict(
body=self.body,
fit_n2lo=self.fit_n2lo,
fit_n3lo=self.fit_n3lo,
Lambda=self.Lambda,
ref=self.ref,
center=self.kwargs.get('center', 0),
disp=self.kwargs.get('disp', 1),
df=self.kwargs.get('df', 1),
scale=self.kwargs.get('scale', 1),
breakdown=breakdown,
ls=ls,
max_idx=max_idx,
)
return info
def compute_y_label(self):
if self.system == 'neutron':
y_label = fr'Energy per Neutron '
elif self.system == 'symmetric':
y_label = 'Energy per Particle '
elif self.system == 'difference':
y_label = 'Symmetry Energy '
else:
raise ValueError('system has wrong value')
y_label += fr'${self.system_math_strings[self.system]}$'
return y_label
def setup_ticks(self, ax, is_density_primary, train, valid, show_2nd_axis=True, show_train_valid=True):
d_label = r'Density $n$ [fm$^{-3}$]'
kf_label = r'Fermi Momentum $k_\mathrm{F}$ [fm$^{-1}$]'
# ax.set_xticks(x_ticks)
# ax2.set_xticks(x_ticks)
x_min, x_max = ax.get_xlim()
if is_density_primary:
x_label = d_label
x = self.density
if show_train_valid:
x_ticks = x[train]
else:
ax.xaxis.set_major_locator(MultipleLocator(0.1))
ax.xaxis.set_minor_locator(AutoMinorLocator(2))
x_ticks = ax.get_xticks()
x_ticks = x_ticks[(x_ticks >= x_min) & (x_ticks <= x_max)]
if show_2nd_axis:
x_label2 = kf_label
x_ticks2 = self.compute_momentum(x_ticks)
# ax.set_xlabel(d_label)
# ax.set_xticks(x_ticks)
# ax.set_xticks(self.density[valid], minor=True)
#
# ax2.plot(x_ticks, ax.get_yticks().mean() * np.ones_like(x_ticks), ls='')
# ax2.set_xlabel(kf_label)
# ax2.set_xticklabels(self.compute_momentum(x_ticks))
else:
x_label = kf_label
x = self.X.ravel()
if show_train_valid:
x_ticks = x[train]
else:
ax.xaxis.set_major_locator(MultipleLocator(0.02))
ax.xaxis.set_minor_locator(AutoMinorLocator(2))
plt.draw()
x_ticks = ax.get_xticks()
x_ticks = x_ticks[(x_ticks >= x_min) & (x_ticks <= x_max)]
if show_2nd_axis:
x_label2 = d_label
x_ticks2 = self.compute_density(x_ticks)
# ax.set_xlabel(kf_label)
# x_ticks = self.X[train].ravel()
# ax.set_xticks(x_ticks)
# ax.set_xticks(self.X[valid].ravel(), minor=True)
#
# ax2.plot(x_ticks, ax.get_yticks().mean() * np.ones_like(x_ticks), ls='')
# ax2.set_xlabel(d_label)
# ax2.set_xticks(x_ticks)
# ax2.set_xticklabels(self.compute_density(x_ticks))
ax.set_xlabel(x_label)
if show_train_valid:
ax.set_xticks(x_ticks)
x_ticks_minor = x[valid]
ax.set_xticks(x_ticks_minor, minor=True)
else:
x_ticks_minor = ax.get_xticks(minor=True)
ax.tick_params(right=True)
y_label = self.compute_y_label()
ax.set_ylabel(y_label)
if show_2nd_axis:
ax2 = ax.twiny()
ax2.margins(*ax.margins()) # Give them same margins, can't change ax.margins after this!
# Plot invisible line to get ticks right
ax2.plot([x_min, x_max], ax.get_yticks().mean() * np.ones(2), ls='')
ax2.set_xlabel(x_label2)
ax2.set_xticks(x_ticks)
ax2.set_xticks(x_ticks_minor, minor=True)
ax2.set_xticklabels([f'{tick:0.2f}' for tick in x_ticks2])
return ax, ax2
return ax
def compute_std_and_kernel(self, breakdown=None):
if breakdown is None:
breakdown = self.breakdown_map[-1]
print('Using breakdown =', breakdown, 'MeV')
coeffs_not_excluded = self.compute_coefficients(breakdown=breakdown, show_excluded=False)
model = gm.ConjugateGaussianProcess(**self.kwargs)
model.fit(self.X_train, coeffs_not_excluded[self.train])
return np.sqrt(model.cbar_sq_mean_), model.kernel_
def plot_coefficients(self, breakdown=None, ax=None, show_process=False, savefig=None, return_info=False,
show_excluded=False, show_2nd_axis=True, kernel=None, show_train_valid=True, loc='best'):
if breakdown is None:
breakdown = self.breakdown_map[-1]
print('Using breakdown =', breakdown, 'MeV')
if ax is None:
fig, ax = plt.subplots(figsize=(3.4, 3.4))
kf = self.X.ravel()
density = self.density
train = self.train
if show_process:
coeffs_not_excluded = self.compute_coefficients(breakdown=breakdown, show_excluded=False)
gp_kwargs = self.kwargs.copy()
if kernel is not None:
gp_kwargs['kernel'] = kernel
model = gm.ConjugateGaussianProcess(**gp_kwargs)
model.fit(self.X_train, coeffs_not_excluded[train])
print(model.kernel_)
print('cbar mean:', np.sqrt(model.cbar_sq_mean_))
if show_excluded:
model_all = gm.ConjugateGaussianProcess(**gp_kwargs)
coeffs_all = self.compute_coefficients(breakdown=breakdown, show_excluded=True)
model_all.fit(self.X_train, coeffs_all[train])
pred, std = model_all.predict(self.X, return_std=True)
else:
pred, std = model.predict(self.X, return_std=True)
mu = model.center_
cbar = np.sqrt(model.cbar_sq_mean_)
ax.axhline(mu, 0, 1, lw=1, c='k', zorder=0)
ax.axhline(2*cbar, 0, 1, c=gray, zorder=0)
ax.axhline(-2*cbar, 0, 1, c=gray, zorder=0)
coeffs = self.compute_coefficients(breakdown=breakdown, show_excluded=show_excluded)
colors = self.colors
orders = self.orders
markers = self.markers
markerfillstyles = self.markerfillstyles
if not show_excluded:
colors = self.colors_not_excluded
orders = self.orders_not_excluded
markers = self.markers_not_excluded
markerfillstyles = self.markerfillstyles_not_excluded
light_colors = [lighten_color(c, 0.5) for c in colors]
is_density_primary = True
if is_density_primary:
x = density
else:
x = kf
for i, n in enumerate(orders):
z = i / 20
label = fr'$c_{{{n}}}$'
if self.n_bodies[i] is not None:
label = fr'$c_{{{n}}}^{{({self.n_bodies[i]})}}$'
ax.plot(
x, coeffs[:, i], c=colors[i], label=label, zorder=z,
markevery=train, marker=markers[i], fillstyle=markerfillstyles[i])
# ax.plot(x[train], coeffs[train, i], marker=markers[i], ls='', c=colors[i], zorder=z,
# fillstyle=markerfillstyles[i])
if show_process:
# ax.plot(x, pred[:, i], c=colors[i], zorder=z, ls='--')
ax.fill_between(
x, pred[:, i] + 2*std, pred[:, i] - 2*std, zorder=z,
lw=0.5, alpha=1, facecolor=light_colors[i], edgecolor=colors[i]
)
# ax.axhline(0, 0, 1, ls='--', c=gray, zorder=-1)
# ax2 = ax.twiny()
# ax2.plot(d, np.zeros_like(d), ls='', c=gray, zorder=-1) # Dummy data to set up ticks
# ax2.set_xlabel(r'Density $n$ [fm$^{-3}$]')
# y_label = self.compute_y_label()
# ax.set_ylabel(y_label)
# ax.set_xlabel(r'Fermi Momentum $k_\mathrm{F}$ [fm$^{-1}$]')
# ax.set_xticks(self.X_valid.ravel(), minor=True)
if len(orders) > 4:
ax.legend(ncol=3, loc=loc)
else:
ax.legend(ncol=2, loc=loc)
ax.margins(x=0)
self.setup_ticks(
ax, is_density_primary, train=train, valid=self.valid, show_2nd_axis=show_2nd_axis,
show_train_valid=show_train_valid
)
ylim = np.max(np.abs(ax.get_ylim()))
ax.set_ylim(-ylim, ylim)
if savefig is None:
savefig = self.savefigs
if savefig:
fig = plt.gcf()
name = self.figure_name('coeffs', breakdown=breakdown)
fig.savefig(name)
if return_info:
info = self.model_info(breakdown=breakdown)
name = path.relpath(name, self.fig_path)
info['name'] = name
return ax, info
return ax
def plot_observables(self, breakdown=None, ax=None, show_process=False, savefig=None, return_info=False,
show_excluded=False, show_2nd_axis=True, panels=False):
if breakdown is None:
breakdown = self.breakdown_map[-1]
print('Using breakdown =', breakdown, 'MeV')
if ax is None:
if panels:
fig, axes = plt.subplots(2, 2, figsize=(3.4, 3.4), sharex=True, sharey=True)
else:
fig, ax = plt.subplots(figsize=(3.4, 3.4))
axes = np.atleast_2d(ax)
else:
axes = np.atleast_2d(ax)
for ax in axes.ravel():
ax.margins(x=0.)
kf = self.X.ravel()
is_density_primary = True
if is_density_primary:
x = self.density
else:
x = kf
if show_process:
model = gm.TruncationGP(
ratio=self.ratio, ref=self.ref, excluded=self.excluded,
ratio_kws=dict(breakdown=breakdown), **self.kwargs
)
model.fit(self.X_train, y=self.y_train, orders=self.orders)
if self.body == 'NN-only':
y = self.y2
elif self.body == 'NN+3N':
y = self.y3
elif self.body == 'Appended':
y = self.y3
elif self.body == '3N':
y = self.y3
else:
raise ValueError('body not in allowed values')
# Loop through all orders and throw them out later if needed
orders = self.orders_original
colors = self.colors_original
# if not show_excluded:
# # coeffs = coeffs[:, self.excluded_mask]
# colors = self.colors_original_not_excluded
# orders = self.orders_original_not_excluded
light_colors = [lighten_color(c, 0.5) for c in colors]
print(orders)
for j in range(4):
if panels:
cycle_orders = orders[:j+1]
else:
cycle_orders = orders
if j > 0 and not panels:
break
ax = axes.ravel()[j]
order_labels = []
for i, n in enumerate(cycle_orders):
z = i / 20
if n not in self.orders_not_excluded and not show_excluded:
# Don't plot orders if we've excluded them
continue
order_label = n if n in [0, 1] else n - 1
if order_label == 0:
order_str = 'LO'
elif order_label == 1:
order_str = 'NLO'
else:
order_str = fr'N$^{order_label}$LO'
order_labels.append(order_str)
ax.plot(x, y[:, i], c=colors[i], label=order_str, zorder=z)
# ax.plot(kf[train], self.y[train, i], marker='o', ls='', c=colors[i], zorder=z)
if show_process:
_, std = model.predict(self.X, order=n, return_std=True, kind='trunc')
if self.body == 'Appended':
n_3bf = n if n >= 3 else 3 # 3-body forces don't enter until N3LO
_, std_3bf = model.predict(self.X, order=n_3bf, return_std=True, kind='trunc')
try:
ref3_vals = self.ref3(self.X)
except TypeError:
ref3_vals = self.ref3
try:
ref2_vals = self.ref2(self.X)
except TypeError:
ref2_vals = self.ref2
# For appended, the standard reference is the 2-body one. So swap for the 3-body ref
std_3bf *= ref3_vals / ref2_vals
std = np.sqrt(std**2 + std_3bf**2)
# ax.plot(x, y[:, i], c=colors[i], zorder=z, ls='--')
ax.fill_between(
x, y[:, i] + std, y[:, i] - std, zorder=z,
lw=0.5, alpha=1, facecolor=light_colors[i], edgecolor=colors[i]
)
# ax2.plot(d, self.y[:, 0], ls='', c=gray, zorder=-1) # Dummy data to set up ticks
# ax.axhline(0, 0, 1, ls='--', c=gray, zorder=-1)
# if self.system == 'neutron':
# y_label = fr'Energy per Neutron '
# elif self.system == 'symmetric':
# y_label = 'Energy per Particle '
# elif self.system == 'difference':
# y_label = 'Symmetry Energy '
# else:
# raise ValueError('system has wrong value')
#
# y_label += fr'${self.system_math_strings[self.system]}$'
# y_label = self.compute_y_label()
# ax.set_ylabel(y_label)
# ax.set_xlabel(r'Fermi Momentum $k_\mathrm{F}$ [fm$^{-1}$]')
# ax.set_xticks(self.X_valid.ravel(), minor=True)
# if self.system == 'neutron':
# kf_ticks = np.array([1.2, 1.4, 1.6, 1.8])
# elif self.system == 'symmetric':
# kf_ticks = np.array([1., 1.2, 1.4])
# else:
# kf_ticks = np.array([1., 1.2, 1.4])
# ax.set_xticks(kf_ticks)
for ax in axes.ravel():
ax.xaxis.set_major_locator(MultipleLocator(0.2))
# ax2 = ax.twiny()
# ax2.margins(x=0.)
ax.set_xlim(x[0], x[-1])
if self.system == 'symmetric':
self.plot_empirical_saturation(ax, is_density_primary=is_density_primary)
if panels:
# both_axes = self.setup_ticks(
# ax, is_density_primary, train=self.train, valid=self.valid, show_2nd_axis=False)
for ax in axes.ravel():
if is_density_primary:
ax.xaxis.set_major_locator(MultipleLocator(0.1))
else:
ax.xaxis.set_major_locator(MultipleLocator(0.2))
ax.xaxis.set_minor_locator(AutoMinorLocator(2))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.tick_params(right=True, top=True, which='both')
d_label = r'Density $n$ [fm$^{-3}$]'
axes[1, 0].set_xlabel(d_label)
axes[1, 1].set_xlabel(d_label)
from .graphs import add_top_order_legend
fig = plt.gcf()
dark_colors = [darken_color(color) for color in colors]
add_top_order_legend(fig, axes[0, 0], axes[0, 1], order_labels, colors, light_colors, dark_colors)
else:
ax.legend()
both_axes = self.setup_ticks(
ax, is_density_primary, train=self.train, valid=self.valid, show_2nd_axis=show_2nd_axis)
if show_2nd_axis:
both_axes[-1].set_xlim(x[0], x[-1])
if savefig is None:
savefig = self.savefigs
if savefig:
fig = plt.gcf()
name = self.figure_name('obs_', breakdown=breakdown)
fig.savefig(name)
if return_info:
info = self.model_info(breakdown=breakdown)
info['name'] = path.relpath(name, self.fig_path)
return ax, info
return ax
def plot_joint_breakdown_ls(self, max_idx, return_info=False):
system_str = fr'${self.system_math_string}$'
order_str = fr'N$^{max_idx}$LO'
fig = joint2dplot(self.df_ls, self.df_breakdown, self.df_joint, system=system_str,
order=order_str, data_str=self.system_math_string)
breakdown = (self.breakdown_min, self.breakdown_max, self.breakdown_num)
ls = (self.ls_min, self.ls_max, self.ls_num)
if self.savefigs:
name = self.figure_name('ls-Lb-2d_', breakdown=breakdown, ls=ls, max_idx=max_idx)
fig.savefig(name)
if return_info:
info = self.model_info(max_idx=max_idx)
info['name'] = path.relpath(name, self.fig_path)
return fig, info
return fig
def plot_md_squared(
self, breakdown=None, ax=None, savefig=None, return_info=False, interp=False, kernel=None,
show_excluded=False
):
R"""Plots the squared Mahalanobis distance.
Parameters
----------
breakdown : float, optional
The value for the breakdown scale to use in the diagnostics. If `None`, then its MAP value is used.
ax : matplotlib.axes.Axes, optional
The axis on which to draw the coefficient plots and diagnostics
savefig : bool, optional
Whether to save the figure. If `None`, this is taken from `self.savefigs`.
Returns
-------
ax : matplotlib.axes.Axes
The axis object
"""
if ax is None:
fig, ax = plt.subplots(figsize=(1, 3.2))
if breakdown is None:
breakdown = self.breakdown_map[-1]
print('Using breakdown =', breakdown, 'MeV')
graph = self.compute_underlying_graphical_diagnostic(
breakdown=breakdown, interp=interp, kernel=kernel, show_excluded=show_excluded)
obs = self.system_math_string
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.margins(y=0)
ax = graph.md_squared(type='box', trim=False, title=None, xlabel=rf'${self.MD_label}({obs})$', ax=ax)
ax.set_xticks([0])
ax.set_xticklabels(['0'], fontdict=dict(color='w'))
ax.tick_params(width=0, axis='x')
# plt.xticklabels()
ymin, ymax = ax.get_ylim()
ax.set_ylim(np.max([np.floor(ymin), 0]), np.ceil(ymax))
if savefig is None:
savefig = self.savefigs
if savefig:
fig = plt.gcf()
name = self.figure_name('md_under_', breakdown=breakdown)
fig.savefig(name)
if return_info:
info = self.model_info(breakdown=breakdown)
info['name'] = path.relpath(name, self.fig_path)
return ax, info
return ax
def plot_pchol(
self, breakdown=None, ax=None, savefig=None, return_info=False, interp=False, kernel=None,
show_excluded=False
):
R"""Plots the pivoted Cholesky diagnostic.
Parameters
----------
breakdown : float, optional
The value for the breakdown scale to use in the diagnostic. If `None`, then its MAP value is used.
ax : matplotlib.axes.Axes, optional
The axis on which to draw the coefficient plots and diagnostics
savefig : bool, optional
Whether to save the figure. If `None`, this is taken from `self.savefigs`.
Returns
-------
ax : matplotlib.axes.Axes
The axis object
"""
if ax is None:
fig, ax = plt.subplots(figsize=(3.2, 3.2))
if breakdown is None:
breakdown = self.breakdown_map[-1]
print('Using breakdown =', breakdown, 'MeV')
graph = self.compute_underlying_graphical_diagnostic(
breakdown=breakdown, interp=interp, kernel=kernel, show_excluded=show_excluded
)
obs = self.system_math_string
with plt.rc_context({"text.usetex": True, "text.latex.preview": True}):
ax = graph.pivoted_cholesky_errors(ax=ax, title=None)
# ax = graph.individual_errors(ax=ax, title=None)
# ax.text(0.5, 0.95, rf'${self.PC_label}({obs})$', bbox=text_bbox, transform=ax.transAxes, va='top',
# ha='center')
# Hijack a legend to get the 'best' location to place the text
line, = ax.plot([])
# Remove the handle from the legend box.
ax.legend(
[line], [rf'${self.PC_label}({obs})$'], handlelength=0,
loc='best', handletextpad=0)
fig = plt.gcf()
if savefig is None:
savefig = self.savefigs
if savefig:
name = self.figure_name('pc_under_', breakdown=breakdown)
fig.savefig(name)
if return_info:
info = self.model_info(breakdown=breakdown)
info['name'] = path.relpath(name, self.fig_path)
return ax, info
return ax
def plot_coeff_diagnostics(
self, breakdown=None, fig=None, savefig=None, return_info=False,
interp=False, kernel=None, show_excluded=False):
R"""Plots coefficients, the squared Mahalanobis distance, and the pivoted Cholesky diagnostic.
Parameters
----------
breakdown : float, optional
The value for the breakdown scale to use in the diagnostics. If `None`, then its MAP value is used.
fig : matplotlib.figure.Figure, optional
The Figure on which to draw the coefficient plots and diagnostics
savefig : bool, optional
Whether to save the figure. If `None`, this is taken from `self.savefigs`.
Returns
-------
fig : matplotlib.figure.Figure
The figure object
"""
if fig is None:
fig = plt.figure(figsize=(7, 3.2), constrained_layout=True)
if breakdown is None:
breakdown = self.breakdown_map[-1]
print('Using breakdown =', breakdown, 'MeV')
spec = fig.add_gridspec(nrows=1, ncols=7)
ax_cs = fig.add_subplot(spec[:, :3])
ax_md = fig.add_subplot(spec[:, 3])
ax_pc = fig.add_subplot(spec[:, 4:])
show_2nd_axis = self.system != self.system_strings['difference']
self.plot_coefficients(
breakdown=breakdown, ax=ax_cs, show_process=True, savefig=False, show_2nd_axis=show_2nd_axis,
kernel=kernel, show_excluded=show_excluded,
)
self.plot_md_squared(
breakdown=breakdown, ax=ax_md, savefig=False, interp=interp, kernel=kernel,
show_excluded=show_excluded,
)
self.plot_pchol(
breakdown=breakdown, ax=ax_pc, savefig=False, interp=interp, kernel=kernel,
show_excluded=show_excluded,
)
if savefig is None:
savefig = self.savefigs
if savefig:
name = self.figure_name('cn_diags_', breakdown=breakdown)
# fig.savefig(name, metadata={'hi': [1, 2, 3], 'wtf': 7})
fig.savefig(name)
if return_info:
info = self.model_info(breakdown=breakdown)
info['name'] = path.relpath(name, self.fig_path)
return fig, info
return fig
def plot_credible_diagnostic(
self, breakdown=None, ax=None, savefig=None, truncation=False, show_excluded=False, all_points=False,
show_legend=True, ylabel=r'Empirical Coverage [$\%$]',
):
if ax is None:
fig, ax = plt.subplots(figsize=(3.2, 3.2))
if breakdown is None:
breakdown = self.breakdown_map[-1]
print('Using breakdown =', breakdown, 'MeV')
if truncation:
model = gm.TruncationGP(
ratio=self.ratio, ref=self.ref, excluded=self.excluded,
ratio_kws=dict(breakdown=breakdown), **self.kwargs
)
model.fit(self.X_train, y=self.y_train, orders=self.orders)
if all_points:
X = self.X
y = self.y
else:
X = self.X_valid
y = self.y_valid
if show_excluded:
orders = self.orders
colors = self.colors
else:
y = y[:, self.excluded_mask]
orders = self.orders_not_excluded
colors = self.colors_not_excluded
# Get the covariance without any Q junk
# norm_trunc_cov = model.cov(X, start=0, end=0)
ref = model.ref(X)
norm_trunc_cov = ref[:, None] * ref * model.coeffs_process.cov(X=X)
# Get the between-order residuals
residuals = np.diff(y)
Q = self.ratio(X)
# Normalize them based on the approximate size of the next order correction
# This is so that we can use the same Q-less covariance for each correction
norm_residuals = residuals / Q[:, None] ** orders[1:]
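            # Rationale: each correction is modeled as y_k - y_{k-1} = ref * Q**k * c_k,
            # so dividing by Q**k leaves ref * c_k, whose covariance is exactly the
            # Q-independent `norm_trunc_cov` computed above.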
graph = gm.GraphicalDiagnostic(
norm_residuals, mean=np.zeros(X.shape[0]),
cov=norm_trunc_cov, colors=colors, gray=gray, black=softblack
)
else:
graph = self.compute_underlying_graphical_diagnostic(breakdown=breakdown, show_excluded=show_excluded)
obs = self.system_math_string
intervals = np.linspace(1e-5, 1, 100)
band_perc = [0.68, 0.95]
if show_excluded:
linestyles = self.linestyles
else:
linestyles = self.linestyles_not_excluded
ax = graph.credible_interval(
intervals=intervals, band_perc=band_perc,
# title=rf'${self.CI_label}({obs})$',
title=None,
ax=ax,
xlabel=r'Credible Interval [$\%$]', ylabel=ylabel,
linestyles=linestyles
)
ax.set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1])
ax.set_xticklabels([0, 20, 40, 60, 80, 100])
ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
ax.set_yticklabels([0, 20, 40, 60, 80, 100])
if truncation and show_legend:
handles, labels = ax.get_legend_handles_labels()
ax.set_title('')
ax.legend(handles=handles, labels=[r'LO', r'NLO', r'N$^{2}$LO'], title=rf'${self.CI_label}({obs})$')
fig = plt.gcf()
if savefig is None:
savefig = self.savefigs
if savefig:
name = self.figure_name(f'ci_diag_trunc-{truncation}_', breakdown=breakdown)
fig.savefig(name)
return fig
def plot_empirical_saturation(self, ax=None, is_density_primary=True):
from matplotlib.patches import Rectangle
# From Drischler 2018 arXiv:1710.08220
n0 = 0.164
n0_std = 0.007
y0 = -15.86
# y0_std = np.sqrt(0.37 ** 2 + 0.2 ** 2)
y0_std = 0.57 # They add the errors linearly
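        # With these numbers the empirical box spans n = 0.157-0.171 fm^-3 and
        # E/A = -16.43 to -15.29 MeV, which is what the Rectangle below encodes.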
left = n0 - n0_std
right = n0 + n0_std
if not is_density_primary:
left = self.compute_momentum(left)
right = self.compute_momentum(right)
rect = Rectangle(
(left, y0 - y0_std), width=right - left, height=2 * y0_std,
facecolor='lightgray', edgecolor='gray', alpha=0.4, zorder=9,
)
ax.add_patch(rect)
return ax
def plot_saturation(self, breakdown=None, order=4, ax=None, savefig=None, color=None, nugget=0, X=None,
cond=None, n_samples=1000, is_density_primary=True, **kwargs):
if breakdown is None:
breakdown = self.breakdown_map[-1]
print('Using breakdown =', breakdown, 'MeV')
if ax is None:
ax = plt.gca()
if X is None:
X = self.X
x_min_no_trunc, y_min_no_trunc, x_min, y_min, pred, cov = self.compute_minimum(
order=order, n_samples=n_samples, breakdown=breakdown, X=X, nugget=nugget, cond=cond
)
if 'zorder' not in kwargs:
zorder = order / 10
else:
            # Pop from kwargs itself so that 'zorder' is not also passed via **kwargs below
            zorder = kwargs.pop('zorder')
if cond is None:
cond = slice(None, None)
# ord_idx = self.order_index(order)
ord_idx = np.squeeze(np.argwhere(self.orders_original == order))
approx_xlim = x_min.min() - 0.03, x_min.max() + 0.03
approx_xlim_mask = (self.X[cond].ravel() >= approx_xlim[0]) & (self.X[cond].ravel() <= approx_xlim[1])
# is_density_primary = True
if is_density_primary:
x_min_no_trunc = self.compute_density(x_min_no_trunc)
x_min = self.compute_density(x_min)
x_all = self.compute_density(X.ravel())
else:
x_all = X.ravel()
if color is None:
color = self.colors_original[ord_idx]
light_color = lighten_color(color)
# TODO: Add scatter plots
# compute z-scores from all EDFs?
stdv = np.sqrt(np.diag(cov))
from matplotlib.collections import LineCollection
# ax.fill_between(X.ravel(), pred+stdv, pred-stdv, color=color, zorder=0, alpha=0.5)
# ax.plot(X.ravel(), pred, c=color)
ax.fill_between(
x_all, pred+2*stdv, pred-2*stdv, facecolor=light_color,
edgecolor=color, alpha=0.3, zorder=zorder
)
print('Order', order)
print('x:', np.mean(x_min), '+/-', np.std(x_min))
print('y:', np.mean(y_min), '+/-', np.std(y_min))
print('mean:\n', np.array([np.mean(x_min), np.mean(y_min)]))
print('cov:\n', np.cov(x_min, y_min))
ellipse = confidence_ellipse(
x_min, y_min, ax=ax, n_std=2, facecolor=light_color,
edgecolor=color, zorder=zorder, show_scatter=True, **kwargs
)
col = LineCollection([
np.column_stack((x_all, pred)),
np.column_stack((x_all, pred + 2 * stdv)),
np.column_stack((x_all, pred - 2 * stdv))
], colors=[color, color, color], linewidths=[1.2, 0.7, 0.7], linestyles=['-', '-', '-'], zorder=zorder + 1e-2)
ax.add_collection(col, autolim=False)
# ax.plot(x_min_no_trunc, y_min_no_trunc, marker='x', ls='', markerfacecolor=color,
# markeredgecolor='k', markeredgewidth=0.5, label='True', zorder=10)
ax.scatter(x_min_no_trunc, y_min_no_trunc, marker='X', facecolor=color,
edgecolors='k', label=fr'min($y_{order}$)', zorder=10)
# ax.scatter(x_min, y_min, marker='X', facecolor=color,
# edgecolors='k', label=fr'min($y_{order}$)', zorder=10)
if self.body == 'NN-only':
y = self.y2
elif self.body == 'NN+3N':
y = self.y3
elif self.body == 'Appended':
y = self.y3
elif self.body == '3N':
y = self.y3
else:
raise ValueError('body not in allowed values')
if is_density_primary:
# ax.plot(self.density[cond][approx_xlim_mask], y[cond, ord_idx][approx_xlim_mask],
# ls='', marker='o', c=color, zorder=zorder)
ax.set_xlabel(r'Density $n$ [fm$^{-3}$]')
else:
# ax.plot(self.X[cond][approx_xlim_mask], y[cond, ord_idx][approx_xlim_mask],
# ls='', marker='o', c=color, zorder=zorder)
ax.set_xlabel(r'Fermi Momentum $k_\mathrm{F}$ [fm$^{-1}$]')
ax.set_ylabel(r'Energy per Particle $E/A$')
# kf_ticks = ax.get_xticks()
# d_ticks = self.compute_momentum(kf_ticks)
# k_min, k_max = ax.get_xlim()
# d = self.compute_density(np.array([k_min, k_max]))
# ax2 = ax.twiny()
# ax2.plot(d_ticks, np.average(y_min) * np.ones_like(d_ticks), ls='')
# ax2.set_xticks(d_ticks)
# is_density_primary = True
self.plot_empirical_saturation(ax=ax, is_density_primary=is_density_primary)
if savefig:
pass
return ax, ellipse
def plot_multi_saturation(self, breakdown=None, orders=None, ax=None, savefig=None, nugget=0, X=None,
cond=None, n_samples=1000, legend_kwargs=None, **kwargs):
if orders is None:
orders = [3, 4]
if ax is None:
ax = plt.gca()
if legend_kwargs is None:
legend_kwargs = dict()
if breakdown is None:
breakdown = self.breakdown_map[-1]
print('Using breakdown =', breakdown, 'MeV')
ellipses = []
ellipses_labels = []
for order in orders:
# idx = self.order_index(order)
idx = np.squeeze(np.argwhere(self.orders_original == order))
_, ellipse = self.plot_saturation(
breakdown=breakdown, order=order, ax=ax, savefig=False, color=self.colors_original[idx],
nugget=nugget, X=X, cond=cond, n_samples=n_samples, **kwargs)
ellipses.append(ellipse)
ellipses_labels.append(rf'$2\sigma(y_{{{order}}}+\delta y_{{{order}}})$')
ax.margins(x=0)
handles, labels = ax.get_legend_handles_labels()
handles = handles + ellipses
labels = labels + ellipses_labels
ax.legend(handles, labels, **legend_kwargs)
fig = plt.gcf()
# fig.tight_layout()
if savefig:
ords = [f'-{order}' for order in orders]
ords = ''.join(ords)
name = self.figure_name(f'sat_ellipse_ords{ords}_', breakdown=breakdown)
print(name)
fig.savefig(name)
return ax
class CorrKernel(Kernel):
R"""A basic kernel with rho on the off-diagonal blocks. Will assume that all 4 blocks are the same size.
The diagonal blocks are filled with ones, and the off-diagonal blocks are filled with rho.
"""
def __init__(self, rho=0.5, rho_bounds=(1e-5, 1), std1=1, std2=1):
self.rho = rho
self.rho_bounds = rho_bounds
self.std1 = std1
self.std2 = std2
@property
def hyperparameter_rho(self):
from sklearn.gaussian_process.kernels import Hyperparameter
return Hyperparameter("rho", "numeric", self.rho_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
nx = ny = len(X)
if Y is not None:
ny = len(Y)
ix = nx // 2
iy = ny // 2
stds_x = np.concatenate((self.std1 * np.ones(ix), self.std2 * np.ones(ix)))
stds_y = np.concatenate((self.std1 * np.ones(iy), self.std2 * np.ones(iy)))
K = np.ones((nx, ny), dtype=float)
K[ix:, :iy] = K[:ix, iy:] = self.rho
K *= stds_x[:, None] * stds_y
if eval_gradient:
dK = np.zeros((nx, ny, 1), dtype=float)
dK[ix:, :iy] = dK[:ix, iy:] = 1.
dK *= stds_x[:, None, None] * stds_y[None, :, None]
return K, dK
return K
def diag(self, X):
return np.ones(X.shape[0])
def is_stationary(self):
return False
def __repr__(self):
return "{0}(rho={1:.3g})".format(
self.__class__.__name__, self.rho)
|
[
"seaborn.utils.despine",
"matplotlib.pyplot.rc_context",
"numpy.log",
"numpy.column_stack",
"numpy.isin",
"matplotlib.ticker.MaxNLocator",
"numpy.cov",
"numpy.arange",
"numpy.exp",
"numpy.concatenate",
"warnings.warn",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"numpy.result_type",
"numpy.floor",
"gsum.coefficients",
"sklearn.gaussian_process.kernels.Hyperparameter",
"gsum.median_pdf",
"numpy.diag",
"joblib.Parallel",
"numpy.true_divide",
"matplotlib.pyplot.subplots",
"numpy.block",
"scipy.stats.multivariate_normal",
"gsum.partials",
"multiprocessing.cpu_count",
"gsum.TruncationGP",
"matplotlib.pyplot.margins",
"numpy.diag_indices_from",
"numpy.mean",
"numpy.max",
"gsum.hpd_pdf",
"docrep.DocstringProcessor",
"numpy.ceil",
"numpy.ones",
"numpy.average",
"matplotlib.patches.Patch",
"numpy.around",
"numpy.std",
"numpy.ones_like",
"matplotlib.patches.Rectangle",
"numpy.argwhere",
"gsum.GraphicalDiagnostic",
"pandas.concat",
"matplotlib.pyplot.GridSpec",
"matplotlib.colors.to_rgb",
"gsum.cartesian",
"numpy.atleast_2d",
"numpy.diff",
"sklearn.gaussian_process.kernels.ConstantKernel",
"numpy.linspace",
"numpy.min",
"numpy.squeeze",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.show",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.figure",
"gsum.ConjugateGaussianProcess",
"numpy.zeros",
"joblib.delayed",
"numpy.sqrt",
"numpy.array",
"matplotlib.ticker.AutoMinorLocator",
"seaborn.color_palette",
"numpy.asarray",
"numpy.unravel_index",
"numpy.argmin",
"pandas.DataFrame",
"os.path.relpath",
"numpy.eye",
"numpy.trapz",
"sklearn.gaussian_process.kernels.RBF",
"numpy.argmax",
"matplotlib.pyplot.draw",
"numpy.atleast_1d",
"matplotlib.pyplot.get_cmap",
"colorsys.hls_to_rgb",
"os.path.join",
"numpy.zeros_like"
] |
[((768, 795), 'docrep.DocstringProcessor', 'docrep.DocstringProcessor', ([], {}), '()\n', (793, 795), False, 'import docrep\n'), ((2330, 2371), 'numpy.trapz', 'np.trapz', (['posterior_2d'], {'x': 'lengths', 'axis': '(0)'}), '(posterior_2d, x=lengths, axis=0)\n', (2338, 2371), True, 'import numpy as np\n'), ((2393, 2430), 'numpy.trapz', 'np.trapz', (['breakdown_pdf'], {'x': 'breakdowns'}), '(breakdown_pdf, x=breakdowns)\n', (2401, 2430), True, 'import numpy as np\n'), ((3011, 3038), 'gsum.median_pdf', 'gm.median_pdf', ([], {'pdf': 'pdf', 'x': 'x'}), '(pdf=pdf, x=x)\n', (3024, 3038), True, 'import gsum as gm\n'), ((3737, 3773), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(height, height)'}), '(figsize=(height, height))\n', (3747, 3773), True, 'import matplotlib.pyplot as plt\n'), ((3784, 3818), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(ratio + 1)', '(ratio + 1)'], {}), '(ratio + 1, ratio + 1)\n', (3796, 3818), True, 'import matplotlib.pyplot as plt\n'), ((4745, 4783), 'seaborn.utils.despine', 'utils.despine', ([], {'ax': 'ax_marg_x', 'left': '(True)'}), '(ax=ax_marg_x, left=True)\n', (4758, 4783), False, 'from seaborn import utils\n'), ((4788, 4828), 'seaborn.utils.despine', 'utils.despine', ([], {'ax': 'ax_marg_y', 'bottom': '(True)'}), '(ax=ax_marg_y, bottom=True)\n', (4801, 4828), False, 'from seaborn import utils\n'), ((6106, 6123), 'numpy.atleast_1d', 'np.atleast_1d', (['ls'], {}), '(ls)\n', (6119, 6123), True, 'import numpy as np\n'), ((6358, 6385), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (6383, 6385), False, 'import multiprocessing\n'), ((6900, 6941), 'numpy.trapz', 'np.trapz', (['joint_pdf'], {'x': 'breakdown', 'axis': '(-1)'}), '(joint_pdf, x=breakdown, axis=-1)\n', (6908, 6941), True, 'import numpy as np\n'), ((6981, 7021), 'numpy.trapz', 'np.trapz', (['ratio_pdf'], {'x': 'breakdown', 'axis': '(0)'}), '(ratio_pdf, x=breakdown, axis=0)\n', (6989, 7021), True, 'import numpy as np\n'), ((14106, 14151), 'scipy.stats.multivariate_normal', 'stats.multivariate_normal', ([], {'mean': 'mean', 'cov': 'cov'}), '(mean=mean, cov=cov)\n', (14131, 14151), False, 'from scipy import stats\n'), ((14191, 14217), 'numpy.argmin', 'np.argmin', (['samples'], {'axis': '(1)'}), '(samples, axis=1)\n', (14200, 14217), True, 'import numpy as np\n'), ((14230, 14253), 'numpy.min', 'np.min', (['samples'], {'axis': '(1)'}), '(samples, axis=1)\n', (14236, 14253), True, 'import numpy as np\n'), ((16855, 16911), 'colorsys.hls_to_rgb', 'colorsys.hls_to_rgb', (['c[0]', '(1 - amount * (1 - c[1]))', 'c[2]'], {}), '(c[0], 1 - amount * (1 - c[1]), c[2])\n', (16874, 16911), False, 'import colorsys\n'), ((21936, 21949), 'numpy.asarray', 'np.asarray', (['m'], {}), '(m)\n', (21946, 21949), True, 'import numpy as np\n'), ((22269, 22299), 'numpy.array', 'array', (['m'], {'ndmin': '(2)', 'dtype': 'dtype'}), '(m, ndmin=2, dtype=dtype)\n', (22274, 22299), False, 'from numpy import array, average, dot\n'), ((23922, 23966), 'numpy.average', 'average', (['X'], {'axis': '(1)', 'weights': 'w', 'returned': '(True)'}), '(X, axis=1, weights=w, returned=True)\n', (23929, 23966), False, 'from numpy import array, average, dot\n'), ((24520, 24543), 'numpy.true_divide', 'np.true_divide', (['(1)', 'fact'], {}), '(1, fact)\n', (24534, 24543), True, 'import numpy as np\n'), ((25296, 25334), 'numpy.block', 'np.block', (['[[K1, K_off], [K_off.T, K2]]'], {}), '([[K1, K_off], [K_off.T, K2]])\n', (25304, 25334), True, 'import numpy as np\n'), ((25622, 25641), 'numpy.atleast_1d', 
'np.atleast_1d', (['ref1'], {}), '(ref1)\n', (25635, 25641), True, 'import numpy as np\n'), ((25653, 25672), 'numpy.atleast_1d', 'np.atleast_1d', (['ref2'], {}), '(ref2)\n', (25666, 25672), True, 'import numpy as np\n'), ((26133, 26152), 'numpy.atleast_1d', 'np.atleast_1d', (['ref1'], {}), '(ref1)\n', (26146, 26152), True, 'import numpy as np\n'), ((26164, 26183), 'numpy.atleast_1d', 'np.atleast_1d', (['ref2'], {}), '(ref2)\n', (26177, 26183), True, 'import numpy as np\n'), ((26976, 27014), 'numpy.block', 'np.block', (['[[K1, K_off], [K_off.T, K2]]'], {}), '([[K1, K_off], [K_off.T, K2]])\n', (26984, 27014), True, 'import numpy as np\n'), ((1275, 1352), 'matplotlib.patches.Ellipse', 'mpatches.Ellipse', ([], {'xy': 'center', 'width': '(width + xdescent)', 'height': '(height + ydescent)'}), '(xy=center, width=width + xdescent, height=height + ydescent)\n', (1291, 1352), True, 'import matplotlib.patches as mpatches\n'), ((2053, 2068), 'numpy.exp', 'np.exp', (['log_ell'], {}), '(log_ell)\n', (2059, 2068), True, 'import numpy as np\n'), ((2097, 2112), 'numpy.log', 'np.log', (['lengths'], {}), '(lengths)\n', (2103, 2112), True, 'import numpy as np\n'), ((2964, 2997), 'gsum.hpd_pdf', 'gm.hpd_pdf', ([], {'pdf': 'pdf', 'alpha': 'p', 'x': 'x'}), '(pdf=pdf, alpha=p, x=x)\n', (2974, 2997), True, 'import gsum as gm\n'), ((3190, 3199), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3197, 3199), True, 'import matplotlib.pyplot as plt\n'), ((6019, 6061), 'numpy.exp', 'np.exp', (['model.coeffs_process.kernel_.theta'], {}), '(model.coeffs_process.kernel_.theta)\n', (6025, 6061), True, 'import numpy as np\n'), ((6801, 6834), 'numpy.trapz', 'np.trapz', (['joint_pdf'], {'x': 'ls', 'axis': '(0)'}), '(joint_pdf, x=ls, axis=0)\n', (6809, 6834), True, 'import numpy as np\n'), ((6865, 6886), 'numpy.squeeze', 'np.squeeze', (['joint_pdf'], {}), '(joint_pdf)\n', (6875, 6886), True, 'import numpy as np\n'), ((7060, 7090), 'numpy.trapz', 'np.trapz', (['ls_pdf'], {'x': 'ls', 'axis': '(0)'}), '(ls_pdf, x=ls, axis=0)\n', (7068, 7090), True, 'import numpy as np\n'), ((7389, 7454), 'matplotlib.pyplot.rc_context', 'plt.rc_context', (["{'text.usetex': True, 'text.latex.preview': True}"], {}), "({'text.usetex': True, 'text.latex.preview': True})\n", (7403, 7454), True, 'import matplotlib.pyplot as plt\n'), ((7499, 7518), 'matplotlib.cm.get_cmap', 'get_cmap', (['cmap_name'], {}), '(cmap_name)\n', (7507, 7518), False, 'from matplotlib.cm import get_cmap\n'), ((8743, 8753), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8751, 8753), True, 'import matplotlib.pyplot as plt\n'), ((10448, 10486), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(3.4, 3.4)'}), '(1, 1, figsize=(3.4, 3.4))\n', (10460, 10486), True, 'import matplotlib.pyplot as plt\n'), ((10905, 10968), 'seaborn.color_palette', 'sns.color_palette', (['palette'], {'n_colors': 'n_colors', 'desat': 'saturation'}), '(palette, n_colors=n_colors, desat=saturation)\n', (10922, 10968), True, 'import seaborn as sns\n'), ((11140, 11153), 'matplotlib.pyplot.margins', 'plt.margins', ([], {}), '()\n', (11151, 11153), True, 'import matplotlib.pyplot as plt\n'), ((22061, 22090), 'numpy.result_type', 'np.result_type', (['m', 'np.float64'], {}), '(m, np.float64)\n', (22075, 22090), True, 'import numpy as np\n'), ((22113, 22126), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (22123, 22126), True, 'import numpy as np\n'), ((22227, 22259), 'numpy.result_type', 'np.result_type', (['m', 'y', 'np.float64'], {}), '(m, y, np.float64)\n', (22241, 
22259), True, 'import numpy as np\n'), ((22455, 22497), 'numpy.array', 'array', (['y'], {'copy': '(False)', 'ndmin': '(2)', 'dtype': 'dtype'}), '(y, copy=False, ndmin=2, dtype=dtype)\n', (22460, 22497), False, 'from numpy import array, average, dot\n'), ((22573, 22603), 'numpy.concatenate', 'np.concatenate', (['(X, y)'], {'axis': '(0)'}), '((X, y), axis=0)\n', (22587, 22603), True, 'import numpy as np\n'), ((22815, 22848), 'numpy.asarray', 'np.asarray', (['fweights'], {'dtype': 'float'}), '(fweights, dtype=float)\n', (22825, 22848), True, 'import numpy as np\n'), ((23415, 23448), 'numpy.asarray', 'np.asarray', (['aweights'], {'dtype': 'float'}), '(aweights, dtype=float)\n', (23425, 23448), True, 'import numpy as np\n'), ((24263, 24348), 'warnings.warn', 'warnings.warn', (['"""Degrees of freedom <= 0 for slice"""', 'RuntimeWarning'], {'stacklevel': '(3)'}), "('Degrees of freedom <= 0 for slice', RuntimeWarning, stacklevel=3\n )\n", (24276, 24348), False, 'import warnings\n'), ((24679, 24713), 'numpy.sqrt', 'np.sqrt', (['((ls1 ** 2 + ls2 ** 2) / 2)'], {}), '((ls1 ** 2 + ls2 ** 2) / 2)\n', (24686, 24713), True, 'import numpy as np\n'), ((24728, 24774), 'numpy.sqrt', 'np.sqrt', (['(2 * ls1 * ls2 / (ls1 ** 2 + ls2 ** 2))'], {}), '(2 * ls1 * ls2 / (ls1 ** 2 + ls2 ** 2))\n', (24735, 24774), True, 'import numpy as np\n'), ((24814, 24841), 'sklearn.gaussian_process.kernels.ConstantKernel', 'ConstantKernel', (['(std1 * std2)'], {}), '(std1 * std2)\n', (24828, 24841), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((24844, 24855), 'sklearn.gaussian_process.kernels.RBF', 'RBF', (['ls_off'], {}), '(ls_off)\n', (24847, 24855), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((25010, 25035), 'sklearn.gaussian_process.kernels.ConstantKernel', 'ConstantKernel', (['(std1 ** 2)'], {}), '(std1 ** 2)\n', (25024, 25035), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((25038, 25046), 'sklearn.gaussian_process.kernels.RBF', 'RBF', (['ls1'], {}), '(ls1)\n', (25041, 25046), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((25363, 25386), 'numpy.diag_indices_from', 'np.diag_indices_from', (['K'], {}), '(K)\n', (25383, 25386), True, 'import numpy as np\n'), ((25838, 25858), 'numpy.sqrt', 'np.sqrt', (['(1 - Q1 ** 2)'], {}), '(1 - Q1 ** 2)\n', (25845, 25858), True, 'import numpy as np\n'), ((25881, 25901), 'numpy.sqrt', 'np.sqrt', (['(1 - Q2 ** 2)'], {}), '(1 - Q2 ** 2)\n', (25888, 25901), True, 'import numpy as np\n'), ((26422, 26437), 'numpy.sqrt', 'np.sqrt', (['Q_num1'], {}), '(Q_num1)\n', (26429, 26437), True, 'import numpy as np\n'), ((26440, 26460), 'numpy.sqrt', 'np.sqrt', (['(1 - Q1 ** 2)'], {}), '(1 - Q1 ** 2)\n', (26447, 26460), True, 'import numpy as np\n'), ((26474, 26489), 'numpy.sqrt', 'np.sqrt', (['Q_num2'], {}), '(Q_num2)\n', (26481, 26489), True, 'import numpy as np\n'), ((26492, 26512), 'numpy.sqrt', 'np.sqrt', (['(1 - Q2 ** 2)'], {}), '(1 - Q2 ** 2)\n', (26499, 26512), True, 'import numpy as np\n'), ((26523, 26548), 'sklearn.gaussian_process.kernels.ConstantKernel', 'ConstantKernel', (['(std1 ** 2)'], {}), '(std1 ** 2)\n', (26537, 26548), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((26551, 26559), 'sklearn.gaussian_process.kernels.RBF', 'RBF', (['ls1'], {}), '(ls1)\n', (26554, 26559), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((27043, 27066), 
'numpy.diag_indices_from', 'np.diag_indices_from', (['K'], {}), '(K)\n', (27063, 27066), True, 'import numpy as np\n'), ((30632, 30653), 'numpy.atleast_1d', 'np.atleast_1d', (['orders'], {}), '(orders)\n', (30645, 30653), True, 'import numpy as np\n'), ((35133, 35158), 'numpy.atleast_1d', 'np.atleast_1d', (['colors_all'], {}), '(colors_all)\n', (35146, 35158), True, 'import numpy as np\n'), ((35303, 35333), 'numpy.atleast_1d', 'np.atleast_1d', (['colors_original'], {}), '(colors_original)\n', (35316, 35333), True, 'import numpy as np\n'), ((35542, 35564), 'numpy.atleast_1d', 'np.atleast_1d', (['markers'], {}), '(markers)\n', (35555, 35564), True, 'import numpy as np\n'), ((35676, 35707), 'numpy.atleast_1d', 'np.atleast_1d', (['markerfillstyles'], {}), '(markerfillstyles)\n', (35689, 35707), True, 'import numpy as np\n'), ((35825, 35850), 'numpy.atleast_1d', 'np.atleast_1d', (['linestyles'], {}), '(linestyles)\n', (35838, 35850), True, 'import numpy as np\n'), ((36172, 36220), 'gsum.coefficients', 'gm.coefficients', (['self.y', 'ratio', 'ref', 'self.orders'], {}), '(self.y, ratio, ref, self.orders)\n', (36187, 36220), True, 'import gsum as gm\n'), ((43274, 43330), 'numpy.linspace', 'np.linspace', (['breakdown_min', 'breakdown_max', 'breakdown_num'], {}), '(breakdown_min, breakdown_max, breakdown_num)\n', (43285, 43330), True, 'import numpy as np\n'), ((43552, 43574), 'numpy.atleast_1d', 'np.atleast_1d', (['max_idx'], {}), '(max_idx)\n', (43565, 43574), True, 'import numpy as np\n'), ((45312, 45355), 'pandas.concat', 'pd.concat', (['dfs_breakdown'], {'ignore_index': '(True)'}), '(dfs_breakdown, ignore_index=True)\n', (45321, 45355), True, 'import pandas as pd\n'), ((45480, 45519), 'pandas.concat', 'pd.concat', (['dfs_joint'], {'ignore_index': '(True)'}), '(dfs_joint, ignore_index=True)\n', (45489, 45519), True, 'import pandas as pd\n'), ((46834, 46874), 'gsum.ConjugateGaussianProcess', 'gm.ConjugateGaussianProcess', ([], {}), '(**gp_kwargs)\n', (46861, 46874), True, 'import gsum as gm\n'), ((47542, 47681), 'gsum.GraphicalDiagnostic', 'gm.GraphicalDiagnostic', (['data', 'mean', 'cov'], {'colors': 'colors', 'gray': 'gray', 'black': 'softblack', 'markerfillstyles': 'markerfillstyles', 'markers': 'markers'}), '(data, mean, cov, colors=colors, gray=gray, black=\n softblack, markerfillstyles=markerfillstyles, markers=markers)\n', (47564, 47681), True, 'import gsum as gm\n'), ((47913, 48004), 'gsum.TruncationGP', 'gm.TruncationGP', ([], {'ref': 'self.ref', 'ratio': 'self.ratio', 'excluded': 'self.excluded'}), '(ref=self.ref, ratio=self.ratio, excluded=self.excluded, **\n self.kwargs)\n', (47928, 48004), True, 'import gsum as gm\n'), ((51710, 51725), 'numpy.argmin', 'np.argmin', (['pred'], {}), '(pred)\n', (51719, 51725), True, 'import numpy as np\n'), ((53579, 53609), 'os.path.join', 'join', (['self.fig_path', 'full_name'], {}), '(self.fig_path, full_name)\n', (53583, 53609), False, 'from os.path import join\n'), ((58177, 58219), 'gsum.ConjugateGaussianProcess', 'gm.ConjugateGaussianProcess', ([], {}), '(**self.kwargs)\n', (58204, 58219), True, 'import gsum as gm\n'), ((79519, 79545), 'numpy.linspace', 'np.linspace', (['(1e-05)', '(1)', '(100)'], {}), '(1e-05, 1, 100)\n', (79530, 79545), True, 'import numpy as np\n'), ((80469, 80478), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (80476, 80478), True, 'import matplotlib.pyplot as plt\n'), ((81239, 81374), 'matplotlib.patches.Rectangle', 'Rectangle', (['(left, y0 - y0_std)'], {'width': '(right - left)', 'height': '(2 * y0_std)', 'facecolor': 
'"""lightgray"""', 'edgecolor': '"""gray"""', 'alpha': '(0.4)', 'zorder': '(9)'}), "((left, y0 - y0_std), width=right - left, height=2 * y0_std,\n facecolor='lightgray', edgecolor='gray', alpha=0.4, zorder=9)\n", (81248, 81374), False, 'from matplotlib.patches import Rectangle\n'), ((87333, 87342), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (87340, 87342), True, 'import matplotlib.pyplot as plt\n'), ((88190, 88239), 'sklearn.gaussian_process.kernels.Hyperparameter', 'Hyperparameter', (['"""rho"""', '"""numeric"""', 'self.rho_bounds'], {}), "('rho', 'numeric', self.rho_bounds)\n", (88204, 88239), False, 'from sklearn.gaussian_process.kernels import Hyperparameter\n'), ((88597, 88627), 'numpy.ones', 'np.ones', (['(nx, ny)'], {'dtype': 'float'}), '((nx, ny), dtype=float)\n', (88604, 88627), True, 'import numpy as np\n'), ((88981, 89000), 'numpy.ones', 'np.ones', (['X.shape[0]'], {}), '(X.shape[0])\n', (88988, 89000), True, 'import numpy as np\n'), ((2291, 2307), 'numpy.max', 'np.max', (['log_like'], {}), '(log_like)\n', (2297, 2307), True, 'import numpy as np\n'), ((6742, 6758), 'numpy.max', 'np.max', (['log_like'], {}), '(log_like)\n', (6748, 6758), True, 'import numpy as np\n'), ((8012, 8034), 'numpy.zeros_like', 'np.zeros_like', (['like_Lb'], {}), '(like_Lb)\n', (8025, 8034), True, 'import numpy as np\n'), ((8204, 8226), 'numpy.zeros_like', 'np.zeros_like', (['ls_vals'], {}), '(ls_vals)\n', (8217, 8226), True, 'import numpy as np\n'), ((11678, 11704), 'numpy.trapz', 'np.trapz', (['pdf_vals', 'x_vals'], {}), '(pdf_vals, x_vals)\n', (11686, 11704), True, 'import numpy as np\n'), ((13085, 13142), 'matplotlib.patches.Patch', 'Patch', ([], {'facecolor': 'color', 'edgecolor': 'darkgray', 'label': 'leg_val'}), '(facecolor=color, edgecolor=darkgray, label=leg_val)\n', (13090, 13142), False, 'from matplotlib.patches import Patch\n'), ((16830, 16842), 'matplotlib.colors.to_rgb', 'mc.to_rgb', (['c'], {}), '(c)\n', (16839, 16842), True, 'import matplotlib.colors as mc\n'), ((25080, 25105), 'sklearn.gaussian_process.kernels.ConstantKernel', 'ConstantKernel', (['(std2 ** 2)'], {}), '(std2 ** 2)\n', (25094, 25105), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((25108, 25116), 'sklearn.gaussian_process.kernels.RBF', 'RBF', (['ls2'], {}), '(ls2)\n', (25111, 25116), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((25140, 25165), 'sklearn.gaussian_process.kernels.ConstantKernel', 'ConstantKernel', (['(std2 ** 2)'], {}), '(std2 ** 2)\n', (25154, 25165), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((25168, 25176), 'sklearn.gaussian_process.kernels.RBF', 'RBF', (['ls1'], {}), '(ls1)\n', (25171, 25176), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((26593, 26618), 'sklearn.gaussian_process.kernels.ConstantKernel', 'ConstantKernel', (['(std2 ** 2)'], {}), '(std2 ** 2)\n', (26607, 26618), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((26621, 26629), 'sklearn.gaussian_process.kernels.RBF', 'RBF', (['ls2'], {}), '(ls2)\n', (26624, 26629), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((26653, 26678), 'sklearn.gaussian_process.kernels.ConstantKernel', 'ConstantKernel', (['(std2 ** 2)'], {}), '(std2 ** 2)\n', (26667, 26678), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((26681, 26689), 'sklearn.gaussian_process.kernels.RBF', 
'RBF', (['ls1'], {}), '(ls1)\n', (26684, 26689), False, 'from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Kernel\n'), ((31321, 31371), 'gsum.coefficients', 'gm.coefficients', (['y2', 'ratio_vals', 'ref2_vals', 'orders'], {}), '(y2, ratio_vals, ref2_vals, orders)\n', (31336, 31371), True, 'import gsum as gm\n'), ((31389, 31444), 'gsum.coefficients', 'gm.coefficients', (['(y3 - y2)', 'ratio_vals', 'ref3_vals', 'orders'], {}), '(y3 - y2, ratio_vals, ref3_vals, orders)\n', (31404, 31444), True, 'import gsum as gm\n'), ((32427, 32447), 'numpy.array', 'np.array', (['orders_all'], {}), '(orders_all)\n', (32435, 32447), True, 'import numpy as np\n'), ((32521, 32570), 'gsum.partials', 'gm.partials', (['c', 'ratio_vals', 'ref2_vals', 'orders_all'], {}), '(c, ratio_vals, ref2_vals, orders_all)\n', (32532, 32570), True, 'import gsum as gm\n'), ((34655, 34692), 'numpy.ones_like', 'np.ones_like', (['self.orders'], {'dtype': 'bool'}), '(self.orders, dtype=bool)\n', (34667, 34692), True, 'import numpy as np\n'), ((34939, 34971), 'numpy.ones_like', 'np.ones_like', (['orders'], {'dtype': 'bool'}), '(orders, dtype=bool)\n', (34951, 34971), True, 'import numpy as np\n'), ((38400, 38418), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['name'], {}), '(name)\n', (38412, 38418), True, 'import matplotlib.pyplot as plt\n'), ((43449, 43484), 'numpy.linspace', 'np.linspace', (['ls_min', 'ls_max', 'ls_num'], {}), '(ls_min, ls_max, ls_num)\n', (43460, 43484), True, 'import numpy as np\n'), ((44634, 44661), 'gsum.cartesian', 'gm.cartesian', (['ls', 'breakdown'], {}), '(ls, breakdown)\n', (44646, 44661), True, 'import gsum as gm\n'), ((44685, 44755), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': "['$\\\\ell$ [fm$^{-1}$]', '$\\\\Lambda_b$ [MeV]']"}), "(X, columns=['$\\\\ell$ [fm$^{-1}$]', '$\\\\Lambda_b$ [MeV]'])\n", (44697, 44755), True, 'import pandas as pd\n'), ((45068, 45088), 'numpy.argmax', 'np.argmax', (['joint_pdf'], {}), '(joint_pdf)\n', (45077, 45088), True, 'import numpy as np\n'), ((45111, 45153), 'numpy.unravel_index', 'np.unravel_index', (['map_idx', 'joint_pdf.shape'], {}), '(map_idx, joint_pdf.shape)\n', (45127, 45153), True, 'import numpy as np\n'), ((45424, 45460), 'pandas.concat', 'pd.concat', (['dfs_ls'], {'ignore_index': '(True)'}), '(dfs_ls, ignore_index=True)\n', (45433, 45460), True, 'import pandas as pd\n'), ((48679, 48712), 'numpy.argwhere', 'np.argwhere', (['(self.orders == order)'], {}), '(self.orders == order)\n', (48690, 48712), True, 'import numpy as np\n'), ((49835, 49863), 'numpy.argwhere', 'np.argwhere', (['(orders == order)'], {}), '(orders == order)\n', (49846, 49863), True, 'import numpy as np\n'), ((50805, 50829), 'numpy.atleast_1d', 'np.atleast_1d', (['ref2_vals'], {}), '(ref2_vals)\n', (50818, 50829), True, 'import numpy as np\n'), ((50854, 50878), 'numpy.atleast_1d', 'np.atleast_1d', (['ref3_vals'], {}), '(ref3_vals)\n', (50867, 50878), True, 'import numpy as np\n'), ((58300, 58328), 'numpy.sqrt', 'np.sqrt', (['model.cbar_sq_mean_'], {}), '(model.cbar_sq_mean_)\n', (58307, 58328), True, 'import numpy as np\n'), ((58751, 58783), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.4, 3.4)'}), '(figsize=(3.4, 3.4))\n', (58763, 58783), True, 'import matplotlib.pyplot as plt\n'), ((59141, 59181), 'gsum.ConjugateGaussianProcess', 'gm.ConjugateGaussianProcess', ([], {}), '(**gp_kwargs)\n', (59168, 59181), True, 'import gsum as gm\n'), ((59805, 59833), 'numpy.sqrt', 'np.sqrt', (['model.cbar_sq_mean_'], {}), '(model.cbar_sq_mean_)\n', (59812, 
59833), True, 'import numpy as np\n'), ((62444, 62453), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (62451, 62453), True, 'import matplotlib.pyplot as plt\n'), ((63400, 63417), 'numpy.atleast_2d', 'np.atleast_2d', (['ax'], {}), '(ax)\n', (63413, 63417), True, 'import numpy as np\n'), ((69116, 69125), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (69123, 69125), True, 'import matplotlib.pyplot as plt\n'), ((69677, 69686), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (69684, 69686), True, 'import matplotlib.pyplot as plt\n'), ((71601, 71631), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(1, 3.2)'}), '(figsize=(1, 3.2))\n', (71613, 71631), True, 'import matplotlib.pyplot as plt\n'), ((71993, 72018), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (72004, 72018), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator, MaxNLocator\n'), ((72395, 72408), 'numpy.ceil', 'np.ceil', (['ymax'], {}), '(ymax)\n', (72402, 72408), True, 'import numpy as np\n'), ((72514, 72523), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (72521, 72523), True, 'import matplotlib.pyplot as plt\n'), ((73620, 73652), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.2, 3.2)'}), '(figsize=(3.2, 3.2))\n', (73632, 73652), True, 'import matplotlib.pyplot as plt\n'), ((74001, 74066), 'matplotlib.pyplot.rc_context', 'plt.rc_context', (["{'text.usetex': True, 'text.latex.preview': True}"], {}), "({'text.usetex': True, 'text.latex.preview': True})\n", (74015, 74066), True, 'import matplotlib.pyplot as plt\n'), ((74662, 74671), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (74669, 74671), True, 'import matplotlib.pyplot as plt\n'), ((75962, 76015), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 3.2)', 'constrained_layout': '(True)'}), '(figsize=(7, 3.2), constrained_layout=True)\n', (75972, 76015), True, 'import matplotlib.pyplot as plt\n'), ((77673, 77705), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.2, 3.2)'}), '(figsize=(3.2, 3.2))\n', (77685, 77705), True, 'import matplotlib.pyplot as plt\n'), ((78854, 78864), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (78861, 78864), True, 'import numpy as np\n'), ((81821, 81830), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (81828, 81830), True, 'import matplotlib.pyplot as plt\n'), ((82338, 82380), 'numpy.argwhere', 'np.argwhere', (['(self.orders_original == order)'], {}), '(self.orders_original == order)\n', (82349, 82380), True, 'import numpy as np\n'), ((83052, 83064), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (83059, 83064), True, 'import numpy as np\n'), ((83470, 83484), 'numpy.mean', 'np.mean', (['x_min'], {}), '(x_min)\n', (83477, 83484), True, 'import numpy as np\n'), ((83493, 83506), 'numpy.std', 'np.std', (['x_min'], {}), '(x_min)\n', (83499, 83506), True, 'import numpy as np\n'), ((83528, 83542), 'numpy.mean', 'np.mean', (['y_min'], {}), '(y_min)\n', (83535, 83542), True, 'import numpy as np\n'), ((83551, 83564), 'numpy.std', 'np.std', (['y_min'], {}), '(y_min)\n', (83557, 83564), True, 'import numpy as np\n'), ((83659, 83679), 'numpy.cov', 'np.cov', (['x_min', 'y_min'], {}), '(x_min, y_min)\n', (83665, 83679), True, 'import numpy as np\n'), ((86343, 86352), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (86350, 86352), True, 'import matplotlib.pyplot as plt\n'), ((88756, 88790), 'numpy.zeros', 'np.zeros', (['(nx, ny, 1)'], {'dtype': 'float'}), '((nx, ny, 1), 
dtype=float)\n', (88764, 88790), True, 'import numpy as np\n'), ((11900, 11916), 'numpy.max', 'np.max', (['pdf_vals'], {}), '(pdf_vals)\n', (11906, 11916), True, 'import numpy as np\n'), ((22394, 22406), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (22402, 22406), True, 'import numpy as np\n'), ((32388, 32399), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (32396, 32399), True, 'import numpy as np\n'), ((34736, 34766), 'numpy.isin', 'np.isin', (['self.orders', 'excluded'], {}), '(self.orders, excluded)\n', (34743, 34766), True, 'import numpy as np\n'), ((35024, 35049), 'numpy.isin', 'np.isin', (['orders', 'excluded'], {}), '(orders, excluded)\n', (35031, 35049), True, 'import numpy as np\n'), ((56299, 56309), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (56307, 56309), True, 'import matplotlib.pyplot as plt\n'), ((59311, 59339), 'numpy.sqrt', 'np.sqrt', (['model.cbar_sq_mean_'], {}), '(model.cbar_sq_mean_)\n', (59318, 59339), True, 'import numpy as np\n'), ((59399, 59439), 'gsum.ConjugateGaussianProcess', 'gm.ConjugateGaussianProcess', ([], {}), '(**gp_kwargs)\n', (59426, 59439), True, 'import gsum as gm\n'), ((62663, 62696), 'os.path.relpath', 'path.relpath', (['name', 'self.fig_path'], {}), '(name, self.fig_path)\n', (62675, 62696), False, 'from os import path\n'), ((63184, 63248), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(3.4, 3.4)', 'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, figsize=(3.4, 3.4), sharex=True, sharey=True)\n', (63196, 63248), True, 'import matplotlib.pyplot as plt\n'), ((63293, 63325), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.4, 3.4)'}), '(figsize=(3.4, 3.4))\n', (63305, 63325), True, 'import matplotlib.pyplot as plt\n'), ((63349, 63366), 'numpy.atleast_2d', 'np.atleast_2d', (['ax'], {}), '(ax)\n', (63362, 63366), True, 'import numpy as np\n'), ((68059, 68079), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.2)'], {}), '(0.2)\n', (68074, 68079), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator, MaxNLocator\n'), ((69902, 69935), 'os.path.relpath', 'path.relpath', (['name', 'self.fig_path'], {}), '(name, self.fig_path)\n', (69914, 69935), False, 'from os import path\n'), ((70716, 70749), 'os.path.relpath', 'path.relpath', (['name', 'self.fig_path'], {}), '(name, self.fig_path)\n', (70728, 70749), False, 'from os import path\n'), ((72744, 72777), 'os.path.relpath', 'path.relpath', (['name', 'self.fig_path'], {}), '(name, self.fig_path)\n', (72756, 72777), False, 'from os import path\n'), ((77319, 77352), 'os.path.relpath', 'path.relpath', (['name', 'self.fig_path'], {}), '(name, self.fig_path)\n', (77331, 77352), False, 'from os import path\n'), ((83911, 83941), 'numpy.column_stack', 'np.column_stack', (['(x_all, pred)'], {}), '((x_all, pred))\n', (83926, 83941), True, 'import numpy as np\n'), ((83955, 83996), 'numpy.column_stack', 'np.column_stack', (['(x_all, pred + 2 * stdv)'], {}), '((x_all, pred + 2 * stdv))\n', (83970, 83996), True, 'import numpy as np\n'), ((84010, 84051), 'numpy.column_stack', 'np.column_stack', (['(x_all, pred - 2 * stdv)'], {}), '((x_all, pred - 2 * stdv))\n', (84025, 84051), True, 'import numpy as np\n'), ((86709, 86751), 'numpy.argwhere', 'np.argwhere', (['(self.orders_original == order)'], {}), '(self.orders_original == order)\n', (86720, 86751), True, 'import numpy as np\n'), ((6419, 6465), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores', 'prefer': '"""processes"""'}), "(n_jobs=num_cores, 
prefer='processes')\n", (6427, 6465), False, 'from joblib import Parallel, delayed\n'), ((22883, 22902), 'numpy.around', 'np.around', (['fweights'], {}), '(fweights)\n', (22892, 22902), True, 'import numpy as np\n'), ((43895, 43931), 'numpy.array', 'np.array', (['[breakdown, breakdown_pdf]'], {}), '([breakdown, breakdown_pdf])\n', (43903, 43931), True, 'import numpy as np\n'), ((51343, 51363), 'numpy.eye', 'np.eye', (['cov.shape[0]'], {}), '(cov.shape[0])\n', (51349, 51363), True, 'import numpy as np\n'), ((55321, 55341), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.1)'], {}), '(0.1)\n', (55336, 55341), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator, MaxNLocator\n'), ((55386, 55405), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', (['(2)'], {}), '(2)\n', (55402, 55405), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator, MaxNLocator\n'), ((56196, 56217), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.02)'], {}), '(0.02)\n', (56211, 56217), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator, MaxNLocator\n'), ((56262, 56281), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', (['(2)'], {}), '(2)\n', (56278, 56281), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator, MaxNLocator\n'), ((57612, 57622), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (57619, 57622), True, 'import numpy as np\n'), ((68757, 68776), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', (['(2)'], {}), '(2)\n', (68773, 68776), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator, MaxNLocator\n'), ((68821, 68840), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', (['(2)'], {}), '(2)\n', (68837, 68840), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator, MaxNLocator\n'), ((72374, 72388), 'numpy.floor', 'np.floor', (['ymin'], {}), '(ymin)\n', (72382, 72388), True, 'import numpy as np\n'), ((75010, 75043), 'os.path.relpath', 'path.relpath', (['name', 'self.fig_path'], {}), '(name, self.fig_path)\n', (75022, 75043), False, 'from os import path\n'), ((79218, 79238), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (79226, 79238), True, 'import numpy as np\n'), ((83601, 83615), 'numpy.mean', 'np.mean', (['x_min'], {}), '(x_min)\n', (83608, 83615), True, 'import numpy as np\n'), ((83617, 83631), 'numpy.mean', 'np.mean', (['y_min'], {}), '(y_min)\n', (83624, 83631), True, 'import numpy as np\n'), ((88462, 88473), 'numpy.ones', 'np.ones', (['ix'], {}), '(ix)\n', (88469, 88473), True, 'import numpy as np\n'), ((88487, 88498), 'numpy.ones', 'np.ones', (['ix'], {}), '(ix)\n', (88494, 88498), True, 'import numpy as np\n'), ((88546, 88557), 'numpy.ones', 'np.ones', (['iy'], {}), '(iy)\n', (88553, 88557), True, 'import numpy as np\n'), ((88571, 88582), 'numpy.ones', 'np.ones', (['iy'], {}), '(iy)\n', (88578, 88582), True, 'import numpy as np\n'), ((7730, 7751), 'numpy.exp', 'np.exp', (['(-0.5 * r ** 2)'], {}), '(-0.5 * r ** 2)\n', (7736, 7751), True, 'import numpy as np\n'), ((44308, 44330), 'numpy.array', 'np.array', (['[ls, ls_pdf]'], {}), '([ls, ls_pdf])\n', (44316, 44330), True, 'import numpy as np\n'), ((66611, 66643), 'numpy.sqrt', 'np.sqrt', (['(std ** 2 + std_3bf ** 2)'], {}), '(std ** 2 + std_3bf ** 2)\n', (66618, 66643), True, 'import numpy as np\n'), ((68601, 68621), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.1)'], {}), '(0.1)\n', (68616, 68621), False, 'from matplotlib.ticker import 
MultipleLocator, AutoMinorLocator, MaxNLocator\n'), ((68692, 68712), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.2)'], {}), '(0.2)\n', (68707, 68712), False, 'from matplotlib.ticker import MultipleLocator, AutoMinorLocator, MaxNLocator\n'), ((6479, 6517), 'joblib.delayed', 'delayed', (['model.log_marginal_likelihood'], {}), '(model.log_marginal_likelihood)\n', (6486, 6517), False, 'from joblib import Parallel, delayed\n'), ((7757, 7778), 'numpy.arange', 'np.arange', (['(9)', '(0)', '(-0.5)'], {}), '(9, 0, -0.5)\n', (7766, 7778), True, 'import numpy as np\n'), ((6525, 6536), 'numpy.log', 'np.log', (['ls_'], {}), '(ls_)\n', (6531, 6536), True, 'import numpy as np\n')]
|
# Standard imports
import keras as k
import tensorflow as tf
import numpy as np
"""
NOTE:
All functions in this file are directly adopted from continuum mechanics fundamentals
and hence do not need further introduction. In order not to inflate this file
unnecessarily with comments (and thereby sacrifice readability), we decided to mostly omit
the type of comment blocks we put in all other functions of this project. These
comment blocks are only present where we deemed them necessary.
Explanations of the continuum mechanics basics used here can be found in the article
and/or in standard textbooks on the subject.
The functions in this file can be sorted into three groups:
- basic continuum mechanics functions
- collective continuum mechanics functions (that chain basic functions in a meaningful
order)
- Simple analytical strain-energy functions (for test runs or as artificial data
sources)
"""
##########################################################################################
##########################################################################################
###### BASIC CONTINUUM MECHANICS FUNCTIONS ###############################################
##########################################################################################
##########################################################################################
def wrapper(numTens, numDir):
"""
Returns all basic continuum mechanics functions which have a direct dependency on the
number of generalized structural tensors or preferred directions to use. The returned
functions are tailored to the desired specifications.
Parameters
----------
numTens : int
Number of generalized structural tensors to use (at least 1).
numDir : int
Number of preferred directions to use (0 for isotropy, more than 0 for anisotropy).
Returns
-------
ten2_H : function
A function for generalized structural tensors.
invariants_I : function
A function for generalized invariants I.
invariants_J : function
A function for generalized invariants J.
"""
def ten2_H(L, w): # Generalized structural tensors: H_r = \sum_i w_ri * L_i [?,numTens,3,3]
batchSize = tf.shape(w)[0]
# Create L_0 and add it to L
shaper = batchSize*tf.constant([1,0,0,0]) + tf.constant([0,1,1,1])
L_0 = 1.0/3.0 * tf.tile(tf.keras.backend.expand_dims(tf.keras.backend.expand_dims(tf.eye(3),0),0), shaper)
if numDir > 0:
L = tf.concat([L_0, L], axis=1)
else:
L = L_0
# Expand L (to get one for each numTens)
shaper = numTens*tf.constant([0,1,0,0,0]) + tf.constant([1,0,1,1,1])
L = tf.tile(tf.keras.backend.expand_dims(L, 1), shaper)
# Expand w
shaper = tf.constant([1,1,1,3])
w = tf.tile(tf.keras.backend.expand_dims(w, 3), shaper)
shaper = tf.constant([1,1,1,1,3])
w = tf.tile(tf.keras.backend.expand_dims(w, 4), shaper)
# Multiply L with weights
L_weighted = tf.math.multiply(L, w)
# Sum them up for the corresponding H
H = tf.math.reduce_sum(L_weighted, axis=2)
return H
def invariants_I(C, H): # Generalized invariants I: I_r = trace(C*H_r) [?,numTens]
shaper = tf.constant([1,numTens,1,1])
C_tile = tf.tile(tf.keras.backend.expand_dims(C, 1), shaper)
return tf.linalg.trace(tf.matmul(C_tile,H))
def invariants_J(C, H): # Generalized invariants J: J_r = trace(cofactor(C)*H_r) [?,numTens]
shaper = tf.constant([1,numTens,1,1])
C_tile = tf.tile(tf.keras.backend.expand_dims(C, 1), shaper)
detC_tile = tf.linalg.det(C_tile)
shaper = tf.constant([1,1,3])
detC_tile = tf.tile(tf.keras.backend.expand_dims(detC_tile, 2), shaper)
shaper = tf.constant([1,1,1,3])
detC_tile = tf.tile(tf.keras.backend.expand_dims(detC_tile, 3), shaper)
invTransC = tf.linalg.inv(tf.transpose(C_tile, perm=[0, 1, 3, 2]))
mul = tf.math.multiply(detC_tile, invTransC)
matmul = tf.matmul(mul, H)
return tf.linalg.trace(matmul)
return ten2_H, invariants_I, invariants_J
def defGrad_ut(lam): # Deformation gradient for incompressible uniaxial tension loading [?,3,3]
F = np.zeros([len(lam), 3, 3])
F[:,0,0] = lam
F[:,1,1] = 1.0/(np.sqrt(lam))
F[:,2,2] = 1.0/(np.sqrt(lam))
return F
def defGrad_bt(lam): # Deformation gradient for incompressible equi-biaxial loading [?,3,3]
F = np.zeros([len(lam), 3, 3])
F[:,0,0] = lam
F[:,1,1] = lam
F[:,2,2] = 1.0/lam**2
return F
def defGrad_ps(lam): # Deformation gradient for incompressible pure shear loading [?,3,3]
F = np.zeros([len(lam), 3, 3])
F[:,0,0] = lam
F[:,1,1] = 1/lam
F[:,2,2] = 1.0
return F
def ten2_C(F): # Right Cauchy-Green tensor: C = F^T * F [?,3,3]
return tf.linalg.matmul(F,F,transpose_a=True)
def ten2_F_isoRef(F): # Deformation gradient in reference configuration [?,3,3]
    # In order for the other formulae to work, we need the correct batch dimension to produce enough identity (eye) tensors
shaper = tf.shape(F)[0]
shaper = shaper*tf.constant([1,0,0]) + tf.constant([0,1,1])
F_isoRef = tf.tile(tf.keras.backend.expand_dims(tf.eye(3),0), shaper)
return F_isoRef
def ten2_L(dir): # Structural tensor L_i = l_i (x) l_i [?,numDir,3,3]
dir = tf.keras.backend.expand_dims(dir, 3)
dir_t = tf.transpose(dir, perm=[0, 1, 3, 2])
L = tf.linalg.matmul(dir, dir_t)
return L
def invariant_I3(C): # Third invariant of a tensor C: I3 = det(C) [?,1]
return tf.keras.backend.expand_dims(tf.linalg.det(C), 1)
def invariants2principalStretches(I1_arr, I2_arr, I3_arr): # Calculates the principal stretches based on invariants of C [only used for one specific kind of plot]
# Itskov, 2015, Tensor Algebra and Tensor Analysis for Engineers, 4th edition, p. 103-104
dim = I1_arr.shape
eig = np.empty((dim[0],3,), dtype=np.complex_)
eig[:,:] = np.NaN
for i in range(dim[0]):
I1 = I1_arr[i]
I2 = I2_arr[i]
I3 = I3_arr[i]
if np.abs(np.power(I1,2)-3.*I2) > 1e-6:
nom = 2.*np.power(I1,3) - 9.*np.multiply(I1,I2) + 27.*I3
denom = 2.*np.power(np.power(I1,2) - 3*I2,1.5)
theta = np.arccos(nom/denom)
for k in [1, 2, 3]:
eig[i,k-1] = (I1 + 2*np.sqrt(np.power(I1,2)-3.*I2)*np.cos((theta+2*np.pi*(k-1.))/3.))/3.
else:
for k in [1, 2, 3]:
eig[i,k-1] = I1/3. + 1./3.*np.power(27.*I3-np.power(I1,3), 1./3.) * (np.cos(2./3.*np.pi*k) + (0+1j)*np.sin(2./3.*np.pi*k))
principalStretch = np.sqrt(eig)
return principalStretch
def ten2_P(Psi, F): # First Piola Kirchhoff stress tensor: P = dPsi / dF [?,3,3]
der = tf.gradients(Psi, F, unconnected_gradients='zero')
return der[0]
def ten2_P_lagMul(P_iso, F, lagMul): # Lagrange multiplier for incompressibility [?,1]
FtransInv = tf.linalg.inv(tf.transpose(F, perm=[0, 2, 1]))
lagMul = tf.tile(tf.keras.backend.expand_dims(lagMul,2), tf.constant([1,3,3]))
lastTerm = tf.math.multiply(lagMul, FtransInv)
return tf.math.subtract(P_iso, lastTerm)
def ten2_S(P, F): # Second Piola Kirchhoff stress tensor: S = F^-1 * P [?,3,3]
return tf.matmul(tf.linalg.inv(F), P)
def ten2_sigma(P, F, J): # Cauchy stress tensor: sigma = J^-1 * P * F^T [?,3,3]
OneOverJ = tf.tile(tf.keras.backend.expand_dims(tf.math.divide(1.0,J),2), tf.constant([1,3,3]))
return tf.math.multiply(OneOverJ, tf.matmul(P, tf.transpose(F, perm=[0, 2, 1])))
##########################################################################################
##########################################################################################
###### COLLECTIVE CONTINUUM MECHANICS FUNCTIONS ##########################################
##########################################################################################
##########################################################################################
def pre_Psi(numExtra, numTens, numDir, w_model, dir_model): # Deals with everything before the strain-energy is used (deformation measures, structural tensors, invariants)
ten2_H, invariants_I, invariants_J = wrapper(numTens, numDir)
if numExtra == 0:
extra = []
else:
extra = k.layers.Input(shape=(numExtra,), name='extra') # INPUT
# Deformation measures
F = k.layers.Input(shape=(3,3,), name='F') # INPUT
C = k.layers.Lambda(lambda F: ten2_C(F), name='C' )(F)
# Directions and structure tensors
if numDir == 0:
dir = [] # we do not need directions (and hence their sub-ANN) at all
w = tf.ones([tf.shape(F)[0],numTens,1]) # we do not need a sub-ANN to get the weights
L = []
else:
dir = dir_model(extra)
w = w_model(extra)
L = k.layers.Lambda(lambda dir: ten2_L(dir), name='L')(dir)
# Generalized structure tensors
H = k.layers.Lambda(lambda x: ten2_H(x[0], x[1]), name='H')([L, w])
# Generalized invariants
inv_I = k.layers.Lambda(lambda x: invariants_I(x[0], x[1]), name='invariants_I' )([C,H])
inv_J = k.layers.Lambda(lambda x: invariants_J(x[0], x[1]), name='invariants_J' )([C,H])
inv_III_C = k.layers.Lambda(lambda C: invariant_I3(C) , name='invariant_III_C')(C)
# Determination of the eact reference configuration
F_isoRef = k.layers.Lambda(lambda F: ten2_F_isoRef(F), output_shape=(None,3,3), name='F_isoRef' )(F)
C_isoRef = k.layers.Lambda(lambda F: ten2_C(F) , name='C_isoRef' )(F_isoRef)
inv_I_isoRef = k.layers.Lambda(lambda x: invariants_I(x[0], x[1]) , name='invariants_I_isoRef' )([C_isoRef,H])
inv_J_isoRef = k.layers.Lambda(lambda x: invariants_J(x[0], x[1]) , name='invariants_J_isoRef' )([C_isoRef,H])
inv_III_C_isoRef = k.layers.Lambda(lambda C_isoRef: invariant_I3(C_isoRef) , name='invariant_III_C_isoRef')(C_isoRef)
return F, extra, C, inv_I, inv_J, inv_III_C, F_isoRef, C_isoRef, inv_I_isoRef, inv_J_isoRef, inv_III_C_isoRef
def post_Psi(Psi, F): # Deals with everything after the strain-energy is used [variant for compressible materials] (stresses)
P = k.layers.Lambda(lambda x: ten2_P(x[0], x[1]), name='P' )([Psi, F])
return post_Psi_both(Psi, P, F)
def post_Psi_incomp(Psi, Psi_isoRef, F, F_isoRef): # Deals with everything after the strain-energy is used [variant for incompressible materials] (stresses)
P_iso = k.layers.Lambda(lambda x: ten2_P(x[0], x[1]) , name='P_iso' )([Psi, F])
P_isoRef = k.layers.Lambda(lambda x: ten2_P(x[0], x[1]) , name='P_isoRef')([Psi_isoRef, F_isoRef])
lagMul = k.layers.Lambda(lambda P: tf.keras.backend.expand_dims(P[:,0,0],1), name='lagMul' )(P_isoRef)
P = k.layers.Lambda(lambda x: ten2_P_lagMul(x[0], x[1], x[2]) , name='P' )([P_iso, F, lagMul])
return post_Psi_both(Psi, P, F)
def post_Psi_both(Psi, P, F): # Common parts from post_Psi & post_Psi_incomp
S = k.layers.Lambda(lambda x: ten2_S(x[0], x[1]) , name='S' )([P, F])
J = k.layers.Lambda(lambda F: tf.keras.backend.expand_dims(tf.linalg.det(F),1), name='J' )(F)
sigma = k.layers.Lambda(lambda x: ten2_sigma(x[0], x[1], x[2]) , name='sigma')([P, F, J])
P11 = k.layers.Lambda(lambda P: tf.keras.backend.expand_dims(P[:,0,0],1) , name='P11' )(P)
return P11, P, S, sigma
##########################################################################################
##########################################################################################
###### ANALYTICAL STRAIN ENERGY DENSITY FUNCTIONS ########################################
##########################################################################################
##########################################################################################
def MooneyRivlin6term_wrapper(c10, c20, c30, c01, c02, c03):
def MooneyRivlin6term(I, J, I3):
I1 = I*3.0
I2 = J*3.0
Psi = k.layers.Lambda(lambda x: c10*(x[0]-3.0) + c20*(x[0]-3.0)**2 + c30*(x[0]-3.0)**3 + c01*(x[1]-3.0) + c02*(x[1]-3.0)**2 + c03*(x[1]-3.0)**3, name='Psi')([I1, I2, I3])
return Psi
return MooneyRivlin6term
def NeoHookean_wrapper(c):
def NeoHookean(I, J, I3):
I1 = I*3.0
I2 = J*3.0
Psi = k.layers.Lambda(lambda x: c*(x[0]-3.0), name='Psi')([I1, I2, I3])
return Psi
return NeoHookean
def MooneyRivlin_wrapper(c1, c2):
def MooneyRivlin(I, J, I3):
I1 = I*3.0
I2 = J*3.0
Psi = k.layers.Lambda(lambda x: c1*(x[0]-3.0) + c2*(x[1]-3.0), name='Psi')([I1, I2, I3])
return Psi
return MooneyRivlin
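##########################################################################################
# HEDGED USAGE SKETCH (added for illustration, not part of the original module):
# a minimal eager-mode check of the kinematic helpers above, assuming TensorFlow 2.x.
# It builds deformation gradients for uniaxial tension, forms C = F^T F and evaluates
# det(C), which should stay close to 1 for these incompressible deformation modes.
##########################################################################################
if __name__ == "__main__":
    lam = np.array([1.0, 1.1, 1.2])                       # stretch levels
    F_ut = tf.constant(defGrad_ut(lam), dtype=tf.float32)  # batch of deformation gradients [3,3,3]
    C_ut = ten2_C(F_ut)                                    # right Cauchy-Green tensors
    I3 = invariant_I3(C_ut)                                # det(C), ~1 for isochoric deformation
    print(I3.numpy())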
|
[
"numpy.sqrt",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.math.subtract",
"numpy.arccos",
"tensorflow.linalg.inv",
"tensorflow.math.divide",
"tensorflow.gradients",
"numpy.sin",
"tensorflow.keras.backend.expand_dims",
"tensorflow.eye",
"numpy.multiply",
"tensorflow.concat",
"tensorflow.math.multiply",
"numpy.empty",
"tensorflow.matmul",
"tensorflow.linalg.det",
"numpy.cos",
"tensorflow.linalg.matmul",
"numpy.power",
"keras.layers.Lambda",
"tensorflow.linalg.trace",
"keras.layers.Input",
"tensorflow.constant",
"tensorflow.math.reduce_sum"
] |
[((4611, 4651), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['F', 'F'], {'transpose_a': '(True)'}), '(F, F, transpose_a=True)\n', (4627, 4651), True, 'import tensorflow as tf\n'), ((5108, 5144), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['dir', '(3)'], {}), '(dir, 3)\n', (5136, 5144), True, 'import tensorflow as tf\n'), ((5154, 5190), 'tensorflow.transpose', 'tf.transpose', (['dir'], {'perm': '[0, 1, 3, 2]'}), '(dir, perm=[0, 1, 3, 2])\n', (5166, 5190), True, 'import tensorflow as tf\n'), ((5196, 5224), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['dir', 'dir_t'], {}), '(dir, dir_t)\n', (5212, 5224), True, 'import tensorflow as tf\n'), ((5652, 5692), 'numpy.empty', 'np.empty', (['(dim[0], 3)'], {'dtype': 'np.complex_'}), '((dim[0], 3), dtype=np.complex_)\n', (5660, 5692), True, 'import numpy as np\n'), ((6269, 6281), 'numpy.sqrt', 'np.sqrt', (['eig'], {}), '(eig)\n', (6276, 6281), True, 'import numpy as np\n'), ((6397, 6447), 'tensorflow.gradients', 'tf.gradients', (['Psi', 'F'], {'unconnected_gradients': '"""zero"""'}), "(Psi, F, unconnected_gradients='zero')\n", (6409, 6447), True, 'import tensorflow as tf\n'), ((6704, 6739), 'tensorflow.math.multiply', 'tf.math.multiply', (['lagMul', 'FtransInv'], {}), '(lagMul, FtransInv)\n', (6720, 6739), True, 'import tensorflow as tf\n'), ((6749, 6782), 'tensorflow.math.subtract', 'tf.math.subtract', (['P_iso', 'lastTerm'], {}), '(P_iso, lastTerm)\n', (6765, 6782), True, 'import tensorflow as tf\n'), ((7994, 8032), 'keras.layers.Input', 'k.layers.Input', ([], {'shape': '(3, 3)', 'name': '"""F"""'}), "(shape=(3, 3), name='F')\n", (8008, 8032), True, 'import keras as k\n'), ((2682, 2707), 'tensorflow.constant', 'tf.constant', (['[1, 1, 1, 3]'], {}), '([1, 1, 1, 3])\n', (2693, 2707), True, 'import tensorflow as tf\n'), ((2774, 2802), 'tensorflow.constant', 'tf.constant', (['[1, 1, 1, 1, 3]'], {}), '([1, 1, 1, 1, 3])\n', (2785, 2802), True, 'import tensorflow as tf\n'), ((2902, 2924), 'tensorflow.math.multiply', 'tf.math.multiply', (['L', 'w'], {}), '(L, w)\n', (2918, 2924), True, 'import tensorflow as tf\n'), ((2973, 3011), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['L_weighted'], {'axis': '(2)'}), '(L_weighted, axis=2)\n', (2991, 3011), True, 'import tensorflow as tf\n'), ((3123, 3154), 'tensorflow.constant', 'tf.constant', (['[1, numTens, 1, 1]'], {}), '([1, numTens, 1, 1])\n', (3134, 3154), True, 'import tensorflow as tf\n'), ((3370, 3401), 'tensorflow.constant', 'tf.constant', (['[1, numTens, 1, 1]'], {}), '([1, numTens, 1, 1])\n', (3381, 3401), True, 'import tensorflow as tf\n'), ((3479, 3500), 'tensorflow.linalg.det', 'tf.linalg.det', (['C_tile'], {}), '(C_tile)\n', (3492, 3500), True, 'import tensorflow as tf\n'), ((3512, 3534), 'tensorflow.constant', 'tf.constant', (['[1, 1, 3]'], {}), '([1, 1, 3])\n', (3523, 3534), True, 'import tensorflow as tf\n'), ((3618, 3643), 'tensorflow.constant', 'tf.constant', (['[1, 1, 1, 3]'], {}), '([1, 1, 1, 3])\n', (3629, 3643), True, 'import tensorflow as tf\n'), ((3798, 3836), 'tensorflow.math.multiply', 'tf.math.multiply', (['detC_tile', 'invTransC'], {}), '(detC_tile, invTransC)\n', (3814, 3836), True, 'import tensorflow as tf\n'), ((3848, 3865), 'tensorflow.matmul', 'tf.matmul', (['mul', 'H'], {}), '(mul, H)\n', (3857, 3865), True, 'import tensorflow as tf\n'), ((3878, 3901), 'tensorflow.linalg.trace', 'tf.linalg.trace', (['matmul'], {}), '(matmul)\n', (3893, 3901), True, 'import tensorflow as tf\n'), ((4110, 4122), 'numpy.sqrt', 'np.sqrt', (['lam'], {}), 
'(lam)\n', (4117, 4122), True, 'import numpy as np\n'), ((4141, 4153), 'numpy.sqrt', 'np.sqrt', (['lam'], {}), '(lam)\n', (4148, 4153), True, 'import numpy as np\n'), ((4863, 4874), 'tensorflow.shape', 'tf.shape', (['F'], {}), '(F)\n', (4871, 4874), True, 'import tensorflow as tf\n'), ((4918, 4940), 'tensorflow.constant', 'tf.constant', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (4929, 4940), True, 'import tensorflow as tf\n'), ((5347, 5363), 'tensorflow.linalg.det', 'tf.linalg.det', (['C'], {}), '(C)\n', (5360, 5363), True, 'import tensorflow as tf\n'), ((6579, 6610), 'tensorflow.transpose', 'tf.transpose', (['F'], {'perm': '[0, 2, 1]'}), '(F, perm=[0, 2, 1])\n', (6591, 6610), True, 'import tensorflow as tf\n'), ((6630, 6669), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['lagMul', '(2)'], {}), '(lagMul, 2)\n', (6658, 6669), True, 'import tensorflow as tf\n'), ((6670, 6692), 'tensorflow.constant', 'tf.constant', (['[1, 3, 3]'], {}), '([1, 3, 3])\n', (6681, 6692), True, 'import tensorflow as tf\n'), ((6881, 6897), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['F'], {}), '(F)\n', (6894, 6897), True, 'import tensorflow as tf\n'), ((7058, 7080), 'tensorflow.constant', 'tf.constant', (['[1, 3, 3]'], {}), '([1, 3, 3])\n', (7069, 7080), True, 'import tensorflow as tf\n'), ((7907, 7954), 'keras.layers.Input', 'k.layers.Input', ([], {'shape': '(numExtra,)', 'name': '"""extra"""'}), "(shape=(numExtra,), name='extra')\n", (7921, 7954), True, 'import keras as k\n'), ((2188, 2199), 'tensorflow.shape', 'tf.shape', (['w'], {}), '(w)\n', (2196, 2199), True, 'import tensorflow as tf\n'), ((2281, 2306), 'tensorflow.constant', 'tf.constant', (['[0, 1, 1, 1]'], {}), '([0, 1, 1, 1])\n', (2292, 2306), True, 'import tensorflow as tf\n'), ((2437, 2464), 'tensorflow.concat', 'tf.concat', (['[L_0, L]'], {'axis': '(1)'}), '([L_0, L], axis=1)\n', (2446, 2464), True, 'import tensorflow as tf\n'), ((2574, 2602), 'tensorflow.constant', 'tf.constant', (['[1, 0, 1, 1, 1]'], {}), '([1, 0, 1, 1, 1])\n', (2585, 2602), True, 'import tensorflow as tf\n'), ((2613, 2647), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['L', '(1)'], {}), '(L, 1)\n', (2641, 2647), True, 'import tensorflow as tf\n'), ((2719, 2753), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['w', '(3)'], {}), '(w, 3)\n', (2747, 2753), True, 'import tensorflow as tf\n'), ((2813, 2847), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['w', '(4)'], {}), '(w, 4)\n', (2841, 2847), True, 'import tensorflow as tf\n'), ((3171, 3205), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['C', '(1)'], {}), '(C, 1)\n', (3199, 3205), True, 'import tensorflow as tf\n'), ((3241, 3261), 'tensorflow.matmul', 'tf.matmul', (['C_tile', 'H'], {}), '(C_tile, H)\n', (3250, 3261), True, 'import tensorflow as tf\n'), ((3418, 3452), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['C', '(1)'], {}), '(C, 1)\n', (3446, 3452), True, 'import tensorflow as tf\n'), ((3555, 3597), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['detC_tile', '(2)'], {}), '(detC_tile, 2)\n', (3583, 3597), True, 'import tensorflow as tf\n'), ((3663, 3705), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['detC_tile', '(3)'], {}), '(detC_tile, 3)\n', (3691, 3705), True, 'import tensorflow as tf\n'), ((3746, 3785), 'tensorflow.transpose', 'tf.transpose', (['C_tile'], {'perm': '[0, 1, 3, 2]'}), '(C_tile, perm=[0, 1, 3, 
2])\n', (3758, 3785), True, 'import tensorflow as tf\n'), ((4895, 4917), 'tensorflow.constant', 'tf.constant', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (4906, 4917), True, 'import tensorflow as tf\n'), ((4989, 4998), 'tensorflow.eye', 'tf.eye', (['(3)'], {}), '(3)\n', (4995, 4998), True, 'import tensorflow as tf\n'), ((5952, 5974), 'numpy.arccos', 'np.arccos', (['(nom / denom)'], {}), '(nom / denom)\n', (5961, 5974), True, 'import numpy as np\n'), ((7032, 7054), 'tensorflow.math.divide', 'tf.math.divide', (['(1.0)', 'J'], {}), '(1.0, J)\n', (7046, 7054), True, 'import tensorflow as tf\n'), ((7128, 7159), 'tensorflow.transpose', 'tf.transpose', (['F'], {'perm': '[0, 2, 1]'}), '(F, perm=[0, 2, 1])\n', (7140, 7159), True, 'import tensorflow as tf\n'), ((11662, 11853), 'keras.layers.Lambda', 'k.layers.Lambda', (['(lambda x: c10 * (x[0] - 3.0) + c20 * (x[0] - 3.0) ** 2 + c30 * (x[0] - 3.0\n ) ** 3 + c01 * (x[1] - 3.0) + c02 * (x[1] - 3.0) ** 2 + c03 * (x[1] - \n 3.0) ** 3)'], {'name': '"""Psi"""'}), "(lambda x: c10 * (x[0] - 3.0) + c20 * (x[0] - 3.0) ** 2 + \n c30 * (x[0] - 3.0) ** 3 + c01 * (x[1] - 3.0) + c02 * (x[1] - 3.0) ** 2 +\n c03 * (x[1] - 3.0) ** 3, name='Psi')\n", (11677, 11853), True, 'import keras as k\n'), ((11958, 12013), 'keras.layers.Lambda', 'k.layers.Lambda', (['(lambda x: c * (x[0] - 3.0))'], {'name': '"""Psi"""'}), "(lambda x: c * (x[0] - 3.0), name='Psi')\n", (11973, 12013), True, 'import keras as k\n'), ((12157, 12233), 'keras.layers.Lambda', 'k.layers.Lambda', (['(lambda x: c1 * (x[0] - 3.0) + c2 * (x[1] - 3.0))'], {'name': '"""Psi"""'}), "(lambda x: c1 * (x[0] - 3.0) + c2 * (x[1] - 3.0), name='Psi')\n", (12172, 12233), True, 'import keras as k\n'), ((2256, 2281), 'tensorflow.constant', 'tf.constant', (['[1, 0, 0, 0]'], {}), '([1, 0, 0, 0])\n', (2267, 2281), True, 'import tensorflow as tf\n'), ((2547, 2575), 'tensorflow.constant', 'tf.constant', (['[0, 1, 0, 0, 0]'], {}), '([0, 1, 0, 0, 0])\n', (2558, 2575), True, 'import tensorflow as tf\n'), ((10328, 10371), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['P[:, 0, 0]', '(1)'], {}), '(P[:, 0, 0], 1)\n', (10356, 10371), True, 'import tensorflow as tf\n'), ((10980, 11023), 'tensorflow.keras.backend.expand_dims', 'tf.keras.backend.expand_dims', (['P[:, 0, 0]', '(1)'], {}), '(P[:, 0, 0], 1)\n', (11008, 11023), True, 'import tensorflow as tf\n'), ((5801, 5816), 'numpy.power', 'np.power', (['I1', '(2)'], {}), '(I1, 2)\n', (5809, 5816), True, 'import numpy as np\n'), ((8239, 8250), 'tensorflow.shape', 'tf.shape', (['F'], {}), '(F)\n', (8247, 8250), True, 'import tensorflow as tf\n'), ((10797, 10813), 'tensorflow.linalg.det', 'tf.linalg.det', (['F'], {}), '(F)\n', (10810, 10813), True, 'import tensorflow as tf\n'), ((2388, 2397), 'tensorflow.eye', 'tf.eye', (['(3)'], {}), '(3)\n', (2394, 2397), True, 'import tensorflow as tf\n'), ((5843, 5858), 'numpy.power', 'np.power', (['I1', '(3)'], {}), '(I1, 3)\n', (5851, 5858), True, 'import numpy as np\n'), ((5863, 5882), 'numpy.multiply', 'np.multiply', (['I1', 'I2'], {}), '(I1, I2)\n', (5874, 5882), True, 'import numpy as np\n'), ((5914, 5929), 'numpy.power', 'np.power', (['I1', '(2)'], {}), '(I1, 2)\n', (5922, 5929), True, 'import numpy as np\n'), ((6052, 6097), 'numpy.cos', 'np.cos', (['((theta + 2 * np.pi * (k - 1.0)) / 3.0)'], {}), '((theta + 2 * np.pi * (k - 1.0)) / 3.0)\n', (6058, 6097), True, 'import numpy as np\n'), ((6194, 6223), 'numpy.cos', 'np.cos', (['(2.0 / 3.0 * np.pi * k)'], {}), '(2.0 / 3.0 * np.pi * k)\n', (6200, 6223), True, 'import numpy as 
np\n'), ((6225, 6254), 'numpy.sin', 'np.sin', (['(2.0 / 3.0 * np.pi * k)'], {}), '(2.0 / 3.0 * np.pi * k)\n', (6231, 6254), True, 'import numpy as np\n'), ((6168, 6183), 'numpy.power', 'np.power', (['I1', '(3)'], {}), '(I1, 3)\n', (6176, 6183), True, 'import numpy as np\n'), ((6030, 6045), 'numpy.power', 'np.power', (['I1', '(2)'], {}), '(I1, 2)\n', (6038, 6045), True, 'import numpy as np\n')]
|
#------------------------------------------------------------------------------
# IMPORT NECESSARY MODULES
#------------------------------------------------------------------------------
print (' ABOUT to Start Simulation:- Importing Modules')
import anuga, anuga.parallel, numpy, time, os, glob
from anuga.operators.rate_operators import Polygonal_rate_operator
from anuga import file_function, Polygon_function, read_polygon, create_mesh_from_regions, Domain, Inlet_operator
import anuga.utilities.spatialInputUtil as su
from anuga import distribute, myid, numprocs, finalize, barrier
from anuga.parallel.parallel_operator_factory import Inlet_operator, Boyd_box_operator, Boyd_pipe_operator
from anuga import Rate_operator
#------------------------------------------------------------------------------
# FILENAMES, MODEL DOMAIN and VARIABLES
#------------------------------------------------------------------------------
basename = 'terrain'
outname = 'boyd_pipe'
meshname = 'terrain.msh'
W=296600.
N=6180070.
E=296730.
S=6179960.
#------------------------------------------------------------------------------
# CREATING MESH
#------------------------------------------------------------------------------
bounding_polygon = [[W, S], [E, S], [E, N], [W, N]]
create_mesh_from_regions(bounding_polygon,
boundary_tags={'south': [0], 'east': [1], 'north': [2], 'west': [3]},
maximum_triangle_area=1.0,
filename=meshname,
use_cache=False,
verbose=True)
#------------------------------------------------------------------------------
# SETUP COMPUTATIONAL DOMAIN
#------------------------------------------------------------------------------
domain = Domain(meshname, use_cache=False, verbose=True)
domain.set_minimum_storable_height(0.0001)
domain.set_name(outname)
print (domain.statistics())
#------------------------------------------------------------------------------
# APPLY MANNING'S ROUGHNESSES
#------------------------------------------------------------------------------
domain.set_quantity('friction', 0.035)
domain.set_quantity('elevation', filename=basename+'.csv', use_cache=False, verbose=True, alpha=0.1)
#------------------------------------------------------------------------------
# BOYD PIPE CULVERT
#------------------------------------------------------------------------------
losses = {'inlet':0.5, 'outlet':1.0, 'bend':0.0, 'grate':0.0, 'pier': 0.0, 'other': 0.0}
ep0 = numpy.array([296660.390,6180017.186])
ep1 = numpy.array([296649.976,6180038.872])
invert_elevations=[12.40,12.20]
culvert = Boyd_pipe_operator(domain,
losses=losses,
diameter=1.0,
end_points=[ep0, ep1],
invert_elevations=invert_elevations,
use_momentum_jet=False,
use_velocity_head=False,
manning=0.013,
logging=True,
label='boyd_pipe',
verbose=False)
#------------------------------------------------------------------------------
# APPLY FLOW
#------------------------------------------------------------------------------
line=[[296669.258,6179974.191],[296677.321,6179976.449]]
anuga.parallel.Inlet_operator(domain, line, 1.0)
#------------------------------------------------------------------------------
# SETUP BOUNDARY CONDITIONS
#------------------------------------------------------------------------------
print ('Available boundary tags', domain.get_boundary_tags())
Br = anuga.Reflective_boundary(domain)
Bd = anuga.Dirichlet_boundary([0,0,0])
domain.set_boundary({'west': Bd, 'south': Br, 'north': Bd, 'east': Bd})
#------------------------------------------------------------------------------
# EVOLVE SYSTEM THROUGH TIME
#------------------------------------------------------------------------------
import time
t0 = time.time()
for t in domain.evolve(yieldstep = 1, finaltime = 4000):
print (domain.timestepping_statistics())
print (domain.boundary_statistics(quantities='stage'))
print ('Finished')
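#------------------------------------------------------------------------------
# ILLUSTRATIVE SANITY CHECK (added sketch, not part of the original script):
# rough mass-balance estimate of the volume injected by the Inlet_operator above,
# assuming the constant rate of 1.0 m^3/s over the 4000 s simulated period, plus
# the length of the inlet line used to apply it.
#------------------------------------------------------------------------------
inlet_length = numpy.hypot(line[1][0] - line[0][0], line[1][1] - line[0][1])
print ('Inlet line length [m]:', inlet_length)
print ('Approximate injected volume [m^3]:', 1.0 * 4000.0)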
|
[
"anuga.Domain",
"anuga.Reflective_boundary",
"anuga.parallel.parallel_operator_factory.Boyd_pipe_operator",
"numpy.array",
"anuga.parallel.Inlet_operator",
"anuga.create_mesh_from_regions",
"time.time",
"anuga.Dirichlet_boundary"
] |
[((1311, 1508), 'anuga.create_mesh_from_regions', 'create_mesh_from_regions', (['bounding_polygon'], {'boundary_tags': "{'south': [0], 'east': [1], 'north': [2], 'west': [3]}", 'maximum_triangle_area': '(1.0)', 'filename': 'meshname', 'use_cache': '(False)', 'verbose': '(True)'}), "(bounding_polygon, boundary_tags={'south': [0],\n 'east': [1], 'north': [2], 'west': [3]}, maximum_triangle_area=1.0,\n filename=meshname, use_cache=False, verbose=True)\n", (1335, 1508), False, 'from anuga import file_function, Polygon_function, read_polygon, create_mesh_from_regions, Domain, Inlet_operator\n'), ((1733, 1780), 'anuga.Domain', 'Domain', (['meshname'], {'use_cache': '(False)', 'verbose': '(True)'}), '(meshname, use_cache=False, verbose=True)\n', (1739, 1780), False, 'from anuga import file_function, Polygon_function, read_polygon, create_mesh_from_regions, Domain, Inlet_operator\n'), ((2507, 2544), 'numpy.array', 'numpy.array', (['[296660.39, 6180017.186]'], {}), '([296660.39, 6180017.186])\n', (2518, 2544), False, 'import anuga, anuga.parallel, numpy, time, os, glob\n'), ((2553, 2591), 'numpy.array', 'numpy.array', (['[296649.976, 6180038.872]'], {}), '([296649.976, 6180038.872])\n', (2564, 2591), False, 'import anuga, anuga.parallel, numpy, time, os, glob\n'), ((2651, 2890), 'anuga.parallel.parallel_operator_factory.Boyd_pipe_operator', 'Boyd_pipe_operator', (['domain'], {'losses': 'losses', 'diameter': '(1.0)', 'end_points': '[ep0, ep1]', 'invert_elevations': 'invert_elevations', 'use_momentum_jet': '(False)', 'use_velocity_head': '(False)', 'manning': '(0.013)', 'logging': '(True)', 'label': '"""boyd_pipe"""', 'verbose': '(False)'}), "(domain, losses=losses, diameter=1.0, end_points=[ep0,\n ep1], invert_elevations=invert_elevations, use_momentum_jet=False,\n use_velocity_head=False, manning=0.013, logging=True, label='boyd_pipe',\n verbose=False)\n", (2669, 2890), False, 'from anuga.parallel.parallel_operator_factory import Inlet_operator, Boyd_box_operator, Boyd_pipe_operator\n'), ((3173, 3221), 'anuga.parallel.Inlet_operator', 'anuga.parallel.Inlet_operator', (['domain', 'line', '(1.0)'], {}), '(domain, line, 1.0)\n', (3202, 3221), False, 'import anuga, anuga.parallel, numpy, time, os, glob\n'), ((3488, 3521), 'anuga.Reflective_boundary', 'anuga.Reflective_boundary', (['domain'], {}), '(domain)\n', (3513, 3521), False, 'import anuga, anuga.parallel, numpy, time, os, glob\n'), ((3528, 3563), 'anuga.Dirichlet_boundary', 'anuga.Dirichlet_boundary', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3552, 3563), False, 'import anuga, anuga.parallel, numpy, time, os, glob\n'), ((3856, 3867), 'time.time', 'time.time', ([], {}), '()\n', (3865, 3867), False, 'import time\n')]
|
import unittest
import numpy as np
from mlscratch.models import losses
class TestBinaryCrossEntropy(unittest.TestCase):
def setUp(self):
self.y_true = np.array([0, 1, 0.5])
self.y_pred = np.array([0, 1, 0.5])
def test_return(self):
bce = losses.binary_cross_entropy(self.y_true, self.y_pred)
self.assertIsInstance(bce, np.float64)
class TestMeanSquaredError(unittest.TestCase):
def setUp(self):
self.y_true = np.array([0, 1])
self.y_pred = np.array([0.3, 0.4])
def test_return(self):
mse = losses.mean_squared_error(self.y_true, self.y_pred)
self.assertIsInstance(mse, np.float64)
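# Hedged reference sketch (added; not part of the original tests): plain-NumPy versions
# of the two losses exercised above, handy for cross-checking. The exact signatures and
# clipping behaviour of mlscratch.models.losses are assumptions here, not its real API.
def reference_mean_squared_error(y_true, y_pred):
    # mean of the squared residuals
    return np.mean((y_true - y_pred) ** 2)


def reference_binary_cross_entropy(y_true, y_pred, eps=1e-12):
    # clip to avoid log(0), then average the negative log-likelihood
    p = np.clip(y_pred, eps, 1.0 - eps)
    return -np.mean(y_true * np.log(p) + (1.0 - y_true) * np.log(1.0 - p))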
|
[
"mlscratch.models.losses.binary_cross_entropy",
"numpy.array",
"mlscratch.models.losses.mean_squared_error"
] |
[((168, 189), 'numpy.array', 'np.array', (['[0, 1, 0.5]'], {}), '([0, 1, 0.5])\n', (176, 189), True, 'import numpy as np\n'), ((212, 233), 'numpy.array', 'np.array', (['[0, 1, 0.5]'], {}), '([0, 1, 0.5])\n', (220, 233), True, 'import numpy as np\n'), ((276, 329), 'mlscratch.models.losses.binary_cross_entropy', 'losses.binary_cross_entropy', (['self.y_true', 'self.y_pred'], {}), '(self.y_true, self.y_pred)\n', (303, 329), False, 'from mlscratch.models import losses\n'), ((470, 486), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (478, 486), True, 'import numpy as np\n'), ((509, 529), 'numpy.array', 'np.array', (['[0.3, 0.4]'], {}), '([0.3, 0.4])\n', (517, 529), True, 'import numpy as np\n'), ((572, 623), 'mlscratch.models.losses.mean_squared_error', 'losses.mean_squared_error', (['self.y_true', 'self.y_pred'], {}), '(self.y_true, self.y_pred)\n', (597, 623), False, 'from mlscratch.models import losses\n')]
|
import torch
import numpy as np
class ToTensorGjz(object):
def __call__(self, pic):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img.float()
def __repr__(self):
return self.__class__.__name__ + '()'
class NormalizeGjz(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
tensor.sub_(self.mean).div_(self.std)
return tensor
def crop_img(img, roi_box):
    # Crop img to the (sx, sy, ex, ey) box given by roi_box, zero-padding any part of
    # the box that falls outside the image bounds.
h, w = img.shape[:2]
sx, sy, ex, ey = [int(round(_)) for _ in roi_box]
dh, dw = ey - sy, ex - sx
if len(img.shape) == 3:
res = np.zeros((dh, dw, 3), dtype=np.uint8)
else:
res = np.zeros((dh, dw), dtype=np.uint8)
if sx < 0:
sx, dsx = 0, -sx
else:
dsx = 0
if ex > w:
ex, dex = w, dw - (ex - w)
else:
dex = dw
if sy < 0:
sy, dsy = 0, -sy
else:
dsy = 0
if ey > h:
ey, dey = h, dh - (ey - h)
else:
dey = dh
res[dsy:dey, dsx:dex] = img[sy:ey, sx:ex]
return res
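# Hedged usage sketch (added for illustration, not part of the original module): crop a
# small dummy image with an ROI box that extends past the right/bottom edge; the part of
# the box outside the image simply stays zero-padded in the result.
if __name__ == '__main__':
    img = np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)
    roi_box = [2, 2, 6, 6]              # sx, sy, ex, ey -- partially outside the 4x4 image
    patch = crop_img(img, roi_box)
    print(patch.shape)                  # (4, 4, 3), with the out-of-range region left as zeros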
|
[
"numpy.zeros"
] |
[((638, 675), 'numpy.zeros', 'np.zeros', (['(dh, dw, 3)'], {'dtype': 'np.uint8'}), '((dh, dw, 3), dtype=np.uint8)\n', (646, 675), True, 'import numpy as np\n'), ((700, 734), 'numpy.zeros', 'np.zeros', (['(dh, dw)'], {'dtype': 'np.uint8'}), '((dh, dw), dtype=np.uint8)\n', (708, 734), True, 'import numpy as np\n')]
|
"""Random Forest classification and computation of assessment metrics."""
import numpy as np
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from raster import is_raster
def transform_input(scene):
"""Transform input variables (here Landsat NDSV).
Parameters
----------
scene : landsat.Scene
Input Landsat scene.
Returns
-------
X : array
Transformed input data as an array of shape (n_samples, n_features).
"""
n_features = len(scene.ndsv_)
n_samples = scene.profile['width'] * scene.profile['height']
X = np.zeros(shape=(n_samples, n_features), dtype=np.float)
ndsv = scene.ndsv
for i in range(n_features):
X[:, i] = ndsv[i, :, :].ravel()
return X
def transform_test(true, pred):
"""Transform true and predicted raster data sets to
flat arrays.
Parameters
----------
true : array-like
Testing data set raster as a 2D NumPy array.
pred : array-like
Predicted values as a 2D NumPy array.
Returns
-------
y_true : array
1D array of true labels of shape (n_samples).
y_pred : array
1D array of predicted labels of shape (n_samples).
"""
y_pred = pred[true > 0].ravel()
y_true = true[true > 0].ravel()
return y_true, y_pred
def transform_training(scene, training):
"""Transform training data set.
Parameters
----------
scene : landsat.Scene
Input Landsat scene.
training : 2D numpy array
Training data raster as a 2D numpy array.
Returns
-------
X : array
Training samples as an array of shape (n_samples, n_features).
y : array
Training labels as an array of shape (n_samples).
"""
n_features = len(scene.ndsv_)
n_samples = np.count_nonzero(training)
X = np.zeros(shape=(n_samples, n_features), dtype=np.float)
ndsv = scene.ndsv
for i in range(n_features):
X[:, i] = ndsv[i, :, :][training > 0].ravel()
y = training[training > 0].ravel()
return X, y
def classify(
scene,
training,
oversampling=False,
undersampling=False,
water=None,
**kwargs):
"""Classify Landsat scene using Random Forest.
Parameters
----------
scene : landsat.Scene
Input Landsat scene.
training : 2D numpy array
Input training data set as a 2D numpy array.
oversampling : bool, optional
If set to `True`, random oversampling will be performed on the
minority class.
undersampling : bool, optional
If set to `True`, random undersampling will be performed on the
majority class.
water : 2D numpy array, optional
If provided, water pixels will be ignored and classified as
non-built.
kwargs : **kwargs
Additionnal arguments to the Random Forest classifier.
Returns
-------
classes : 2D numpy array
Binary output as a 2D numpy array.
probabilities : 2D numpy array
Probabilistic output as a 2D numpy array.
"""
X = transform_input(scene)
x_train, y_train = transform_training(scene, training)
random_state = kwargs.pop('random_state', None)
if oversampling:
ros = RandomOverSampler(random_state=random_state)
x_train, y_train = ros.fit_sample(x_train, y_train)
if undersampling:
ros = RandomUnderSampler(random_state=random_state)
x_train, y_train = ros.fit_sample(x_train, y_train)
rf = RandomForestClassifier(**kwargs)
rf.fit(x_train, y_train)
probabilities = rf.predict_proba(X)
probabilities = probabilities[:, 0].reshape(scene.red.shape)
if is_raster(water):
probabilities[water] = 0
return probabilities
def assess(probabilities, testing_dataset, threshold=0.75):
"""Compute validation metrics.
Parameters
----------
probabilities : 2D numpy array
Predicted probabilities of belonging to
the built-up class as a 2D NumPy array.
testing_dataset : 2D numpy array
Testing data set as as 2D NumPy array.
threshold : float
Threshold applied to the probabilistic output
to obtain a binary product (0-1).
Returns
-------
summary : dict
Assessment metrics in a dictionnary.
"""
summary = {}
# Binary product obtained by thresholding the probabilities
classes = np.zeros(shape=probabilities.shape, dtype=np.uint8)
classes[probabilities >= threshold] = 1
classes[probabilities < threshold] = 2
# 1. Binary classification metrics:
# Assign value 2 to all non-built land covers
true, pred = testing_dataset.copy(), classes.copy()
true[true >= 2] = 2
pred[pred >= 2] = 2
# Transform and binarize input data
y_true, y_pred = transform_test(true, pred)
y_true, y_pred = y_true == 1, y_pred == 1
summary['accuracy'] = metrics.accuracy_score(
y_true, y_pred
)
summary['balanced_accuracy'] = metrics.recall_score(
y_true, y_pred
)
summary['precision'] = metrics.precision_score(
y_true, y_pred
)
summary['recall'] = metrics.recall_score(
y_true, y_pred
)
summary['f1_score'] = metrics.f1_score(
y_true, y_pred
)
summary['confusion_matrix'] = metrics.confusion_matrix(
y_true, y_pred
)
# 2. Continuous metrics based on probabilities:
# Assign value 2 to all non-built land covers
true = testing_dataset.copy()
true[true >= 2] = 2
# Transform and binarize input data
y_true, y_pred = transform_test(true, probabilities)
y_true = y_true == 1
summary['pr_curve'] = metrics.precision_recall_curve(
y_true, y_pred
)
summary['avg_precision'] = metrics.average_precision_score(
y_true, y_pred, average='weighted'
)
# 3. Per land cover accuracies
land_covers = {
'builtup': 1,
'baresoil': 2,
'lowveg': 3,
'highveg': 4
}
for label, value in land_covers.items():
mask = testing_dataset == value
true = testing_dataset[mask]
pred = classes[mask]
total = np.count_nonzero(mask)
if label == 'builtup':
accuracy = np.count_nonzero(pred == 1) / total
else:
accuracy = np.count_nonzero(pred >= 2) / total
summary['{}_accuracy'.format(label)] = accuracy
return summary
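# Hedged usage sketch (illustration only, not part of the original module): exercising
# transform_test() on tiny synthetic rasters. Pixels labelled 0 in the testing raster mean
# "no reference data" and are dropped from both the true and predicted arrays.
if __name__ == '__main__':
    true_demo = np.array([[0, 1], [2, 1]])
    pred_demo = np.array([[9, 1], [2, 2]])
    y_true_demo, y_pred_demo = transform_test(true_demo, pred_demo)
    print(y_true_demo)  # [1 2 1] -- the pixel where true == 0 is discarded
    print(y_pred_demo)  # [1 2 2]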
|
[
"sklearn.metrics.f1_score",
"sklearn.metrics.average_precision_score",
"sklearn.metrics.precision_recall_curve",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.precision_score",
"numpy.count_nonzero",
"sklearn.metrics.recall_score",
"numpy.zeros",
"imblearn.over_sampling.RandomOverSampler",
"raster.is_raster",
"sklearn.metrics.accuracy_score",
"imblearn.under_sampling.RandomUnderSampler",
"sklearn.metrics.confusion_matrix"
] |
[((713, 768), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_samples, n_features)', 'dtype': 'np.float'}), '(shape=(n_samples, n_features), dtype=np.float)\n', (721, 768), True, 'import numpy as np\n'), ((1924, 1950), 'numpy.count_nonzero', 'np.count_nonzero', (['training'], {}), '(training)\n', (1940, 1950), True, 'import numpy as np\n'), ((1959, 2014), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n_samples, n_features)', 'dtype': 'np.float'}), '(shape=(n_samples, n_features), dtype=np.float)\n', (1967, 2014), True, 'import numpy as np\n'), ((3639, 3671), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '(**kwargs)\n', (3661, 3671), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3815, 3831), 'raster.is_raster', 'is_raster', (['water'], {}), '(water)\n', (3824, 3831), False, 'from raster import is_raster\n'), ((4546, 4597), 'numpy.zeros', 'np.zeros', ([], {'shape': 'probabilities.shape', 'dtype': 'np.uint8'}), '(shape=probabilities.shape, dtype=np.uint8)\n', (4554, 4597), True, 'import numpy as np\n'), ((5043, 5081), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (5065, 5081), False, 'from sklearn import metrics\n'), ((5132, 5168), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (5152, 5168), False, 'from sklearn import metrics\n'), ((5211, 5250), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (5234, 5250), False, 'from sklearn import metrics\n'), ((5290, 5326), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (5310, 5326), False, 'from sklearn import metrics\n'), ((5368, 5400), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (5384, 5400), False, 'from sklearn import metrics\n'), ((5450, 5490), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (5474, 5490), False, 'from sklearn import metrics\n'), ((5817, 5863), 'sklearn.metrics.precision_recall_curve', 'metrics.precision_recall_curve', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (5847, 5863), False, 'from sklearn import metrics\n'), ((5910, 5977), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['y_true', 'y_pred'], {'average': '"""weighted"""'}), "(y_true, y_pred, average='weighted')\n", (5941, 5977), False, 'from sklearn import metrics\n'), ((3381, 3425), 'imblearn.over_sampling.RandomOverSampler', 'RandomOverSampler', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (3398, 3425), False, 'from imblearn.over_sampling import RandomOverSampler\n'), ((3523, 3568), 'imblearn.under_sampling.RandomUnderSampler', 'RandomUnderSampler', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (3541, 3568), False, 'from imblearn.under_sampling import RandomUnderSampler\n'), ((6311, 6333), 'numpy.count_nonzero', 'np.count_nonzero', (['mask'], {}), '(mask)\n', (6327, 6333), True, 'import numpy as np\n'), ((6389, 6416), 'numpy.count_nonzero', 'np.count_nonzero', (['(pred == 1)'], {}), '(pred == 1)\n', (6405, 6416), True, 'import numpy as np\n'), ((6462, 6489), 'numpy.count_nonzero', 'np.count_nonzero', (['(pred >= 2)'], {}), '(pred >= 2)\n', (6478, 6489), True, 'import numpy as np\n')]
|
from collections import deque
import pickle
import cv2
import numpy as np
import time
import ast
from utils import *
import tensorflow_hub as hub
import concurrent.futures
from tensorflow.keras import layers
import tensorflow as tf
# Load Yolo
net = cv2.dnn.readNet("./data/yolov4-tiny.weights", "./data/yolov4-tiny.cfg")
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
classes = []
with open("coco.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
print(classes)
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
# Loading image
cap = cv2.VideoCapture('vid_short.mp4')
mouse_pts = []
model = tf.keras.models.load_model('./model/resnet191020.h5')
model.summary()
#lb = pickle.loads(open(args["label"], "rb").read())
#lb = ["football","tennis","weight_lifting"]
lb = ['Fire', 'Normal Car', 'Normal', 'Road Accident', 'Shooting', 'Violence']
#model.summary()
# initialize the image mean for mean subtraction along with the
# predictions queue
mean = np.array([123.68, 116.779, 103.939][::1], dtype="float32")
Q = deque(maxlen=128)
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255.)
my_file = open("./test.txt","a+")
def get_mouse_points(event, x, y, flags, param):
# Used to mark 4 points on the frame zero of the video that will be warped
# Used to mark 2 points on the frame zero of the video that are 6 feet away
global mouseX, mouseY, mouse_pts
if event == cv2.EVENT_LBUTTONDOWN:
mouseX, mouseY = x, y
file1=open("./test.txt","a")
cv2.circle(image, (x, y), 10, (0, 255, 255), 10)
if "mouse_pts" not in globals():
mouse_pts = []
if(len(mouse_pts)==6):
file1.write(str(mouse_pts))
file1.close()
mouse_pts.append((x, y))
print("Point detected")
print(mouse_pts)
def Check(a, b):
    # Proximity heuristic for two detection centres a and b (pixel coordinates): the
    # y-dependent weight roughly compensates for perspective, and the pair counts as
    # "too close" when the scaled distance falls below 0.25 * the mean y (calibration).
dist = ((a[0] - b[0]) ** 2 + 550 / ((a[1] + b[1]) / 2) * (a[1] - b[1]) ** 2) ** 0.5
calibration = (a[1] + b[1]) / 2
if 0 < dist < 0.25 * calibration:
return True
else:
return False
scale_w = 1.2 / 2
scale_h = 4 / 2
SOLID_BACK_COLOR = (41, 41, 41)
frame_num = 0
total_pedestrians_detected = 0
total_six_feet_violations = 0
total_pairs = 0
abs_six_feet_violations = 0
pedestrian_per_sec = 0
sh_index = 1
sc_index = 1
cv2.namedWindow("image")
cv2.setMouseCallback("image", get_mouse_points)
num_mouse_points = 0
first_frame_display = True
font = cv2.FONT_HERSHEY_PLAIN
starting_time = time.time()
frame_id = 0
while True:
_, frame = cap.read()
frame_id += 1
height, width, channels = frame.shape
if frame_id == 1:
# Ask user to mark parallel points and two points 6 feet apart. Order bl, br, tr, tl, p1, p2
while True:
image = frame
file = open('./test.txt','r')
s = file.read()
if s:
x = ast.literal_eval(s)
cv2.imshow("image", image)
cv2.waitKey(1)
if s:
if len(mouse_pts) == 7 or len(x) == 6:
cv2.destroyWindow("image")
mouse_pts = x
break
first_frame_display = False
four_points = mouse_pts
M = perspective(frame, four_points[0:4])
pts = src = np.float32(np.array([four_points[4:]]))
warped_pt = cv2.perspectiveTransform(pts, M)[0]
d_thresh = np.sqrt(
(warped_pt[0][0] - warped_pt[1][0]) ** 2
+ (warped_pt[0][1] - warped_pt[1][1]) ** 2
)
bird_image = np.zeros(
(int(height * scale_h), int(width * scale_w), 3), np.uint8
)
bird_image[:] = SOLID_BACK_COLOR
pedestrian_detect = frame
# Detecting objects
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.5 and class_id == 0:
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
h = int(detection[3] * height)
w = int(detection[2] * width)
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
if len(indexes) > 0:
flat_box = indexes.flatten()
pairs = []
center = []
status = []
for i in flat_box:
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
center.append([int(x + w / 2), int(y + h / 2)])
status.append(False)
for i in range(len(center)):
for j in range(len(center)):
close = Check(center[i], center[j])
if close:
pairs.append([center[i], center[j]])
status[i] = True
status[j] = True
index = 0
for i in flat_box:
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
if status[index] == True:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 150), 2)
elif status[index] == False:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
index += 1
for h in pairs:
cv2.line(frame, tuple(h[0]), tuple(h[1]), (0, 0, 255), 2)
processedImg = frame.copy()
pedestrian_boxes, num_pedestrians = indexes, len(indexes)
# if len(indexes) > 0:
# pedestrian_detect = bird_eye_view_plot(frames, boxes, M, scale_w, scale_h)
canvas = np.zeros((200,200,3))
canvas[:] = (0,0,0)
text = "people:{}".format(len(pedestrian_boxes))
cv2.putText(canvas, text, (35,50), cv2.FONT_HERSHEY_SIMPLEX,
1.0, (0,255,0), 5)
cv2.imshow('info',canvas)
# make predictions on the frame and then update the predictions
# queue
canvas = np.zeros((250, 300, 3), dtype="uint8")
output = frame.copy()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.resize(frame, (224, 224)).astype("float32")
frame = train_datagen.standardize(frame)
preds = model.predict(np.expand_dims(frame, axis=0),workers=6,use_multiprocessing=True)[0]
Q.append(preds)
for (i,(lab, prob)) in enumerate(zip(lb, preds)):
text= "{}:{:.2f}%".format(lab, prob*100)
w = int(prob*300)
cv2.rectangle(canvas, (7, (i*35) +5),
(w, (i*35)+35), (0,0,255), -1)
cv2.putText(canvas, text, (10,(i*35)+23), cv2.FONT_HERSHEY_SIMPLEX,0.45, (255,255,255),2)
results = np.array(Q).mean(axis=0)
i = np.argmax(results)
label = lb[i]
print(label)
# draw the activity on the output frame
text = "{}".format(label)
cv2.putText(output, text, (105, 50), cv2.FONT_HERSHEY_SIMPLEX,
1.0, (0, 255, 0), 5)
cv2.imshow("probs", canvas)
elapsed_time = time.time() - starting_time
fps = frame_id / elapsed_time
cv2.putText(output, "FPS: " + str(round(fps, 2)), (10, 50), font, 4, (0, 0, 0), 3)
cv2.imshow("Image", output)
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
confidence = confidences[i]
color = colors[class_ids[i]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
cv2.putText(frame, label + " " + str(round(confidence, 2)), (x, y + 30), font, 2, color, 1)
if len(pedestrian_boxes) > 0:
warped_pts, bird_image = display_points(
frame, boxes
)
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
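# Hedged illustration (added; not part of the original script): the Check() heuristic above
# flags a pair of detection centres as "too close" when their perspective-scaled distance
# drops below 0.25 * the mean y-coordinate of the pair (the calibration term).
print(Check([100, 400], [110, 400]))   # nearby points at the same depth -> expected True
print(Check([100, 400], [400, 400]))   # far-apart points at the same depth -> expected False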
|
[
"cv2.rectangle",
"numpy.sqrt",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"cv2.imshow",
"numpy.array",
"tensorflow.keras.models.load_model",
"cv2.destroyAllWindows",
"cv2.dnn.NMSBoxes",
"cv2.setMouseCallback",
"collections.deque",
"cv2.perspectiveTransform",
"cv2.waitKey",
"cv2.dnn.blobFromImage",
"numpy.argmax",
"cv2.putText",
"ast.literal_eval",
"cv2.circle",
"cv2.cvtColor",
"cv2.resize",
"time.time",
"cv2.namedWindow",
"cv2.destroyWindow",
"numpy.zeros",
"cv2.VideoCapture",
"numpy.expand_dims",
"cv2.dnn.readNet"
] |
[((254, 325), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['"""./data/yolov4-tiny.weights"""', '"""./data/yolov4-tiny.cfg"""'], {}), "('./data/yolov4-tiny.weights', './data/yolov4-tiny.cfg')\n", (269, 325), False, 'import cv2\n'), ((741, 774), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""vid_short.mp4"""'], {}), "('vid_short.mp4')\n", (757, 774), False, 'import cv2\n'), ((801, 854), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""./model/resnet191020.h5"""'], {}), "('./model/resnet191020.h5')\n", (827, 854), True, 'import tensorflow as tf\n'), ((1158, 1216), 'numpy.array', 'np.array', (['[123.68, 116.779, 103.939][::1]'], {'dtype': '"""float32"""'}), "([123.68, 116.779, 103.939][::1], dtype='float32')\n", (1166, 1216), True, 'import numpy as np\n'), ((1221, 1238), 'collections.deque', 'deque', ([], {'maxlen': '(128)'}), '(maxlen=128)\n', (1226, 1238), False, 'from collections import deque\n'), ((1256, 1324), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'tf.keras.preprocessing.image.ImageDataGenerator', ([], {'rescale': '(1.0 / 255.0)'}), '(rescale=1.0 / 255.0)\n', (1303, 1324), True, 'import tensorflow as tf\n'), ((2530, 2554), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (2545, 2554), False, 'import cv2\n'), ((2555, 2602), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'get_mouse_points'], {}), "('image', get_mouse_points)\n", (2575, 2602), False, 'import cv2\n'), ((2698, 2709), 'time.time', 'time.time', ([], {}), '()\n', (2707, 2709), False, 'import time\n'), ((8319, 8342), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8340, 8342), False, 'import cv2\n'), ((3969, 4047), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(0.00392)', '(416, 416)', '(0, 0, 0)', '(True)'], {'crop': '(False)'}), '(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\n', (3990, 4047), False, 'import cv2\n'), ((4828, 4874), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', '(0.5)', '(0.4)'], {}), '(boxes, confidences, 0.5, 0.4)\n', (4844, 4874), False, 'import cv2\n'), ((6231, 6254), 'numpy.zeros', 'np.zeros', (['(200, 200, 3)'], {}), '((200, 200, 3))\n', (6239, 6254), True, 'import numpy as np\n'), ((6334, 6420), 'cv2.putText', 'cv2.putText', (['canvas', 'text', '(35, 50)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1.0)', '(0, 255, 0)', '(5)'], {}), '(canvas, text, (35, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255,\n 0), 5)\n', (6345, 6420), False, 'import cv2\n'), ((6429, 6455), 'cv2.imshow', 'cv2.imshow', (['"""info"""', 'canvas'], {}), "('info', canvas)\n", (6439, 6455), False, 'import cv2\n'), ((6555, 6593), 'numpy.zeros', 'np.zeros', (['(250, 300, 3)'], {'dtype': '"""uint8"""'}), "((250, 300, 3), dtype='uint8')\n", (6563, 6593), True, 'import numpy as np\n'), ((6632, 6670), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (6644, 6670), False, 'import cv2\n'), ((7272, 7290), 'numpy.argmax', 'np.argmax', (['results'], {}), '(results)\n', (7281, 7290), True, 'import numpy as np\n'), ((7404, 7491), 'cv2.putText', 'cv2.putText', (['output', 'text', '(105, 50)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1.0)', '(0, 255, 0)', '(5)'], {}), '(output, text, (105, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255,\n 0), 5)\n', (7415, 7491), False, 'import cv2\n'), ((7505, 7532), 'cv2.imshow', 'cv2.imshow', (['"""probs"""', 'canvas'], {}), "('probs', canvas)\n", (7515, 7532), False, 'import cv2\n'), ((7706, 7733), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 
'output'], {}), "('Image', output)\n", (7716, 7733), False, 'import cv2\n'), ((8257, 8271), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (8268, 8271), False, 'import cv2\n'), ((1758, 1806), 'cv2.circle', 'cv2.circle', (['image', '(x, y)', '(10)', '(0, 255, 255)', '(10)'], {}), '(image, (x, y), 10, (0, 255, 255), 10)\n', (1768, 1806), False, 'import cv2\n'), ((3619, 3715), 'numpy.sqrt', 'np.sqrt', (['((warped_pt[0][0] - warped_pt[1][0]) ** 2 + (warped_pt[0][1] - warped_pt[1]\n [1]) ** 2)'], {}), '((warped_pt[0][0] - warped_pt[1][0]) ** 2 + (warped_pt[0][1] -\n warped_pt[1][1]) ** 2)\n', (3626, 3715), True, 'import numpy as np\n'), ((7044, 7117), 'cv2.rectangle', 'cv2.rectangle', (['canvas', '(7, i * 35 + 5)', '(w, i * 35 + 35)', '(0, 0, 255)', '(-1)'], {}), '(canvas, (7, i * 35 + 5), (w, i * 35 + 35), (0, 0, 255), -1)\n', (7057, 7117), False, 'import cv2\n'), ((7134, 7234), 'cv2.putText', 'cv2.putText', (['canvas', 'text', '(10, i * 35 + 23)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.45)', '(255, 255, 255)', '(2)'], {}), '(canvas, text, (10, i * 35 + 23), cv2.FONT_HERSHEY_SIMPLEX, 0.45,\n (255, 255, 255), 2)\n', (7145, 7234), False, 'import cv2\n'), ((7553, 7564), 'time.time', 'time.time', ([], {}), '()\n', (7562, 7564), False, 'import time\n'), ((3128, 3154), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (3138, 3154), False, 'import cv2\n'), ((3167, 3181), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3178, 3181), False, 'import cv2\n'), ((3515, 3542), 'numpy.array', 'np.array', (['[four_points[4:]]'], {}), '([four_points[4:]])\n', (3523, 3542), True, 'import numpy as np\n'), ((3564, 3596), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['pts', 'M'], {}), '(pts, M)\n', (3588, 3596), False, 'import cv2\n'), ((4275, 4292), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (4284, 4292), True, 'import numpy as np\n'), ((6683, 6712), 'cv2.resize', 'cv2.resize', (['frame', '(224, 224)'], {}), '(frame, (224, 224))\n', (6693, 6712), False, 'import cv2\n'), ((6811, 6840), 'numpy.expand_dims', 'np.expand_dims', (['frame'], {'axis': '(0)'}), '(frame, axis=0)\n', (6825, 6840), True, 'import numpy as np\n'), ((7239, 7250), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (7247, 7250), True, 'import numpy as np\n'), ((7967, 8021), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', 'color', '(2)'], {}), '(frame, (x, y), (x + w, y + h), color, 2)\n', (7980, 8021), False, 'import cv2\n'), ((3096, 3115), 'ast.literal_eval', 'ast.literal_eval', (['s'], {}), '(s)\n', (3112, 3115), False, 'import ast\n'), ((5717, 5777), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 0, 150)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 0, 150), 2)\n', (5730, 5777), False, 'import cv2\n'), ((3275, 3301), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""image"""'], {}), "('image')\n", (3292, 3301), False, 'import cv2\n'), ((5835, 5895), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (5848, 5895), False, 'import cv2\n')]
|
# Code made for <NAME>
# 12 April 2021
# License MIT
# Transport Phenomena: Python Program-Assessment 4.3
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.optimize import minimize
sns.set()
# Solve for Temperature of Steam at given Pressure
class enviroment_convective:
def temp_steam_sat_reg(self, Sat_pressure_1, Sat_pressure_2, Temp_from_pressure_1, Temp_from_pressure_2,
Sat_pressure_system):
p1 = Sat_pressure_1 # [kPa]
p2 = Sat_pressure_2 # [kPa]
T1 = Temp_from_pressure_1 + 273.15 # [K]
T2 = Temp_from_pressure_2 + 273.15 # [K]
P_x = Sat_pressure_system # [kPa]
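        # Linear interpolation of the saturation temperature between the two tabulated pressure points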
m = (T2 - T1) / (p2 - p1)
T = m * P_x - (m * p1) + T1
return T
# Optimize for the maximum allowed temperature difference
class Optimice:
def objective_T(self, x, *args):
T_supp, r = args[0], args[1]
thk = 0.015
x1 = x[0] # C1
x2 = x[1] # C2
return T_supp - ((x1 * np.log(r + thk)) - x2)
def constraint_BC1_BC2(self, x):
r, T_in = (0.025, 484.8362745098039)
K, thk, h_in, T_out, h_out, e = (15.6, 0.015, 30, 25 + 273.15, 5, 0.3)
x1 = x[0] # C1
x2 = x[1] # C2
R_conv_1 = (1 / (2 * np.pi * (r)) * h_in)
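        # Combined convection + radiation term at the outer surface (5.670e-8 is the Stefan-Boltzmann constant)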
h_comb = (2 * np.pi * (r + thk)) * (h_out + e * 5.670e-8 * (x1 * np.log(r + thk) + x2 - T_out)
* ((x1 * np.log(r + thk) + x2) ** 2 + T_out ** 2))
R_cond = np.log(thk) / (2 * np.pi * K)
return ((T_in - T_out) / (R_conv_1 + R_cond + (1 / h_comb))) + ((K * x1) / r)
def objective_T_II(self, x, *args):
T_supp, r = args[0], args[1]
x1 = x[0] # C1
x2 = x[1] # C2
return T_supp - ((x1 * np.log(r)) - x2)
def constraint_BC1_BC2_II(self, x):
r, T_in = (0.025, 484.8362745098039)
K, thk_1, h_in, T_out, h_out = (15.6, 0.015, 30, 25 + 273.15, 5)
K_2, thk_2, e = (0.25, 0.012, 0.8)
x1 = x[0] # C1
x2 = x[1] # C2
R_conv_1 = (1 / (2 * np.pi * r) * h_in)
R_cond = np.log(thk_1) / (2 * np.pi * K)
R_cond_2 = np.log(thk_2) / (2 * np.pi * K_2)
h_comb = (2 * np.pi * (r + thk_1 + thk_2)) * (
h_out + e * 5.670e-8 * (x1 * np.log(r + thk_1 + thk_2) + x2 - T_out)
* ((x1 * np.log(r + thk_1 + thk_2) + x2) ** 2 + T_out ** 2))
return ((T_in - T_out) / (R_conv_1 + R_cond + R_cond_2 + (1 / h_comb))) + ((K * x1) / r)
# Determine the heat flux Q with and without the cover
class Q_determine:
def Q_uncover(self, r, T_in, K, thk, h_in, T_out, h_out, e, Delta_T):
T_surf = (T_int - Delta_T) + 273.15
R_conv_1 = (1 / (2 * np.pi * r) * h_in)
h_comb = (2 * np.pi * (r + thk)) * (h_out + e * 5.670e-8 * (T_surf - T_out)
* (T_surf ** 2 + T_out ** 2))
R_cond = np.log(thk) / (2 * np.pi * K)
Q = ((T_in - T_out) / (R_conv_1 + R_cond + (1 / h_comb)))
return Q
def Q_cover(self, r, T_in, K, K_2, thk_1, thk_2, h_in, T_out, h_out, e, Delta_T):
T_surf = (T_int - Delta_T) + 273.15
R_conv_1 = (1 / (2 * np.pi * r) * h_in)
R_cond = np.log(thk_1) / (2 * np.pi * K)
R_cond_2 = np.log(thk_2) / (2 * np.pi * K_2)
h_comb = (2 * np.pi * (r + thk_1 + thk_2)) * (
h_out + e * 5.670e-8 * (T_surf - T_out)
* (T_surf ** 2 + T_out ** 2))
Q = ((T_in - T_out) / (R_conv_1 + R_cond + R_cond_2 + (1 / h_comb)))
return Q
# Inner surface temperature of the iron cylinder
class T_profile_iron:
def T_in_II(self, Q_tot, r, K, thk, T_surf_out):
R_cond = np.log(r - thk) / (2 * np.pi * K)
T_surf_in = (-Q_tot * R_cond) + T_surf_out
return T_surf_in
env_conv = enviroment_convective()
Opt = Optimice()
Q_s = Q_determine()
T_iron = T_profile_iron()
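# Saturation temperature of the steam at 2000 kPa, interpolated between the 1553 kPa and 2318 kPa table entries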
T_int = env_conv.temp_steam_sat_reg(1553, 2318, 200, 220, 2000)
constraint_equal1 = {'type': 'eq', 'fun': Opt.constraint_BC1_BC2}
constraint = [constraint_equal1]
# T_suppose, Radius_max
arguments = (T_int, 0.025)
x0 = [0, 0] # These initial values are taken from a first solution given by the method
sol = minimize(Opt.objective_T, x0, method='SLSQP', args=arguments, constraints=constraint, options={'maxiter': 5})
# NOTE: adjust the iterations to reach values consistent with reality; more constraints are needed
# The result gives the maximum difference the system reaches between the assumed value and the actual one
Q_1 = Q_s.Q_uncover(0.025, T_int, 15.6, 0.015, 30, 25 + 273.15, 5, 0.3, sol.fun)
T_in_iron = T_iron.T_in_II(Q_1, 0.025, 30, 0.015, (T_int - sol.fun) + 273.15)
########################################### Case 2 #####################################################################
constraint_equal1_II = {'type': 'eq', 'fun': Opt.constraint_BC1_BC2_II}
constraint_II = [constraint_equal1_II]
# T_suppose, Radius_max
arguments_II = (T_int, 0.025 + 0.015 + 0.012)
x0 = [0, 0] # These initial values are taken from a first solution given by the method
sol_II = minimize(Opt.objective_T, x0, method='SLSQP', args=arguments_II, constraints=constraint_II,
options={'maxiter': 5})
# NOTE: adjust the iterations to reach values consistent with reality; more constraints are needed
# The result gives the maximum difference the system reaches between the assumed value and the actual one
Q_2 = Q_s.Q_cover(0.025, T_int, 15.6, 0.25, 0.015, 0.012, 30, 25 + 273.15, 5, 0.3, sol_II.fun)
print("========================= WITH UNCOVER ==============================================\n")
print("Temperature in the convective enviro. 1: {} [K]".format(T_int))
print("Temperature at the start of the cylinder: {} [K]".format(T_in_iron))
print("Temperature at the end of the cylinder: {} [K]".format((T_int - sol.fun) + 273.15))
print("Q for meter of cylinder: {} [W/m]\n".format(Q_1))
print("================================================================================")
print("========================= WITH COVER ==============================================\n")
print("Temperature in the convective enviro. 1: {} [K]".format(T_int))
print("Temperature at the end of the cylinder: {} [K]".format((T_int - sol_II.fun) + 273.15))
print("Q for meter of cylinder: {} [W/m]\n".format(Q_2))
print("================================================================================\n")
|
[
"numpy.log",
"seaborn.set",
"scipy.optimize.minimize"
] |
[((217, 226), 'seaborn.set', 'sns.set', ([], {}), '()\n', (224, 226), True, 'import seaborn as sns\n'), ((4244, 4358), 'scipy.optimize.minimize', 'minimize', (['Opt.objective_T', 'x0'], {'method': '"""SLSQP"""', 'args': 'arguments', 'constraints': 'constraint', 'options': "{'maxiter': 5}"}), "(Opt.objective_T, x0, method='SLSQP', args=arguments, constraints=\n constraint, options={'maxiter': 5})\n", (4252, 4358), False, 'from scipy.optimize import minimize\n'), ((5137, 5256), 'scipy.optimize.minimize', 'minimize', (['Opt.objective_T', 'x0'], {'method': '"""SLSQP"""', 'args': 'arguments_II', 'constraints': 'constraint_II', 'options': "{'maxiter': 5}"}), "(Opt.objective_T, x0, method='SLSQP', args=arguments_II,\n constraints=constraint_II, options={'maxiter': 5})\n", (5145, 5256), False, 'from scipy.optimize import minimize\n'), ((1503, 1514), 'numpy.log', 'np.log', (['thk'], {}), '(thk)\n', (1509, 1514), True, 'import numpy as np\n'), ((2108, 2121), 'numpy.log', 'np.log', (['thk_1'], {}), '(thk_1)\n', (2114, 2121), True, 'import numpy as np\n'), ((2159, 2172), 'numpy.log', 'np.log', (['thk_2'], {}), '(thk_2)\n', (2165, 2172), True, 'import numpy as np\n'), ((2921, 2932), 'numpy.log', 'np.log', (['thk'], {}), '(thk)\n', (2927, 2932), True, 'import numpy as np\n'), ((3230, 3243), 'numpy.log', 'np.log', (['thk_1'], {}), '(thk_1)\n', (3236, 3243), True, 'import numpy as np\n'), ((3281, 3294), 'numpy.log', 'np.log', (['thk_2'], {}), '(thk_2)\n', (3287, 3294), True, 'import numpy as np\n'), ((3703, 3718), 'numpy.log', 'np.log', (['(r - thk)'], {}), '(r - thk)\n', (3709, 3718), True, 'import numpy as np\n'), ((1005, 1020), 'numpy.log', 'np.log', (['(r + thk)'], {}), '(r + thk)\n', (1011, 1020), True, 'import numpy as np\n'), ((1776, 1785), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (1782, 1785), True, 'import numpy as np\n'), ((1361, 1376), 'numpy.log', 'np.log', (['(r + thk)'], {}), '(r + thk)\n', (1367, 1376), True, 'import numpy as np\n'), ((1444, 1459), 'numpy.log', 'np.log', (['(r + thk)'], {}), '(r + thk)\n', (1450, 1459), True, 'import numpy as np\n'), ((2293, 2318), 'numpy.log', 'np.log', (['(r + thk_1 + thk_2)'], {}), '(r + thk_1 + thk_2)\n', (2299, 2318), True, 'import numpy as np\n'), ((2358, 2383), 'numpy.log', 'np.log', (['(r + thk_1 + thk_2)'], {}), '(r + thk_1 + thk_2)\n', (2364, 2383), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
from shapes import Myinit
class Triangle(Myinit):
def __init__(self):
super(Triangle, self).__init__()
self.vertices = np.array([[100,50], [150,150], [50,150]],np.int32)
self.vertices = self.vertices.reshape((-1, 1, 2))
self.color=(255,0,255)
def form_shape(self):
self.img = cv2.polylines(self.img, [self.vertices], True, self.color)
cv2.fillPoly(self.img, [self.vertices], self.color)
def welcome(self):
print('Printing Triangle...!')
def sides(self):
print("Triangle has 3 sides.")
def draw_shape(self):
self.welcome()
self.form_shape()
self.sides()
cv2.imshow("Triangle", self.img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.fillPoly",
"cv2.polylines",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.waitKey"
] |
[((177, 231), 'numpy.array', 'np.array', (['[[100, 50], [150, 150], [50, 150]]', 'np.int32'], {}), '([[100, 50], [150, 150], [50, 150]], np.int32)\n', (185, 231), True, 'import numpy as np\n'), ((368, 426), 'cv2.polylines', 'cv2.polylines', (['self.img', '[self.vertices]', '(True)', 'self.color'], {}), '(self.img, [self.vertices], True, self.color)\n', (381, 426), False, 'import cv2\n'), ((436, 487), 'cv2.fillPoly', 'cv2.fillPoly', (['self.img', '[self.vertices]', 'self.color'], {}), '(self.img, [self.vertices], self.color)\n', (448, 487), False, 'import cv2\n'), ((729, 761), 'cv2.imshow', 'cv2.imshow', (['"""Triangle"""', 'self.img'], {}), "('Triangle', self.img)\n", (739, 761), False, 'import cv2\n'), ((771, 785), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (782, 785), False, 'import cv2\n'), ((795, 818), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (816, 818), False, 'import cv2\n')]
|
import numpy as np
class vents:
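    # Counts overlaps of horizontal/vertical line segments on a grid; each segment is (x1, y1, x2, y2), diagonals are ignored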
def __init__(self, segments):
self.segments = segments
(self.maxx, self.maxy) = self._getmaxxy()
self.board = np.zeros((self.maxx+1, self.maxy+1), dtype=int)
def _getmaxxy(self):
allxs = [x[0] for x in self.segments]
allxs.extend([x[2] for x in self.segments])
allys = [x[1] for x in self.segments]
allys.extend([x[3] for x in self.segments])
print(f"segments: {self.segments}")
print([x[0] for x in self.segments])
print([x[2] for x in self.segments])
print(f"allxs: {allxs}")
maxx = max(allxs)
maxy = max(allys)
print(f"(maxx, maxy): ({maxx}, {maxy})")
return (int(maxx), int(maxy))
def _draw_vertical(self, s):
print(f"draw vertical: {s}")
x = s[0]
if s[3] < s[1]:
(start, fin) = (s[3], s[1])
else:
(start, fin) = (s[1], s[3])
for y in range(start, fin+1):
self.board[x, y] += 1
print(np.transpose(self.board))
def _draw_horizontal(self, s):
print(f"draw horizontal: {s}")
y = s[1]
if s[2] < s[0]:
(start, fin) = (s[2], s[0])
else:
(start, fin) = (s[0], s[2])
for x in range(start, fin+1):
self.board[x, y] += 1
print(np.transpose(self.board))
def _build_board(self):
for s in self.segments:
if s[0] == s[2]:
self._draw_vertical(s)
if s[1] == s[3]:
self._draw_horizontal(s)
def count_overlaps(self):
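        # Build the board, then count cells covered by more than one segment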
print(self.board)
self._build_board()
return np.count_nonzero(self.board > 1)
|
[
"numpy.count_nonzero",
"numpy.zeros",
"numpy.transpose"
] |
[((172, 223), 'numpy.zeros', 'np.zeros', (['(self.maxx + 1, self.maxy + 1)'], {'dtype': 'int'}), '((self.maxx + 1, self.maxy + 1), dtype=int)\n', (180, 223), True, 'import numpy as np\n'), ((1697, 1729), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.board > 1)'], {}), '(self.board > 1)\n', (1713, 1729), True, 'import numpy as np\n'), ((1041, 1065), 'numpy.transpose', 'np.transpose', (['self.board'], {}), '(self.board)\n', (1053, 1065), True, 'import numpy as np\n'), ((1372, 1396), 'numpy.transpose', 'np.transpose', (['self.board'], {}), '(self.board)\n', (1384, 1396), True, 'import numpy as np\n')]
|
# This is used for testing fine-tuning of hyper-parameters
from datetime import datetime
import itertools
import json
import matplotlib.pyplot as plt
import numpy as np
from keras.callbacks import ModelCheckpoint
from keras.wrappers.scikit_learn import KerasClassifier
from keras_preprocessing.sequence import pad_sequences
import joblib
from sklearn.model_selection import RandomizedSearchCV
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Embedding, Conv1D, GlobalMaxPooling1D, Flatten, LSTM
from keras.preprocessing import text, sequence
from keras import utils
import pandas as pd
testData = pd.read_csv("../data/test.csv")
dictData = pd.read_csv("../data/kata_dasar_kbbi.csv")
categories_file = open("../data/categories.json", "r")
categories = json.load(categories_file)
inverted_categories_mobile = {v: k.lower() for k, v in categories['Mobile'].items()}
inverted_categories_fashion = {v: k.lower() for k, v in categories['Fashion'].items()}
inverted_categories_beauty = {v: k.lower() for k, v in categories['Beauty'].items()}
all_subcategories = {k.lower(): v for k, v in categories['Mobile'].items()}
all_subcategories.update({k.lower(): v for k, v in categories['Fashion'].items()})
all_subcategories.update({k.lower(): v for k, v in categories['Beauty'].items()})
# Main settings
plot_history_check = True
gen_test = True
max_length = 35 # 32 is the max word count in train; may need to test later how many words are actually used
num_classes = len(all_subcategories)
# Training for more epochs will likely lead to overfitting on this dataset
# You can try tweaking these hyperparameters when using this model with your own data
batch_size = 256
epochs = 10
max_words = 1000
print(all_subcategories)
print("no of categories: " + str(num_classes))
category_mapping = {
'fashion_image': 'Fashion',
'beauty_image': 'Beauty',
'mobile_image': 'Mobile',
}
directory_mapping = {
'Fashion': 'fashion_image',
'Beauty': 'beauty_image',
'Mobile': 'mobile_image',
}
trainData = pd.read_csv("../data/train.csv")
# Shuffle train data
trainData = shuffle(trainData)
max_data_size = int(len(trainData) * 1)
train_data_size = int(max_data_size * .95)
train_data_step = 1
validate_data_step = 1
print(train_data_size, max_data_size)
train_texts = trainData['title'][::train_data_step]
train_tags = trainData['Category'][::train_data_step]
test_texts = testData['title']
print(len(train_texts), len(train_tags))
y = train_tags.values
tokenize = text.Tokenizer(num_words=max_words, char_level=False)
tokenize.fit_on_texts(train_texts) # only fit on train
x_train = tokenize.texts_to_sequences(train_texts)
x_test = tokenize.texts_to_sequences(test_texts)
# Pad sequences with zeros
x_train = pad_sequences(x_train, padding='post', maxlen=max_length)
x_test = pad_sequences(x_test, padding='post', maxlen=max_length)
y_train = train_tags.values
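# One-hot encode the integer category labels for the softmax output layer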
y_train = utils.to_categorical(y_train)
vocab_size = len(tokenize.word_index) + 1
print(vocab_size)
def create_model(num_filters, kernel_size, max_words, embedding_dim, max_length):
model = Sequential()
model.add(Embedding(max_words,
embedding_dim,
input_length=max_length,
trainable=True))
model.add(Conv1D(num_filters, kernel_size, activation='relu'))
model.add(GlobalMaxPooling1D())
model.add(Dense(embedding_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def gen_filename_h5():
return 'epoch_'+str(epochs) + '_' + datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
def gen_filename_csv():
return 'epoch_'+str(epochs) + '_' + datetime.now().strftime("%m_%d_%Y_%H_%M_%S")
param_grid = dict(num_filters=[32, 64, 128],
kernel_size=[3, 5, 7],
max_words=[max_words],
embedding_dim=[64, 128],
max_length=[max_length])
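# RandomizedSearchCV below samples 10 hyper-parameter combinations (n_iter=10) from this grid with 4-fold CV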
filepath = "../checkpoints/"+gen_filename_h5()+"v2.hdf5"
checkpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
model = KerasClassifier(build_fn=create_model,
epochs=epochs,
batch_size=batch_size,
verbose=True,
)
grid = RandomizedSearchCV(estimator=model, param_distributions=param_grid,
cv=4, verbose=1, n_iter=10)
print(grid)
grid_result = grid.fit(x_train,
y_train,
validation_split=0.1,
callbacks=[checkpointer])
with open("../checkpoints/"+gen_filename_h5()+".pickle","w+") as f:
joblib.dump(grid_result, f)
def plot_history(history):
plt.style.use('ggplot')
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
x = range(1, len(acc) + 1)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(x, acc, 'b', label='Training acc')
plt.plot(x, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(x, loss, 'b', label='Training loss')
plt.plot(x, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
if plot_history_check:
plot_history(grid)
def perform_test():
prediction = grid.predict(x_test, batch_size=batch_size, verbose=1)
predicted_label = [np.argmax(prediction[i]) for i in range(len(x_test))]
# print(predicted_label)
df = pd.DataFrame({'itemid': testData['itemid'].astype(int), 'Category': predicted_label})
df.to_csv(path_or_buf='res_' + gen_filename_csv() + '.csv', index=False)
if gen_test:
perform_test()
# This utility function is from the sklearn docs:
# http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
def plot_confusion_matrix(cm, classes,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title, fontsize=30)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45, fontsize=22)
plt.yticks(tick_marks, classes, fontsize=22)
fmt = '.2f'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label', fontsize=25)
plt.xlabel('Predicted label', fontsize=25)
plt.show()
|
[
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"keras.layers.GlobalMaxPooling1D",
"keras.utils.to_categorical",
"keras.layers.Dense",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.yticks",
"joblib.dump",
"matplotlib.pyplot.xticks",
"keras_preprocessing.sequence.pad_sequences",
"numpy.argmax",
"keras.models.Sequential",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"keras.preprocessing.text.Tokenizer",
"keras.callbacks.ModelCheckpoint",
"keras.wrappers.scikit_learn.KerasClassifier",
"sklearn.utils.shuffle",
"matplotlib.pyplot.colorbar",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"json.load",
"keras.layers.Embedding",
"matplotlib.pyplot.subplot",
"keras.layers.Conv1D",
"sklearn.model_selection.RandomizedSearchCV"
] |
[((670, 701), 'pandas.read_csv', 'pd.read_csv', (['"""../data/test.csv"""'], {}), "('../data/test.csv')\n", (681, 701), True, 'import pandas as pd\n'), ((713, 755), 'pandas.read_csv', 'pd.read_csv', (['"""../data/kata_dasar_kbbi.csv"""'], {}), "('../data/kata_dasar_kbbi.csv')\n", (724, 755), True, 'import pandas as pd\n'), ((824, 850), 'json.load', 'json.load', (['categories_file'], {}), '(categories_file)\n', (833, 850), False, 'import json\n'), ((2084, 2116), 'pandas.read_csv', 'pd.read_csv', (['"""../data/train.csv"""'], {}), "('../data/train.csv')\n", (2095, 2116), True, 'import pandas as pd\n'), ((2151, 2169), 'sklearn.utils.shuffle', 'shuffle', (['trainData'], {}), '(trainData)\n', (2158, 2169), False, 'from sklearn.utils import shuffle\n'), ((2549, 2602), 'keras.preprocessing.text.Tokenizer', 'text.Tokenizer', ([], {'num_words': 'max_words', 'char_level': '(False)'}), '(num_words=max_words, char_level=False)\n', (2563, 2602), False, 'from keras.preprocessing import text, sequence\n'), ((2797, 2854), 'keras_preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_train'], {'padding': '"""post"""', 'maxlen': 'max_length'}), "(x_train, padding='post', maxlen=max_length)\n", (2810, 2854), False, 'from keras_preprocessing.sequence import pad_sequences\n'), ((2864, 2920), 'keras_preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_test'], {'padding': '"""post"""', 'maxlen': 'max_length'}), "(x_test, padding='post', maxlen=max_length)\n", (2877, 2920), False, 'from keras_preprocessing.sequence import pad_sequences\n'), ((2960, 2989), 'keras.utils.to_categorical', 'utils.to_categorical', (['y_train'], {}), '(y_train)\n', (2980, 2989), False, 'from keras import utils\n'), ((4190, 4282), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath, monitor='val_acc', verbose=1, save_best_only=True,\n mode='max')\n", (4205, 4282), False, 'from keras.callbacks import ModelCheckpoint\n'), ((4287, 4381), 'keras.wrappers.scikit_learn.KerasClassifier', 'KerasClassifier', ([], {'build_fn': 'create_model', 'epochs': 'epochs', 'batch_size': 'batch_size', 'verbose': '(True)'}), '(build_fn=create_model, epochs=epochs, batch_size=batch_size,\n verbose=True)\n', (4302, 4381), False, 'from keras.wrappers.scikit_learn import KerasClassifier\n'), ((4484, 4583), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', ([], {'estimator': 'model', 'param_distributions': 'param_grid', 'cv': '(4)', 'verbose': '(1)', 'n_iter': '(10)'}), '(estimator=model, param_distributions=param_grid, cv=4,\n verbose=1, n_iter=10)\n', (4502, 4583), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((3146, 3158), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3156, 3158), False, 'from keras.models import Sequential\n'), ((4856, 4883), 'joblib.dump', 'joblib.dump', (['grid_result', 'f'], {}), '(grid_result, f)\n', (4867, 4883), False, 'import joblib\n'), ((4917, 4940), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (4930, 4940), True, 'import matplotlib.pyplot as plt\n'), ((5129, 5156), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (5139, 5156), True, 'import matplotlib.pyplot as plt\n'), ((5161, 5181), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (5172, 5181), True, 'import matplotlib.pyplot as plt\n'), ((5186, 5229), 
'matplotlib.pyplot.plot', 'plt.plot', (['x', 'acc', '"""b"""'], {'label': '"""Training acc"""'}), "(x, acc, 'b', label='Training acc')\n", (5194, 5229), True, 'import matplotlib.pyplot as plt\n'), ((5234, 5283), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_acc', '"""r"""'], {'label': '"""Validation acc"""'}), "(x, val_acc, 'r', label='Validation acc')\n", (5242, 5283), True, 'import matplotlib.pyplot as plt\n'), ((5288, 5333), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation accuracy"""'], {}), "('Training and validation accuracy')\n", (5297, 5333), True, 'import matplotlib.pyplot as plt\n'), ((5338, 5350), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5348, 5350), True, 'import matplotlib.pyplot as plt\n'), ((5355, 5375), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (5366, 5375), True, 'import matplotlib.pyplot as plt\n'), ((5380, 5425), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'loss', '"""b"""'], {'label': '"""Training loss"""'}), "(x, loss, 'b', label='Training loss')\n", (5388, 5425), True, 'import matplotlib.pyplot as plt\n'), ((5430, 5481), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_loss', '"""r"""'], {'label': '"""Validation loss"""'}), "(x, val_loss, 'r', label='Validation loss')\n", (5438, 5481), True, 'import matplotlib.pyplot as plt\n'), ((5486, 5527), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and validation loss"""'], {}), "('Training and validation loss')\n", (5495, 5527), True, 'import matplotlib.pyplot as plt\n'), ((5532, 5544), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5542, 5544), True, 'import matplotlib.pyplot as plt\n'), ((5549, 5559), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5557, 5559), True, 'import matplotlib.pyplot as plt\n'), ((6494, 6544), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (6504, 6544), True, 'import matplotlib.pyplot as plt\n'), ((6549, 6578), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(30)'}), '(title, fontsize=30)\n', (6558, 6578), True, 'import matplotlib.pyplot as plt\n'), ((6583, 6597), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6595, 6597), True, 'import matplotlib.pyplot as plt\n'), ((6643, 6700), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)', 'fontsize': '(22)'}), '(tick_marks, classes, rotation=45, fontsize=22)\n', (6653, 6700), True, 'import matplotlib.pyplot as plt\n'), ((6705, 6749), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {'fontsize': '(22)'}), '(tick_marks, classes, fontsize=22)\n', (6715, 6749), True, 'import matplotlib.pyplot as plt\n'), ((7033, 7070), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {'fontsize': '(25)'}), "('True label', fontsize=25)\n", (7043, 7070), True, 'import matplotlib.pyplot as plt\n'), ((7075, 7117), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {'fontsize': '(25)'}), "('Predicted label', fontsize=25)\n", (7085, 7117), True, 'import matplotlib.pyplot as plt\n'), ((7122, 7132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7130, 7132), True, 'import matplotlib.pyplot as plt\n'), ((3173, 3249), 'keras.layers.Embedding', 'Embedding', (['max_words', 'embedding_dim'], {'input_length': 'max_length', 'trainable': '(True)'}), '(max_words, embedding_dim, input_length=max_length, trainable=True)\n', (3182, 
3249), False, 'from keras.layers import Dense, Activation, Dropout, Embedding, Conv1D, GlobalMaxPooling1D, Flatten, LSTM\n'), ((3337, 3388), 'keras.layers.Conv1D', 'Conv1D', (['num_filters', 'kernel_size'], {'activation': '"""relu"""'}), "(num_filters, kernel_size, activation='relu')\n", (3343, 3388), False, 'from keras.layers import Dense, Activation, Dropout, Embedding, Conv1D, GlobalMaxPooling1D, Flatten, LSTM\n'), ((3404, 3424), 'keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (3422, 3424), False, 'from keras.layers import Dense, Activation, Dropout, Embedding, Conv1D, GlobalMaxPooling1D, Flatten, LSTM\n'), ((3440, 3479), 'keras.layers.Dense', 'Dense', (['embedding_dim'], {'activation': '"""relu"""'}), "(embedding_dim, activation='relu')\n", (3445, 3479), False, 'from keras.layers import Dense, Activation, Dropout, Embedding, Conv1D, GlobalMaxPooling1D, Flatten, LSTM\n'), ((3495, 3535), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (3500, 3535), False, 'from keras.layers import Dense, Activation, Dropout, Embedding, Conv1D, GlobalMaxPooling1D, Flatten, LSTM\n'), ((5725, 5749), 'numpy.argmax', 'np.argmax', (['prediction[i]'], {}), '(prediction[i])\n', (5734, 5749), True, 'import numpy as np\n'), ((3746, 3760), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3758, 3760), False, 'from datetime import datetime\n'), ((3857, 3871), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3869, 3871), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2017 Interstellar Technologies Inc. All Rights Reserved.
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics
class Rocket:
g0 = 1.0 # Gravity at surface [-]
def __init__(self):
self.H0 = 1.0 # Initial height
self.V0 = 0.0 # Initial velocity
self.M0 = 1.0 # Initial mass
self.Tc = 3.5 # Use for thrust
self.Hc = 500 # Use for drag
self.Vc = 620 # Use for drag
self.Mc = 0.6 # Fraction of initial mass left at end
self.c = 0.5 * np.sqrt(self.g0*self.H0) # Thrust-to-fuel mass
self.Mf = self.Mc * self.M0 # Final mass
self.Dc = 0.5 * self.Vc * self.M0 / self.g0 # Drag scaling
self.T_max = self.Tc * self.g0 * self.M0 # Maximum thrust
def dynamics(prob, obj, section):
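    # State derivatives of the Goddard rocket: altitude, velocity and mass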
h = prob.states(0, section)
v = prob.states(1, section)
m = prob.states(2, section)
T = prob.controls(0, section)
Dc = obj.Dc
c = obj.c
drag = 1 * Dc * v ** 2 * np.exp(-obj.Hc * (h - obj.H0) / obj.H0)
g = obj.g0 * (obj.H0 / h)**2
dx = Dynamics(prob, section)
dx[0] = v
dx[1] = (T - drag) / m - g
dx[2] = - T / c
return dx()
def equality(prob, obj):
h = prob.states_all_section(0)
v = prob.states_all_section(1)
m = prob.states_all_section(2)
T = prob.controls_all_section(0)
tf = prob.time_final(-1)
result = Condition()
# event condition
result.equal(h[0], obj.H0)
result.equal(v[0], obj.V0)
result.equal(m[0], obj.M0)
result.equal(v[-1], 0.0)
result.equal(m[-1], obj.Mf)
return result()
def inequality(prob, obj):
h = prob.states_all_section(0)
v = prob.states_all_section(1)
m = prob.states_all_section(2)
T = prob.controls_all_section(0)
tf = prob.time_final(-1)
result = Condition()
# lower bounds
result.lower_bound(h, obj.H0)
result.lower_bound(v, 0.0)
result.lower_bound(m, obj.Mf)
result.lower_bound(T, 0.0)
result.lower_bound(tf, 0.1)
# upper bounds
result.upper_bound(m, obj.M0)
result.upper_bound(T, obj.T_max)
return result()
def cost(prob, obj):
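    # Maximize the final altitude by minimizing its negative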
h = prob.states_all_section(0)
return -h[-1]
# ========================
plt.close("all")
# Program Starting Point
time_init = [0.0, 0.3]
n = [50]
num_states = [3]
num_controls = [1]
max_iteration = 30
flag_savefig = True
savefig_file = "04_Goddard/04_0knot_"
# ------------------------
# set OpenGoddard class for algorithm determination
prob = Problem(time_init, n, num_states, num_controls, max_iteration)
# ------------------------
# create instance of operating object
# Nondimensionalization of parameters
obj = Rocket()
# ========================
# Initial parameter guess
# altitude profile
H_init = Guess.cubic(prob.time_all_section, 1.0, 0.0, 1.010, 0.0)
# Guess.plot(prob.time_all_section, H_init, "Altitude", "time", "Altitude")
# if(flag_savefig):plt.savefig(savefig_file + "guess_alt" + ".png")
# velocity
V_init = Guess.linear(prob.time_all_section, 0.0, 0.0)
# Guess.plot(prob.time_all_section, V_init, "Velocity", "time", "Velocity")
# mass profile
M_init = Guess.cubic(prob.time_all_section, 1.0, -0.6, 0.6, 0.0)
# Guess.plot(prob.time_all_section, M_init, "Mass", "time", "Mass")
# if(flag_savefig):plt.savefig(savefig_file + "guess_mass" + ".png")
# thrust profile
T_init = Guess.cubic(prob.time_all_section, 3.5, 0.0, 0.0, 0.0)
# Guess.plot(prob.time_all_section, T_init, "Thrust Guess", "time", "Thrust")
# if(flag_savefig):plt.savefig(savefig_file + "guess_thrust" + ".png")
plt.show()
# ========================
# Substitution initial value to parameter vector to be optimized
prob.set_states_all_section(0, H_init)
prob.set_states_all_section(1, V_init)
prob.set_states_all_section(2, M_init)
prob.set_controls_all_section(0, T_init)
# ========================
# Main Process
# Assign problem to SQP solver
prob.dynamics = [dynamics]
prob.knot_states_smooth = []
prob.cost = cost
prob.cost_derivative = None
prob.equality = equality
prob.inequality = inequality
def display_func():
h = prob.states_all_section(0)
print("max altitude: {0:.5f}".format(h[-1]))
prob.solve(obj, display_func, ftol=1e-10)
# ========================
# Post Process
# ------------------------
# Convert parameter vector to variable
h = prob.states_all_section(0)
v = prob.states_all_section(1)
m = prob.states_all_section(2)
T = prob.controls_all_section(0)
time = prob.time_update()
# ------------------------
# Calculate necessary variables
Dc = 0.5 * 620 * 1.0 / 1.0
drag = 1 * Dc * v ** 2 * np.exp(-500 * (h - 1.0) / 1.0)
g = 1.0 * (1.0 / h)**2
# ------------------------
# Visualization
plt.figure()
plt.title("Altitude profile")
plt.plot(time, h, marker="o", label="Altitude")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Altitude [-]")
if(flag_savefig): plt.savefig(savefig_file + "altitude" + ".png")
plt.figure()
plt.title("Velocity")
plt.plot(time, v, marker="o", label="Velocity")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Velocity [-]")
if(flag_savefig): plt.savefig(savefig_file + "velocity" + ".png")
plt.figure()
plt.title("Mass")
plt.plot(time, m, marker="o", label="Mass")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Mass [-]")
if(flag_savefig): plt.savefig(savefig_file + "mass" + ".png")
plt.figure()
plt.title("Thrust profile")
plt.plot(time, T, marker="o", label="Thrust")
plt.plot(time, drag, marker="o", label="Drag")
plt.plot(time, g, marker="o", label="Gravity")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Thrust [-]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "force" + ".png")
plt.show()
|
[
"OpenGoddard.optimize.Guess.cubic",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"OpenGoddard.optimize.Problem",
"numpy.sqrt",
"OpenGoddard.optimize.Guess.linear",
"matplotlib.pyplot.ylabel",
"OpenGoddard.optimize.Dynamics",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"OpenGoddard.optimize.Condition",
"matplotlib.pyplot.figure",
"numpy.exp",
"matplotlib.pyplot.title",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((2361, 2377), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2370, 2377), True, 'import matplotlib.pyplot as plt\n'), ((2636, 2698), 'OpenGoddard.optimize.Problem', 'Problem', (['time_init', 'n', 'num_states', 'num_controls', 'max_iteration'], {}), '(time_init, n, num_states, num_controls, max_iteration)\n', (2643, 2698), False, 'from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics\n'), ((2901, 2956), 'OpenGoddard.optimize.Guess.cubic', 'Guess.cubic', (['prob.time_all_section', '(1.0)', '(0.0)', '(1.01)', '(0.0)'], {}), '(prob.time_all_section, 1.0, 0.0, 1.01, 0.0)\n', (2912, 2956), False, 'from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics\n'), ((3123, 3168), 'OpenGoddard.optimize.Guess.linear', 'Guess.linear', (['prob.time_all_section', '(0.0)', '(0.0)'], {}), '(prob.time_all_section, 0.0, 0.0)\n', (3135, 3168), False, 'from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics\n'), ((3270, 3325), 'OpenGoddard.optimize.Guess.cubic', 'Guess.cubic', (['prob.time_all_section', '(1.0)', '(-0.6)', '(0.6)', '(0.0)'], {}), '(prob.time_all_section, 1.0, -0.6, 0.6, 0.0)\n', (3281, 3325), False, 'from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics\n'), ((3490, 3544), 'OpenGoddard.optimize.Guess.cubic', 'Guess.cubic', (['prob.time_all_section', '(3.5)', '(0.0)', '(0.0)', '(0.0)'], {}), '(prob.time_all_section, 3.5, 0.0, 0.0, 0.0)\n', (3501, 3544), False, 'from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics\n'), ((3695, 3705), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3703, 3705), True, 'import matplotlib.pyplot as plt\n'), ((4806, 4818), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4816, 4818), True, 'import matplotlib.pyplot as plt\n'), ((4819, 4848), 'matplotlib.pyplot.title', 'plt.title', (['"""Altitude profile"""'], {}), "('Altitude profile')\n", (4828, 4848), True, 'import matplotlib.pyplot as plt\n'), ((4849, 4896), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'h'], {'marker': '"""o"""', 'label': '"""Altitude"""'}), "(time, h, marker='o', label='Altitude')\n", (4857, 4896), True, 'import matplotlib.pyplot as plt\n'), ((4972, 4982), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4980, 4982), True, 'import matplotlib.pyplot as plt\n'), ((4983, 5005), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [s]"""'], {}), "('time [s]')\n", (4993, 5005), True, 'import matplotlib.pyplot as plt\n'), ((5006, 5032), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Altitude [-]"""'], {}), "('Altitude [-]')\n", (5016, 5032), True, 'import matplotlib.pyplot as plt\n'), ((5100, 5112), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5110, 5112), True, 'import matplotlib.pyplot as plt\n'), ((5113, 5134), 'matplotlib.pyplot.title', 'plt.title', (['"""Velocity"""'], {}), "('Velocity')\n", (5122, 5134), True, 'import matplotlib.pyplot as plt\n'), ((5135, 5182), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'v'], {'marker': '"""o"""', 'label': '"""Velocity"""'}), "(time, v, marker='o', label='Velocity')\n", (5143, 5182), True, 'import matplotlib.pyplot as plt\n'), ((5258, 5268), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5266, 5268), True, 'import matplotlib.pyplot as plt\n'), ((5269, 5291), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [s]"""'], {}), "('time [s]')\n", (5279, 5291), True, 'import matplotlib.pyplot as plt\n'), ((5292, 5318), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Velocity [-]"""'], {}), "('Velocity 
[-]')\n", (5302, 5318), True, 'import matplotlib.pyplot as plt\n'), ((5386, 5398), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5396, 5398), True, 'import matplotlib.pyplot as plt\n'), ((5399, 5416), 'matplotlib.pyplot.title', 'plt.title', (['"""Mass"""'], {}), "('Mass')\n", (5408, 5416), True, 'import matplotlib.pyplot as plt\n'), ((5417, 5460), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'm'], {'marker': '"""o"""', 'label': '"""Mass"""'}), "(time, m, marker='o', label='Mass')\n", (5425, 5460), True, 'import matplotlib.pyplot as plt\n'), ((5536, 5546), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5544, 5546), True, 'import matplotlib.pyplot as plt\n'), ((5547, 5569), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [s]"""'], {}), "('time [s]')\n", (5557, 5569), True, 'import matplotlib.pyplot as plt\n'), ((5570, 5592), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mass [-]"""'], {}), "('Mass [-]')\n", (5580, 5592), True, 'import matplotlib.pyplot as plt\n'), ((5656, 5668), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5666, 5668), True, 'import matplotlib.pyplot as plt\n'), ((5669, 5696), 'matplotlib.pyplot.title', 'plt.title', (['"""Thrust profile"""'], {}), "('Thrust profile')\n", (5678, 5696), True, 'import matplotlib.pyplot as plt\n'), ((5697, 5742), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'T'], {'marker': '"""o"""', 'label': '"""Thrust"""'}), "(time, T, marker='o', label='Thrust')\n", (5705, 5742), True, 'import matplotlib.pyplot as plt\n'), ((5743, 5789), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'drag'], {'marker': '"""o"""', 'label': '"""Drag"""'}), "(time, drag, marker='o', label='Drag')\n", (5751, 5789), True, 'import matplotlib.pyplot as plt\n'), ((5790, 5836), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'g'], {'marker': '"""o"""', 'label': '"""Gravity"""'}), "(time, g, marker='o', label='Gravity')\n", (5798, 5836), True, 'import matplotlib.pyplot as plt\n'), ((5912, 5922), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5920, 5922), True, 'import matplotlib.pyplot as plt\n'), ((5923, 5945), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [s]"""'], {}), "('time [s]')\n", (5933, 5945), True, 'import matplotlib.pyplot as plt\n'), ((5946, 5970), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Thrust [-]"""'], {}), "('Thrust [-]')\n", (5956, 5970), True, 'import matplotlib.pyplot as plt\n'), ((5971, 5993), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (5981, 5993), True, 'import matplotlib.pyplot as plt\n'), ((6058, 6068), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6066, 6068), True, 'import matplotlib.pyplot as plt\n'), ((1211, 1234), 'OpenGoddard.optimize.Dynamics', 'Dynamics', (['prob', 'section'], {}), '(prob, section)\n', (1219, 1234), False, 'from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics\n'), ((1528, 1539), 'OpenGoddard.optimize.Condition', 'Condition', ([], {}), '()\n', (1537, 1539), False, 'from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics\n'), ((1952, 1963), 'OpenGoddard.optimize.Condition', 'Condition', ([], {}), '()\n', (1961, 1963), False, 'from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics\n'), ((4708, 4738), 'numpy.exp', 'np.exp', (['(-500 * (h - 1.0) / 1.0)'], {}), '(-500 * (h - 1.0) / 1.0)\n', (4714, 4738), True, 'import numpy as np\n'), ((4932, 4971), 'matplotlib.pyplot.axvline', 'plt.axvline', (['line'], {'color': '"""k"""', 'alpha': '(0.5)'}), "(line, color='k', 
alpha=0.5)\n", (4943, 4971), True, 'import matplotlib.pyplot as plt\n'), ((5051, 5098), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savefig_file + 'altitude' + '.png')"], {}), "(savefig_file + 'altitude' + '.png')\n", (5062, 5098), True, 'import matplotlib.pyplot as plt\n'), ((5218, 5257), 'matplotlib.pyplot.axvline', 'plt.axvline', (['line'], {'color': '"""k"""', 'alpha': '(0.5)'}), "(line, color='k', alpha=0.5)\n", (5229, 5257), True, 'import matplotlib.pyplot as plt\n'), ((5337, 5384), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savefig_file + 'velocity' + '.png')"], {}), "(savefig_file + 'velocity' + '.png')\n", (5348, 5384), True, 'import matplotlib.pyplot as plt\n'), ((5496, 5535), 'matplotlib.pyplot.axvline', 'plt.axvline', (['line'], {'color': '"""k"""', 'alpha': '(0.5)'}), "(line, color='k', alpha=0.5)\n", (5507, 5535), True, 'import matplotlib.pyplot as plt\n'), ((5611, 5654), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savefig_file + 'mass' + '.png')"], {}), "(savefig_file + 'mass' + '.png')\n", (5622, 5654), True, 'import matplotlib.pyplot as plt\n'), ((5872, 5911), 'matplotlib.pyplot.axvline', 'plt.axvline', (['line'], {'color': '"""k"""', 'alpha': '(0.5)'}), "(line, color='k', alpha=0.5)\n", (5883, 5911), True, 'import matplotlib.pyplot as plt\n'), ((6012, 6056), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savefig_file + 'force' + '.png')"], {}), "(savefig_file + 'force' + '.png')\n", (6023, 6056), True, 'import matplotlib.pyplot as plt\n'), ((1128, 1167), 'numpy.exp', 'np.exp', (['(-obj.Hc * (h - obj.H0) / obj.H0)'], {}), '(-obj.Hc * (h - obj.H0) / obj.H0)\n', (1134, 1167), True, 'import numpy as np\n'), ((653, 679), 'numpy.sqrt', 'np.sqrt', (['(self.g0 * self.H0)'], {}), '(self.g0 * self.H0)\n', (660, 679), True, 'import numpy as np\n')]
|
import os
import re
import json
from os.path import expanduser
import zipfile
import datetime
import tensorflow as tf
import numpy as np
from utils import mkdir_p
from inoutdoor_dataset_download import InoutdoorDatasetDownload
from inoutdoor_versions import *
from tf_features import *
from PIL import Image
class InoutdoorDatasetWriter(object):
feature_dict = {
'image/height': None,
'image/width': None,
'image/object/bbox/id': None,
'image/object/bbox/xmin': None,
'image/object/bbox/xmax': None,
'image/object/bbox/ymin': None,
'image/object/bbox/ymax': None,
'image/object/bbox/truncated': None,
'image/object/bbox/occluded': None,
'image/object/class/label/name': None,
'image/object/class/label/id': None,
'image/object/class/label': None,
'image/format': None,
'image/id': None,
'image/source_id': None,
'image/filename': None,
# new
'image/object/class/text': None,
'image/rgb/encoded': None,
'image/depth/encoded': None,
'image/encoded': None,
'image/depth': None,
'boxes/length': None,
}
def get_image_sets(self):
imagesets = dict()
for f in os.listdir(self.image_set_definition_path):
# check if it is a file
if not os.path.isfile(os.path.join(
self.image_set_definition_path, f)):
continue
imagesets[f] = []
with open(os.path.join(
self.image_set_definition_path, f), 'r') as setfile:
for line in setfile.readlines():
imagesets[f].append(
line if not line.endswith('\n') else line[:-1]
)
return imagesets
def __init__(self):
self.input_path = os.path.join(expanduser('~'), 'dataset', 'inoutdoorpeoplergbd')
assert (os.path.exists(self.input_path))
expected_paths = ['Images', 'Depth', 'Annotations', 'ImageSets']
for path in expected_paths:
if not os.path.exists(os.path.join(self.input_path, path)):
raise ValueError('Expected subdirectory {0} does not exist. {1}'.format(
path, os.path.join(self.input_path, path))
)
self.tracking_path = os.path.join(self.input_path, 'Annotations')
self.rgb_path = os.path.join(self.input_path, 'Images')
self.depth_path = os.path.join(self.input_path, 'DepthJet')
self.image_set_definition_path = os.path.join(self.input_path, 'ImageSets')
self.dataset_path = self.input_path
self.image_sets = self.get_image_sets()
@staticmethod
def feature_dict_description(type='feature_dict'):
"""
Get the feature dict. In the default case it is filled with all the keys and the items set to None. If the
type=reading_shape the shape description required for reading elements from a tfrecord is returned)
:param type: (anything = returns the feature_dict with empty elements, reading_shape = element description for
reading the tfrecord files is returned)
:return:
"""
obj = InoutdoorDatasetWriter.feature_dict
if type == 'reading_shape':
obj['image/height'] = tf.FixedLenFeature((), tf.int64, 1)
obj['image/width'] = tf.FixedLenFeature((), tf.int64, 1)
obj['image/object/bbox/id'] = tf.VarLenFeature(tf.int64)
obj['image/object/bbox/xmin'] = tf.VarLenFeature(tf.float32)
obj['image/object/bbox/xmax'] = tf.VarLenFeature(tf.float32)
obj['image/object/bbox/ymin'] = tf.VarLenFeature(tf.float32)
obj['image/object/bbox/ymax'] = tf.VarLenFeature(tf.float32)
obj['image/object/bbox/truncated'] = tf.VarLenFeature(tf.string)
obj['image/object/bbox/occluded'] = tf.VarLenFeature(tf.string)
obj['image/encoded'] = tf.FixedLenFeature((), tf.string, default_value='')
obj['image/format'] = tf.FixedLenFeature((), tf.string, default_value='')
obj['image/filename'] = tf.FixedLenFeature((), tf.string, default_value='')
obj['image/id'] = tf.FixedLenFeature((), tf.string, default_value='')
obj['image/source_id'] = tf.FixedLenFeature((), tf.string, default_value='')
obj['image/object/class/label/id'] = tf.VarLenFeature(tf.int64)
obj['image/object/class/label'] = tf.VarLenFeature(tf.int64)
obj['image/object/class/label/name'] = tf.VarLenFeature(tf.string)
#
obj['image/object/class/label'] = tf.VarLenFeature(tf.int64)
obj['image/object/class/text'] = tf.FixedLenFeature((), tf.string, default_value='')
obj['image/rgb/encoded'] = tf.FixedLenFeature((), tf.string, default_value='')
obj['image/depth/encoded'] = tf.FixedLenFeature((), tf.string, default_value='')
obj['image/encoded'] = tf.FixedLenFeature((), tf.string, default_value='')
obj['image/depth'] = tf.FixedLenFeature((), tf.int64, 1)
obj['boxes/length'] = tf.FixedLenFeature((), tf.int64, 1)
return obj
def unzip_file_to_folder(self, filename, folder, remove_file_after_creating=True):
assert(os.path.exists(filename) and os.path.isfile(filename))
assert(os.path.exists(folder) and os.path.isdir(folder))
with zipfile.ZipFile(filename, 'r') as zf:
zf.extractall(folder)
if remove_file_after_creating:
print('\nRemoving file: {0}'.format(filename))
            os.remove(filename)  # remove the zip file, not the extraction folder
def get_image_label_folder(self, fold_type=None, version=None):
"""
Returns the folder containing all images and the folder containing all label information
:param fold_type:
:param version:
:return: Raises BaseExceptions if expectations are not fulfilled
"""
download_folder = os.path.join(self.input_path, 'download')
expansion_images_folder = os.path.join(self.input_path, 'Images')
expansion_depthjet_folder = os.path.join(self.input_path, 'DepthJet')
expansion_labels_folder = os.path.join(self.input_path, 'Annotations')
#
if not os.path.exists(expansion_images_folder):
mkdir_p(expansion_images_folder)
if not os.path.exists(expansion_depthjet_folder):
mkdir_p(expansion_depthjet_folder)
if not os.path.exists(expansion_labels_folder):
mkdir_p(expansion_labels_folder)
full_images_path = expansion_images_folder
full_depthjet_path = expansion_depthjet_folder
full_labels_path = expansion_labels_folder
extract_files = True
if len(InoutdoorDatasetDownload.filter_files(full_labels_path)) == \
len(InoutdoorDatasetDownload.filter_files(full_images_path)):
print('Do not check the download folder. Pictures seem to exist.')
extract_files = False
elif os.path.exists(download_folder):
raise BaseException('not yet implemented')
# files_in_directory = InoutdoorDatasetDownload.filter_files(
# download_folder, False, re.compile('\.zip$'))
# if len(files_in_directory) < 2:
# raise BaseException('Not enough files found in {0}. All files present: {1}'.format(
# download_folder, files_in_directory
# ))
else:
mkdir_p(download_folder)
raise BaseException('Download folder: {0} did not exist. It had been created. '
'Please put images, labels there.'.format(download_folder))
# unzip the elements
if extract_files:
print('Starting to unzip the files')
raise BaseException('Starting to unzip the files')
if fold_type == 'test':
return full_images_path, full_depthjet_path, None
return full_images_path, full_depthjet_path, full_labels_path
def _get_boundingboxes(self, annotations_for_picture_id):
boxid, xmin, xmax, ymin, ymax, label_id, label, truncated, occluded = \
[], [], [], [], [], [], [], [], []
if annotations_for_picture_id is None:
return boxid, xmin, xmax, ymin, ymax, label_id, label, truncated, occluded
for i, object in enumerate(annotations_for_picture_id.get('object', [])):
if 'bndbox' not in object:
continue
boxid.append(i)
xmin.append(float(object['bndbox']['xmin']))
xmax.append(float(object['bndbox']['xmax']))
ymin.append(float(object['bndbox']['ymin']))
ymax.append(float(object['bndbox']['ymax']))
label.append(object['name'])
label_id.append(INOUTDOOR_LABELS.index(object['name']) + 1)
truncated.append(False)
occluded.append(False)
return boxid, xmin, xmax, ymin, ymax, label_id, label, truncated, occluded
def _get_tf_feature_dict(self, image_id, image_path, image_format, annotations):
assert(isinstance(image_path, dict))
boxid, xmin, xmax, ymin, ymax, label_id, label, truncated, occluded = \
self._get_boundingboxes(annotations)
truncated = np.asarray(truncated)
occluded = np.asarray(occluded)
# convert things to bytes
label_bytes = [tf.compat.as_bytes(l) for l in label]
default_image_path = image_path['rgb'] \
if image_path.get('rgb', None) is not None \
else image_path['depth']
im = Image.open(default_image_path)
image_width, image_height = im.size
image_filename = os.path.basename(default_image_path)
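        # Normalize bounding-box coordinates to [0, 1] relative to the image size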
xmin = [x / float(image_width) for x in xmin]
xmax = [x / float(image_width) for x in xmax]
ymin = [y / float(image_height) for y in ymin]
ymax = [y / float(image_height) for y in ymax]
image_fileid = re.search('^(.*)(\.png)$', image_filename).group(1)
assert(image_fileid == image_id)
tmp_feat_dict = InoutdoorDatasetWriter.feature_dict
tmp_feat_dict['image/id'] = bytes_feature(image_fileid)
tmp_feat_dict['image/source_id'] = bytes_feature(image_fileid)
tmp_feat_dict['image/height'] = int64_feature(image_height)
tmp_feat_dict['image/width'] = int64_feature(image_width)
tmp_feat_dict['image/depth'] = int64_feature([3])
for key, item in image_path.items():
if item is None:
continue
with open(item, 'rb') as f:
tmp_feat_dict['image/{0}/encoded'.format(key)] = bytes_feature(f.read())
tmp_feat_dict['image/format'] = bytes_feature(image_format)
tmp_feat_dict['image/filename'] = bytes_feature(image_filename)
tmp_feat_dict['image/object/bbox/id'] = int64_feature(boxid)
tmp_feat_dict['image/object/bbox/xmin'] = float_feature(xmin)
tmp_feat_dict['image/object/bbox/xmax'] = float_feature(xmax)
tmp_feat_dict['image/object/bbox/ymin'] = float_feature(ymin)
tmp_feat_dict['image/object/bbox/ymax'] = float_feature(ymax)
tmp_feat_dict['image/object/bbox/truncated'] = bytes_feature(
truncated.tobytes())
tmp_feat_dict['image/object/bbox/occluded'] = bytes_feature(
occluded.tobytes())
tmp_feat_dict['image/object/class/label/id'] = int64_feature(label_id)
tmp_feat_dict['image/object/class/label'] = int64_feature(label_id)
tmp_feat_dict['image/object/class/label/name'] = bytes_feature(
label_bytes)
items_to_remove = [
key for key, item in tmp_feat_dict.items() if item is None
]
for it in items_to_remove:
del tmp_feat_dict[it]
return tmp_feat_dict
def _get_tf_feature(self, image_id, image_path, image_format, annotations):
feature_dict = self._get_tf_feature_dict(
image_id, image_path, image_format, annotations)
return tf.train.Features(feature=feature_dict)
def write_tfrecord(self, fold_type=None, version=None,
max_elements_per_file=1000, maximum_files_to_write=None,
write_masks=False):
assert(version is None or version in ['rgb', 'depth', 'both'])
assert(fold_type in self.image_sets.keys())
assert(fold_type is not None and
re.match('^(seq\d)\.txt$', fold_type))
if version is None:
version = 'rgb'
sequence_type = re.match('^(seq\d)\.txt$', fold_type).group(1)
output_path = os.path.join(self.input_path, 'tfrecord')
if not os.path.exists(output_path):
mkdir_p(output_path)
full_images_path, full_depthjet_path, full_labels_path = \
self.get_image_label_folder(fold_type, version)
def get_annotation(picture_id):
if full_labels_path is None:
return None
with open(os.path.join(
full_labels_path, picture_id + '.yml'), 'r') as f:
import yaml
                obj = yaml.safe_load(f.read())
obj_annotation = obj['annotation']
return obj_annotation
image_filename_regex = re.compile('^(.*)\.(png)$')
tfrecord_file_id, writer = 0, None
tfrecord_filename_template = os.path.join(
output_path,
'output_modality_{modality}_'
'sequence_{version}_'
'split_{{iteration:06d}}.tfrecord'.format(
modality=version,
version=sequence_type
))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
files_written = 0
for _, f in enumerate(self.image_sets[fold_type]):
f = '{0}.png'.format(f)
if files_written % max_elements_per_file == 0:
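                    # Close the current shard and open a new TFRecord file every max_elements_per_file examples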
if writer is not None:
writer.close()
tfrecord_file_id += 1
tmp_filename_tfrecord = tfrecord_filename_template.format(
iteration=tfrecord_file_id)
print('{0}: Create TFRecord filename: {1} after '
'processing {2}/{3} files'.format(
str(datetime.datetime.now()), tmp_filename_tfrecord,
files_written, len(self.image_sets[fold_type])
))
writer = tf.python_io.TFRecordWriter(
tmp_filename_tfrecord
)
if files_written % 250 == 0:
print('\t{0}: Processed file: {1}/{2}'.format(
str(datetime.datetime.now()),
files_written, len(self.image_sets[fold_type])))
# match the filename with the regex
m = image_filename_regex.search(f)
if m is None:
print('Filename did not match regex: {0}'.format(f))
continue
picture_id = m.group(1)
picture_id_annotations = get_annotation(picture_id)
filenames = {'rgb': None, 'depth': None}
if version == 'rgb' or version is None:
filenames['rgb'] = os.path.join(full_images_path, f)
elif version == 'depth':
filenames['depth'] = os.path.join(full_depthjet_path, f)
else:
filenames = {
'rgb': os.path.join(full_images_path, f),
'depth': os.path.join(full_depthjet_path, f)
}
feature = self._get_tf_feature(
picture_id, filenames, m.group(2), picture_id_annotations)
example = tf.train.Example(features=feature)
writer.write(example.SerializeToString())
if maximum_files_to_write is not None:
                    if files_written >= maximum_files_to_write:
break
files_written += 1
# Close the last files
if writer is not None:
writer.close()
|
[
"zipfile.ZipFile",
"re.compile",
"utils.mkdir_p",
"tensorflow.compat.as_bytes",
"os.remove",
"re.search",
"os.path.exists",
"os.listdir",
"tensorflow.train.Example",
"tensorflow.Session",
"numpy.asarray",
"os.path.isdir",
"tensorflow.python_io.TFRecordWriter",
"os.path.expanduser",
"tensorflow.VarLenFeature",
"re.match",
"os.path.isfile",
"tensorflow.train.Features",
"PIL.Image.open",
"os.path.join",
"tensorflow.global_variables_initializer",
"datetime.datetime.now",
"os.path.basename",
"inoutdoor_dataset_download.InoutdoorDatasetDownload.filter_files",
"tensorflow.FixedLenFeature"
] |
[((1270, 1312), 'os.listdir', 'os.listdir', (['self.image_set_definition_path'], {}), '(self.image_set_definition_path)\n', (1280, 1312), False, 'import os\n'), ((1958, 1989), 'os.path.exists', 'os.path.exists', (['self.input_path'], {}), '(self.input_path)\n', (1972, 1989), False, 'import os\n'), ((2372, 2416), 'os.path.join', 'os.path.join', (['self.input_path', '"""Annotations"""'], {}), "(self.input_path, 'Annotations')\n", (2384, 2416), False, 'import os\n'), ((2441, 2480), 'os.path.join', 'os.path.join', (['self.input_path', '"""Images"""'], {}), "(self.input_path, 'Images')\n", (2453, 2480), False, 'import os\n'), ((2507, 2548), 'os.path.join', 'os.path.join', (['self.input_path', '"""DepthJet"""'], {}), "(self.input_path, 'DepthJet')\n", (2519, 2548), False, 'import os\n'), ((2590, 2632), 'os.path.join', 'os.path.join', (['self.input_path', '"""ImageSets"""'], {}), "(self.input_path, 'ImageSets')\n", (2602, 2632), False, 'import os\n'), ((6020, 6061), 'os.path.join', 'os.path.join', (['self.input_path', '"""download"""'], {}), "(self.input_path, 'download')\n", (6032, 6061), False, 'import os\n'), ((6096, 6135), 'os.path.join', 'os.path.join', (['self.input_path', '"""Images"""'], {}), "(self.input_path, 'Images')\n", (6108, 6135), False, 'import os\n'), ((6172, 6213), 'os.path.join', 'os.path.join', (['self.input_path', '"""DepthJet"""'], {}), "(self.input_path, 'DepthJet')\n", (6184, 6213), False, 'import os\n'), ((6248, 6292), 'os.path.join', 'os.path.join', (['self.input_path', '"""Annotations"""'], {}), "(self.input_path, 'Annotations')\n", (6260, 6292), False, 'import os\n'), ((9376, 9397), 'numpy.asarray', 'np.asarray', (['truncated'], {}), '(truncated)\n', (9386, 9397), True, 'import numpy as np\n'), ((9417, 9437), 'numpy.asarray', 'np.asarray', (['occluded'], {}), '(occluded)\n', (9427, 9437), True, 'import numpy as np\n'), ((9692, 9722), 'PIL.Image.open', 'Image.open', (['default_image_path'], {}), '(default_image_path)\n', (9702, 9722), False, 'from PIL import Image\n'), ((9792, 9828), 'os.path.basename', 'os.path.basename', (['default_image_path'], {}), '(default_image_path)\n', (9808, 9828), False, 'import os\n'), ((12144, 12183), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature_dict'}), '(feature=feature_dict)\n', (12161, 12183), True, 'import tensorflow as tf\n'), ((12734, 12775), 'os.path.join', 'os.path.join', (['self.input_path', '"""tfrecord"""'], {}), "(self.input_path, 'tfrecord')\n", (12746, 12775), False, 'import os\n'), ((13390, 13418), 're.compile', 're.compile', (['"""^(.*)\\\\.(png)$"""'], {}), "('^(.*)\\\\.(png)$')\n", (13400, 13418), False, 'import re\n'), ((1891, 1906), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (1901, 1906), False, 'from os.path import expanduser\n'), ((3350, 3385), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64', '(1)'], {}), '((), tf.int64, 1)\n', (3368, 3385), True, 'import tensorflow as tf\n'), ((3419, 3454), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64', '(1)'], {}), '((), tf.int64, 1)\n', (3437, 3454), True, 'import tensorflow as tf\n'), ((3497, 3523), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.int64'], {}), '(tf.int64)\n', (3513, 3523), True, 'import tensorflow as tf\n'), ((3568, 3596), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (3584, 3596), True, 'import tensorflow as tf\n'), ((3641, 3669), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.float32'], {}), 
'(tf.float32)\n', (3657, 3669), True, 'import tensorflow as tf\n'), ((3714, 3742), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (3730, 3742), True, 'import tensorflow as tf\n'), ((3787, 3815), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.float32'], {}), '(tf.float32)\n', (3803, 3815), True, 'import tensorflow as tf\n'), ((3865, 3892), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.string'], {}), '(tf.string)\n', (3881, 3892), True, 'import tensorflow as tf\n'), ((3941, 3968), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.string'], {}), '(tf.string)\n', (3957, 3968), True, 'import tensorflow as tf\n'), ((4004, 4055), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (4022, 4055), True, 'import tensorflow as tf\n'), ((4090, 4141), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (4108, 4141), True, 'import tensorflow as tf\n'), ((4178, 4229), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (4196, 4229), True, 'import tensorflow as tf\n'), ((4260, 4311), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (4278, 4311), True, 'import tensorflow as tf\n'), ((4349, 4400), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (4367, 4400), True, 'import tensorflow as tf\n'), ((4450, 4476), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.int64'], {}), '(tf.int64)\n', (4466, 4476), True, 'import tensorflow as tf\n'), ((4523, 4549), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.int64'], {}), '(tf.int64)\n', (4539, 4549), True, 'import tensorflow as tf\n'), ((4601, 4628), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.string'], {}), '(tf.string)\n', (4617, 4628), True, 'import tensorflow as tf\n'), ((4689, 4715), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', (['tf.int64'], {}), '(tf.int64)\n', (4705, 4715), True, 'import tensorflow as tf\n'), ((4761, 4812), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (4779, 4812), True, 'import tensorflow as tf\n'), ((4852, 4903), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (4870, 4903), True, 'import tensorflow as tf\n'), ((4945, 4996), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (4963, 4996), True, 'import tensorflow as tf\n'), ((5032, 5083), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (5050, 5083), True, 'import tensorflow as tf\n'), ((5118, 5153), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64', '(1)'], {}), '((), tf.int64, 1)\n', (5136, 5153), True, 'import tensorflow as tf\n'), ((5188, 5223), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.int64', '(1)'], {}), '((), tf.int64, 1)\n', (5206, 5223), True, 'import tensorflow as tf\n'), ((5347, 5371), 'os.path.exists', 'os.path.exists', 
(['filename'], {}), '(filename)\n', (5361, 5371), False, 'import os\n'), ((5376, 5400), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (5390, 5400), False, 'import os\n'), ((5417, 5439), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (5431, 5439), False, 'import os\n'), ((5444, 5465), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (5457, 5465), False, 'import os\n'), ((5480, 5510), 'zipfile.ZipFile', 'zipfile.ZipFile', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (5495, 5510), False, 'import zipfile\n'), ((5662, 5679), 'os.remove', 'os.remove', (['folder'], {}), '(folder)\n', (5671, 5679), False, 'import os\n'), ((6318, 6357), 'os.path.exists', 'os.path.exists', (['expansion_images_folder'], {}), '(expansion_images_folder)\n', (6332, 6357), False, 'import os\n'), ((6371, 6403), 'utils.mkdir_p', 'mkdir_p', (['expansion_images_folder'], {}), '(expansion_images_folder)\n', (6378, 6403), False, 'from utils import mkdir_p\n'), ((6419, 6460), 'os.path.exists', 'os.path.exists', (['expansion_depthjet_folder'], {}), '(expansion_depthjet_folder)\n', (6433, 6460), False, 'import os\n'), ((6474, 6508), 'utils.mkdir_p', 'mkdir_p', (['expansion_depthjet_folder'], {}), '(expansion_depthjet_folder)\n', (6481, 6508), False, 'from utils import mkdir_p\n'), ((6524, 6563), 'os.path.exists', 'os.path.exists', (['expansion_labels_folder'], {}), '(expansion_labels_folder)\n', (6538, 6563), False, 'import os\n'), ((6577, 6609), 'utils.mkdir_p', 'mkdir_p', (['expansion_labels_folder'], {}), '(expansion_labels_folder)\n', (6584, 6609), False, 'from utils import mkdir_p\n'), ((7079, 7110), 'os.path.exists', 'os.path.exists', (['download_folder'], {}), '(download_folder)\n', (7093, 7110), False, 'import os\n'), ((9496, 9517), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['l'], {}), '(l)\n', (9514, 9517), True, 'import tensorflow as tf\n'), ((12546, 12585), 're.match', 're.match', (['"""^(seq\\\\d)\\\\.txt$"""', 'fold_type'], {}), "('^(seq\\\\d)\\\\.txt$', fold_type)\n", (12554, 12585), False, 'import re\n'), ((12792, 12819), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (12806, 12819), False, 'import os\n'), ((12833, 12853), 'utils.mkdir_p', 'mkdir_p', (['output_path'], {}), '(output_path)\n', (12840, 12853), False, 'from utils import mkdir_p\n'), ((13768, 13780), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (13778, 13780), True, 'import tensorflow as tf\n'), ((6813, 6868), 'inoutdoor_dataset_download.InoutdoorDatasetDownload.filter_files', 'InoutdoorDatasetDownload.filter_files', (['full_labels_path'], {}), '(full_labels_path)\n', (6850, 6868), False, 'from inoutdoor_dataset_download import InoutdoorDatasetDownload\n'), ((6895, 6950), 'inoutdoor_dataset_download.InoutdoorDatasetDownload.filter_files', 'InoutdoorDatasetDownload.filter_files', (['full_images_path'], {}), '(full_images_path)\n', (6932, 6950), False, 'from inoutdoor_dataset_download import InoutdoorDatasetDownload\n'), ((7558, 7582), 'utils.mkdir_p', 'mkdir_p', (['download_folder'], {}), '(download_folder)\n', (7565, 7582), False, 'from utils import mkdir_p\n'), ((10072, 10115), 're.search', 're.search', (['"""^(.*)(\\\\.png)$"""', 'image_filename'], {}), "('^(.*)(\\\\.png)$', image_filename)\n", (10081, 10115), False, 'import re\n'), ((12665, 12704), 're.match', 're.match', (['"""^(seq\\\\d)\\\\.txt$"""', 'fold_type'], {}), "('^(seq\\\\d)\\\\.txt$', fold_type)\n", (12673, 12704), False, 'import re\n'), ((13811, 13844), 
'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (13842, 13844), True, 'import tensorflow as tf\n'), ((15984, 16018), 'tensorflow.train.Example', 'tf.train.Example', ([], {'features': 'feature'}), '(features=feature)\n', (16000, 16018), True, 'import tensorflow as tf\n'), ((1384, 1431), 'os.path.join', 'os.path.join', (['self.image_set_definition_path', 'f'], {}), '(self.image_set_definition_path, f)\n', (1396, 1431), False, 'import os\n'), ((1532, 1579), 'os.path.join', 'os.path.join', (['self.image_set_definition_path', 'f'], {}), '(self.image_set_definition_path, f)\n', (1544, 1579), False, 'import os\n'), ((2135, 2170), 'os.path.join', 'os.path.join', (['self.input_path', 'path'], {}), '(self.input_path, path)\n', (2147, 2170), False, 'import os\n'), ((13114, 13165), 'os.path.join', 'os.path.join', (['full_labels_path', "(picture_id + '.yml')"], {}), "(full_labels_path, picture_id + '.yml')\n", (13126, 13165), False, 'import os\n'), ((14632, 14682), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['tmp_filename_tfrecord'], {}), '(tmp_filename_tfrecord)\n', (14659, 14682), True, 'import tensorflow as tf\n'), ((15465, 15498), 'os.path.join', 'os.path.join', (['full_images_path', 'f'], {}), '(full_images_path, f)\n', (15477, 15498), False, 'import os\n'), ((2288, 2323), 'os.path.join', 'os.path.join', (['self.input_path', 'path'], {}), '(self.input_path, path)\n', (2300, 2323), False, 'import os\n'), ((15581, 15616), 'os.path.join', 'os.path.join', (['full_depthjet_path', 'f'], {}), '(full_depthjet_path, f)\n', (15593, 15616), False, 'import os\n'), ((15704, 15737), 'os.path.join', 'os.path.join', (['full_images_path', 'f'], {}), '(full_images_path, f)\n', (15716, 15737), False, 'import os\n'), ((15772, 15807), 'os.path.join', 'os.path.join', (['full_depthjet_path', 'f'], {}), '(full_depthjet_path, f)\n', (15784, 15807), False, 'import os\n'), ((14460, 14483), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14481, 14483), False, 'import datetime\n'), ((14869, 14892), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (14890, 14892), False, 'import datetime\n')]
|
import numpy as np
from arbol import aprint
from dexp.processing.utils.scatter_gather_i2v import scatter_gather_i2v
from dexp.utils.backends import Backend
from dexp.utils.testing.testing import execute_both_backends
from dexp.utils.timeit import timeit
@execute_both_backends
def test_scatter_gather_i2v(ndim=3, length_xy=128, splits=4):
xp = Backend.get_xp_module()
rng = np.random.default_rng()
image1 = rng.uniform(0, 1, size=(length_xy,) * ndim)
image2 = rng.uniform(0, 1, size=(length_xy,) * ndim)
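    # f is the per-tile function: it returns summary statistics (min/max of the first
    # image, max/mean/min of the second) computed on each tile.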
def f(x, y):
return xp.stack([x.min(), x.max()]), xp.stack([y.max(), y.mean(), y.min()])
with timeit("scatter_gather(f)"):
chunks = (length_xy // splits,) * ndim
result1, result2 = scatter_gather_i2v(f, (image1, image2), tiles=chunks, margins=8)
assert result1.ndim == ndim + 1
assert result2.ndim == ndim + 1
assert result1.shape[:-1] == result2.shape[:-1]
assert result1.shape[-1] == 2
assert result2.shape[-1] == 3
result1 -= (0, 1) # expected stats from uniform distribution
result1 = Backend.to_numpy(result1)
error = np.linalg.norm(result1.ravel(), ord=1) / result1.size
aprint(f"Error = {error}")
assert error < 0.001
result2 -= (1, 0.5, 0) # expected stats from uniform distribution
result2 = Backend.to_numpy(result2)
error = np.linalg.norm(result2.ravel(), ord=1) / result2.size
aprint(f"Error = {error}")
assert error < 0.001
|
[
"dexp.utils.backends.Backend.get_xp_module",
"numpy.random.default_rng",
"dexp.utils.backends.Backend.to_numpy",
"arbol.aprint",
"dexp.processing.utils.scatter_gather_i2v.scatter_gather_i2v",
"dexp.utils.timeit.timeit"
] |
[((351, 374), 'dexp.utils.backends.Backend.get_xp_module', 'Backend.get_xp_module', ([], {}), '()\n', (372, 374), False, 'from dexp.utils.backends import Backend\n'), ((385, 408), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (406, 408), True, 'import numpy as np\n'), ((1079, 1104), 'dexp.utils.backends.Backend.to_numpy', 'Backend.to_numpy', (['result1'], {}), '(result1)\n', (1095, 1104), False, 'from dexp.utils.backends import Backend\n'), ((1175, 1201), 'arbol.aprint', 'aprint', (['f"""Error = {error}"""'], {}), "(f'Error = {error}')\n", (1181, 1201), False, 'from arbol import aprint\n'), ((1313, 1338), 'dexp.utils.backends.Backend.to_numpy', 'Backend.to_numpy', (['result2'], {}), '(result2)\n', (1329, 1338), False, 'from dexp.utils.backends import Backend\n'), ((1409, 1435), 'arbol.aprint', 'aprint', (['f"""Error = {error}"""'], {}), "(f'Error = {error}')\n", (1415, 1435), False, 'from arbol import aprint\n'), ((636, 663), 'dexp.utils.timeit.timeit', 'timeit', (['"""scatter_gather(f)"""'], {}), "('scatter_gather(f)')\n", (642, 663), False, 'from dexp.utils.timeit import timeit\n'), ((739, 803), 'dexp.processing.utils.scatter_gather_i2v.scatter_gather_i2v', 'scatter_gather_i2v', (['f', '(image1, image2)'], {'tiles': 'chunks', 'margins': '(8)'}), '(f, (image1, image2), tiles=chunks, margins=8)\n', (757, 803), False, 'from dexp.processing.utils.scatter_gather_i2v import scatter_gather_i2v\n')]
|
import argparse
import copy
import datetime
import gym
import numpy as np
import itertools
import torch
import csv
import os
import json
from plane_env import Plane
from sac import SAC
from verify import verify_models, generate_agent_simulator
from torch.utils.tensorboard import SummaryWriter
from replay_memory import ReplayMemory
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument('--env-name', default="HalfCheetah-v2",
help='Mujoco Gym environment (default: HalfCheetah-v2)')
parser.add_argument('--policy', default="Gaussian",
help='Policy Type: Gaussian | Deterministic (default: Gaussian)')
parser.add_argument('--eval', type=int, default=True,
help='Evaluates a policy a policy every 10 episode (default: True)')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor for reward (default: 0.99)')
parser.add_argument('--tau', type=float, default=0.005, metavar='G',
                    help='target smoothing coefficient (τ) (default: 0.005)')
parser.add_argument('--lr', type=float, default=0.0003, metavar='G',
help='learning rate (default: 0.0003)')
parser.add_argument('--alpha', type=float, default=0.2, metavar='G',
                    help='Temperature parameter α determines the relative importance of the entropy\
term against the reward (default: 0.2)')
parser.add_argument('--automatic_entropy_tuning', type=bool, default=False, metavar='G',
                    help='Automatically adjust α (default: False)')
parser.add_argument('--seed', type=int, default=123456, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
help='batch size (default: 256)')
parser.add_argument('--num_steps', type=int, default=1000001, metavar='N',
help='maximum number of steps (default: 1000000)')
parser.add_argument('--hidden_size', type=int, default=256, metavar='N',
help='hidden size (default: 256)')
parser.add_argument('--updates_per_step', type=float, default=1, metavar='N',
help='model updates per simulator step (default: 1)')
parser.add_argument('--start_steps', type=int, default=10000, metavar='N',
help='Steps sampling random actions (default: 10000)')
parser.add_argument('--target_update_interval', type=int, default=1, metavar='N',
help='Value target update per no. of updates per step (default: 1)')
parser.add_argument('--replay_size', type=int, default=100000, metavar='N',
help='size of replay buffer (default: 10000000)')
parser.add_argument('--num_planes', type=int, default=1, metavar='N',
help='number of planes to use in verification (default: 1)')
parser.add_argument('--horizon', type=int, default=10, metavar='N',
help='number of actions to plan ahead before moving on to the next plane')
parser.add_argument('--cuda', action="store_true",
help='run on CUDA (default: False)')
args = parser.parse_args()
# Environment
env = Plane()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Agent
# expert_agent = SAC(env.obs_state_len, env.action_space, args)
# expert_agent.load_checkpoint('winning_config_c3/c3_model')
agent = SAC(env.obs_state_len, env.action_space, args, map_input=(env.bspace.img.shape[2], env.bspace.img.shape[0], env.bspace.img.shape[1]))
run_dir = 'runs/{}_SAC_{}_{}_{}'.format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), args.env_name,
args.policy, "autotune" if args.automatic_entropy_tuning else "")
os.mkdir(run_dir)
reward_file = csv.writer(open(f"{run_dir}/rewards.csv", 'w'), delimiter=',', quoting=csv.QUOTE_MINIMAL, quotechar="|")
reward_file.writerow(['avg_reward', 'crash_rate'])
loss_file = csv.writer(open(f"{run_dir}/training_loss.csv", 'w'), delimiter=',', quoting=csv.QUOTE_MINIMAL, quotechar="|")
loss_file.writerow(['critic1_loss', 'critic2_loss', 'policy_loss', 'ent_loss', 'alpha'])
with open(f'{run_dir}/run_args.cfg', 'w') as conf:
conf.write(json.dumps(vars(args), indent=4, sort_keys=True))
# Memory
memory = ReplayMemory(args.replay_size, args.seed)
# Training Loop
total_numsteps = 0
updates = 0
if args.updates_per_step < 1:
steps_per_update = int(1/args.updates_per_step)
else: steps_per_update = None
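# When updates_per_step < 1, a single gradient update is performed every
# `steps_per_update` environment steps instead of several updates per step.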
for i_episode in itertools.count(1):
episode_reward = 0
episode_steps = 0
done = False
state = env.reset()
while not done:
if args.start_steps > total_numsteps:
action = env.action_space.sample() # Sample random action
else:
action = agent.select_action(state) # Sample action from policy
if len(memory) > args.batch_size:
# Number of updates per step in environment
if steps_per_update:
if episode_steps % steps_per_update == 0:
# Update parameters of all the networks
critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha = agent.update_parameters(memory, args.batch_size, updates)
loss_file.writerow([critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha])
updates += 1
else:
for i in range(int(args.updates_per_step)):
# Update parameters of all the networks
critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha = agent.update_parameters(memory, args.batch_size, updates)
loss_file.writerow([critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha])
updates += 1
next_state, reward, done, _ = env.step(action) # Step
episode_steps += 1
total_numsteps += 1
episode_reward += reward
# Ignore the "done" signal if it comes from hitting the time horizon.
# (https://github.com/openai/spinningup/blob/master/spinup/algos/sac/sac.py)
mask = 1 if episode_steps == env._max_episode_steps else float(not done)
memory.push(state, action, reward, next_state, mask) # Append transition to memory
state = next_state
if total_numsteps > args.num_steps:
break
print("Episode: {}, total numsteps: {}, episode steps: {}, reward: {}".format(i_episode, total_numsteps, episode_steps, round(episode_reward, 2)))
if i_episode % args.eval == 0 and args.eval != 0:
episodes = 21
simulator = generate_agent_simulator(agent, args.horizon)
avg_reward, _, crashed = verify_models(args.num_planes, episodes, simulator, save_path=f"{run_dir}/{i_episode}_", display=False)
reward_file.writerow([avg_reward, crashed])
print("----------------------------------------")
print("Test Episodes: {}, Total updates {}, Avg. Reward: {}, Crash Rate: {}".format(episodes, updates, round(avg_reward, 5), crashed))
print("----------------------------------------")
agent.save_checkpoint(args.env_name, ckpt_path=f"{run_dir}/{i_episode}_model")
env.close()
|
[
"torch.manual_seed",
"verify.generate_agent_simulator",
"argparse.ArgumentParser",
"verify.verify_models",
"plane_env.Plane",
"datetime.datetime.now",
"itertools.count",
"numpy.random.seed",
"os.mkdir",
"sac.SAC",
"replay_memory.ReplayMemory"
] |
[((343, 412), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Soft Actor-Critic Args"""'}), "(description='PyTorch Soft Actor-Critic Args')\n", (366, 412), False, 'import argparse\n'), ((3258, 3265), 'plane_env.Plane', 'Plane', ([], {}), '()\n', (3263, 3265), False, 'from plane_env import Plane\n'), ((3267, 3295), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3284, 3295), False, 'import torch\n'), ((3296, 3321), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3310, 3321), True, 'import numpy as np\n'), ((3465, 3603), 'sac.SAC', 'SAC', (['env.obs_state_len', 'env.action_space', 'args'], {'map_input': '(env.bspace.img.shape[2], env.bspace.img.shape[0], env.bspace.img.shape[1])'}), '(env.obs_state_len, env.action_space, args, map_input=(env.bspace.img.\n shape[2], env.bspace.img.shape[0], env.bspace.img.shape[1]))\n', (3468, 3603), False, 'from sac import SAC\n'), ((3816, 3833), 'os.mkdir', 'os.mkdir', (['run_dir'], {}), '(run_dir)\n', (3824, 3833), False, 'import os\n'), ((4352, 4393), 'replay_memory.ReplayMemory', 'ReplayMemory', (['args.replay_size', 'args.seed'], {}), '(args.replay_size, args.seed)\n', (4364, 4393), False, 'from replay_memory import ReplayMemory\n'), ((4572, 4590), 'itertools.count', 'itertools.count', (['(1)'], {}), '(1)\n', (4587, 4590), False, 'import itertools\n'), ((6684, 6729), 'verify.generate_agent_simulator', 'generate_agent_simulator', (['agent', 'args.horizon'], {}), '(agent, args.horizon)\n', (6708, 6729), False, 'from verify import verify_models, generate_agent_simulator\n'), ((6763, 6871), 'verify.verify_models', 'verify_models', (['args.num_planes', 'episodes', 'simulator'], {'save_path': 'f"""{run_dir}/{i_episode}_"""', 'display': '(False)'}), "(args.num_planes, episodes, simulator, save_path=\n f'{run_dir}/{i_episode}_', display=False)\n", (6776, 6871), False, 'from verify import verify_models, generate_agent_simulator\n'), ((3640, 3663), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3661, 3663), False, 'import datetime\n')]
|
#!/usr/bin/env python2.7
# Run as:
# python setup.py install --user
# -- Standard boilerplate header - begin
import unittest as ut
import sys, os
from os.path import abspath, dirname
from os.path import join as osjoin
cdir = dirname(abspath(__file__)) # sys.argv[0])) = # testdir
pdir = dirname(cdir) #
srcdir = osjoin(pdir, 'mhut')
sys.path.insert(0, srcdir)
pathlist = []
for p in sys.path:
if not p in pathlist: pathlist.append(p)
sys.path = pathlist
from testutils import run_tests, twrap
# -- Standard boilerplate header - end
import pandas as pd
import numpy as np
from datautils import *
# -- golden dataset --
G_dict = {
'Name' : ['A Cat', 'A Dog', 'Neither'],
'my r2' : [ 1, 0, 0],
'my d2' : [ 1, 0, 0],
'other piper' : [ 0, 4, 0],
'solomon' : [ 0, 0, 2]}
G_DF = pd.DataFrame.from_dict( G_dict, orient='index')
G_DF.drop('Name', inplace=True)
G_DF.index.name = 'Name'
G_DF.columns = G_dict['Name']
G_DF = G_DF.reindex(['my r2', 'my d2', 'other piper', 'solomon'])
for c in G_DF.columns: G_DF[c] = pd.to_numeric(G_DF[c])
G1_dict = { # I II III IV V
'a': [2.071527, 1.998107, 2.029159, 1.192129, 1.459613],
'b': [1.465882, 1.242207, 2.122667, 1.587954, 1.842492],
'c': [1.505012, 1.674715, 1.436381, 1.626080, 1.435298],
'd': [2.121946, 2.005520, 2.115850, 1.795292, 2.076429]
}
G1_DF = pd.DataFrame.from_dict(G1_dict, orient='index')
G1_DF.columns = 'I II III IV V'.split()
# -- convenience datasets for checking results --
G1_dict_R0 = {
'a': [2.0, 2.0, 2.0, 1.0, 1.0], 'b': [1.0, 1.0, 2.0, 2.0, 2.0],
'c': [2.0, 2.0, 1.0, 2.0, 1.0], 'd': [2.0, 2.0, 2.0, 2.0, 2.0]
}
G1_DF_R0 = pd.DataFrame.from_dict(G1_dict_R0, orient='index')
G1_DF_R0.columns = G1_DF.columns
G1_dict_R2 = {
'a': [2.07, 2.00, 2.03, 1.19, 1.46],
'b': [1.47, 1.24, 2.12, 1.59, 1.84],
'c': [1.51, 1.67, 1.44, 1.63, 1.44],
'd': [2.12, 2.01, 2.12, 1.80, 2.08]
}
G1_DF_R2 = pd.DataFrame.from_dict(G1_dict_R2, orient='index')
G1_DF_R2.columns = G1_DF.columns
class TestDatautils(ut.TestCase):
# globally available data struct
@classmethod
def setUpClass(cls):
print("class setUp - Nothing to do")
@classmethod
def tearDownClass(cls):
print("class tearDown - Nothing to do")
# applied per method in class
def setUp(self):
pass
def tearDown(self):
pass
## --------------------------------------------------------------------- ##
@twrap
def test_filter_column(self):
df = pd.DataFrame(100*np.random.rand(12).reshape(4,3), columns=list('ABC'))
df1 = df.copy()
df1.index = [10, 10.25, 10.5, 10.75]
self.assertTrue(df.iloc[1].equals (filter_column(df, '1')))
self.assertTrue(df.iloc[1:4].equals (filter_column(df, '1:3')))
self.assertTrue(df.iloc[:3].equals (filter_column(df, ':2')))
self.assertTrue(df.iloc[2:].equals (filter_column(df, '2:')))
self.assertTrue(df[df.index<2].equals (filter_column(df, '<2')))
self.assertTrue(df[df.index <= 1].equals (filter_column(df, '<=1')))
self.assertTrue(df[df.index != 1].equals (filter_column(df, '!=1')))
self.assertTrue(df[(df.index > 1) & (df.index <= 2)].equals(
filter_column(df, '>1 & <=2')))
self.assertTrue(df1.loc[10.5].equals (filter_column(df1, '10.5')))
self.assertTrue(df1.loc[10.25:10.5].equals (filter_column(df1, '10.25:10.5')))
self.assertTrue(df1.loc[:10.5].equals (filter_column(df1, ':10.5')))
self.assertTrue(df1.loc[10.5:].equals (filter_column(df1, '10.5:')))
self.assertTrue(df1[df1.index >= 10.5].equals (filter_column(df1, '>=10.5')))
self.assertTrue(df1[df1.index == 10.25].equals (filter_column(df1, '==10.25')))
self.assertTrue(df1[(df1.index < 10.25) | (df1.index >= 10.75)].equals(
filter_column(df1, '<10.25 | >=10.75')))
# expect errors in some version of pandas
self.assertTrue(df.iloc[1].equals (filter_column(df, '1.0')))
# self.assertTrue(filter_column(df, '1.0') - df.iloc[1]) # 1.0 => 1 (returns non-empty table)
## --------------------------------------------------------------------- ##
@twrap
def test_columnize(self):
tbl = [['Strike', 'Bid', 'Ask'],
[73.0 , 2.65, 2.70],
[73.5 , 2.47, 2.52],
[74.0 , 2.30, 2.36]]
xpct = [[73.0, 73.5, 74.0],
[2.65, 2.47, 2.30],
[2.70, 2.52, 2.36]]
result = columnize(tbl, True) # strip_header
result = [list(r) for r in result] # convert back from np.array to list
self.assertEqual(xpct, result)
# TODO. N/A's are not handled gracefully
#tbl[3][1] = 'N/A' # 2.3 -> 'N/A'
#result = vectorize(tbl)
# self.assertEqual(xpct, result)
## --------------------------------------------------------------------- ##
@twrap
def test_R2(self):
self.assertEqual(4.00, R2(4))
self.assertEqual(4.50, R2(4.5))
self.assertEqual(4.58, R2(4.58))
self.assertEqual(4.59, R2(4.586))
self.assertEqual(4.58, R2(4.584))
## --------------------------------------------------------------------- ##
@twrap
def test_R3(self):
self.assertEqual(4.000, R3(4))
self.assertEqual(4.500, R3(4.5))
self.assertEqual(4.585, R3(4.585))
self.assertEqual(4.586, R3(4.5863))
self.assertEqual(4.587, R3(4.5867))
## --------------------------------------------------------------------- ##
@twrap
def test_RN(self):
self.assertEqual(4.00, RN(4))
self.assertEqual(4.57, RN(4.5736))
self.assertEqual(4.527, RN(4.5268, 3))
self.assertEqual(4.57360, RN(4.5736, 5))
self.assertEqual('abc', RN('abc', 4))
## --------------------------------------------------------------------- ##
@twrap
def test_roundoff_list(self):
alist = [ 2.5768, 'bee', 256]
roundoff_list(alist, 3)
self.assertEqual([2.577, 'bee', 256.000], alist)
alist = [ 2.5768, 'bee2', 256]
roundoff_list(alist)
self.assertEqual([2.58, 'bee2', 256.00], alist)
## --------------------------------------------------------------------- ##
@twrap
def test_roundoff_dict(self):
adict = {'A': 2.576, 'B': 'bee', 'C': 256, 'D': [32.1475, 32, 'fee']}
roundoff_dict(adict, 3)
axpct = {'A': 2.576, 'B': 'bee', 'C': 256.000, 'D': [32.148, 32.000, 'fee']}
self.assertEqual(axpct, adict)
adict = {'A': 2.576, 'B': 'bee', 'C': 256, 'D': [32.1475, 32, 'fee']}
roundoff_dict(adict, 2)
axpct = {'A': 2.58, 'B': 'bee', 'C': 256.00, 'D': [32.15, 32.00, 'fee']}
self.assertEqual(axpct, adict)
## --------------------------------------------------------------------- ##
@twrap
def test_isnumeric(self):
self.assertTrue(isnumeric(23))
self.assertTrue(isnumeric(23.57))
self.assertTrue(isnumeric('.57'))
self.assertTrue(isnumeric('257'))
self.assertFalse(isnumeric('257.a'))
self.assertFalse(isnumeric('a.bc'))
self.assertFalse(isnumeric('a.25bc'))
self.assertFalse(isnumeric('1.25.37'))
## --------------------------------------------------------------------- ##
@twrap
def test_reorder_list(self):
orig_list = ['apple', 'banana', 'cantaloupe', 'guava', 'mango']
des_order = ['banana', 'guava']
new_list = reorder_list(orig_list, des_order) # , 'any'
b_ix = new_list.index('banana')
g_ix = new_list.index('guava')
self.assertEqual(1, g_ix-b_ix)
self.assertEqual(set(orig_list), set(new_list))
self.assertNotEqual(orig_list, new_list)
new_list = reorder_list(orig_list, des_order, 'begin')
self.assertEqual(new_list, 'banana guava apple cantaloupe mango'.split())
new_list = reorder_list(orig_list, des_order, 'end')
self.assertEqual(new_list, 'apple cantaloupe mango banana guava'.split())
new_list = reorder_list(orig_list, des_order, 'before')
self.assertEqual(new_list, 'apple cantaloupe banana guava mango'.split())
new_list = reorder_list(orig_list, des_order, 'after')
self.assertEqual(new_list, 'apple banana guava cantaloupe mango'.split())
new_list = reorder_list(orig_list, 'mango cranberry cantaloupe'.split())
m_ix = new_list.index('mango')
c_ix = new_list.index('cantaloupe')
self.assertEqual(1, c_ix-m_ix)
self.assertEqual(set(orig_list), set(new_list))
des_order = 'banana apple cantaloupe something_else mango guava'.split()
new_list = reorder_list(orig_list, des_order)
self.assertEqual(new_list, ['banana', 'apple', 'cantaloupe', 'mango', 'guava'])
## --------------------------------------------------------------------- ##
@twrap
def test_df_reorder_columns(self):
A,B,C,D,E = 0,1,2,3,4
m = np.random.rand(30).reshape(6,5)
df = pd.DataFrame( m, columns=list('ABCDE') )
df1 = df_reorder_columns(df, orderlist=list('CDAEB'))
m_xpdf = np.array(list(zip(m[:,C],m[:,D],m[:,A],m[:,E],m[:,B])))
xpdf1 = pd.DataFrame( m_xpdf, columns=list('CDAEB') )
df2 = df_reorder_columns(df, list('BD'),'begin')
m_xpdf2 = np.array(list(zip(m[:,B],m[:,D],m[:,A],m[:,C],m[:,E])))
xpdf2 = pd.DataFrame( m_xpdf2, columns=list('BDACE') )
df3 = df_reorder_columns(df, list('CFA'),'end')
m_xpdf3 = np.array(list(zip(m[:,B],m[:,D],m[:,E],m[:,C],m[:,A])))
xpdf3 = pd.DataFrame( m_xpdf3, columns=list('BDECA') )
self.assertTrue(df1.equals(xpdf1))
self.assertTrue(df2.equals(xpdf2))
self.assertTrue(df3.equals(xpdf3))
## --------------------------------------------------------------------- ##
@twrap
def test_txt2df(self):
alltxt = '''
| | |
Name A Cat A Dog Neither
my r2 1 0 0
my d2 1 0 0
other piper 0 4 0
solomon 0 0 2
'''
df = txt2df(alltxt)
for c in df.columns: df[c] = pd.to_numeric(df[c])
self.assertTrue(G_DF.equals(df))
## --------------------------------------------------------------------- ##
@twrap
def test_parse2df(self):
df = parse2df(osjoin(cdir, 'test_parse2df.txt'))
for c in df.columns: df[c] = pd.to_numeric(df[c])
self.assertTrue(G_DF.equals(df))
## --------------------------------------------------------------------- ##
@twrap
def test_broadcast(self):
alist = [1,2,3,4]
aa = np.array(alist)
ma = np.matrix(alist)
sa = pd.Series(alist)
# -- check lists --
x = broadcast(alist, 3, 0)
y = broadcast(alist, 3)
xpct_x = [alist, alist, alist]
xpct_y = [[1,1,1], [2,2,2], [3,3,3], [4,4,4]]
self.assertEqual( xpct_x, x)
self.assertEqual( xpct_y, y)
# -- check arrays --
x = broadcast(aa, 3, 0)
y = broadcast(aa, 3)
xpct_x = np.array( [alist, alist, alist] )
xpct_y = np.array( [[1,1,1], [2,2,2], [3,3,3], [4,4,4]] )
self.assertEqual( (xpct_x-x).sum(), 0)
self.assertEqual( (xpct_y-y).sum(), 0)
# -- check matrices --
x = broadcast(ma, 3, 0)
y = broadcast(ma, 3)
xpct_x = np.matrix( [alist, alist, alist] )
xpct_y = np.matrix( [[1,1,1], [2,2,2], [3,3,3], [4,4,4]] )
self.assertEqual( (xpct_x-x).sum(), 0)
self.assertEqual( (xpct_y-y).sum(), 0)
# -- check series --
x = broadcast(sa, 3, 0)
y = broadcast(sa, 3)
xpct_x = pd.DataFrame( [alist, alist, alist], dtype=float )
xpct_y = pd.DataFrame( [[1,1,1], [2,2,2], [3,3,3], [4,4,4]], dtype=float )
self.assertTrue( xpct_x.equals(x) )
self.assertTrue( xpct_y.equals(y) )
## --------------------------------------------------------------------- ##
@twrap
def test_roundoff_df(self):
df = roundoff_df(G1_DF)
self.assertTrue(df.equals(G1_DF_R0))
df = roundoff_df(G1_DF, 2)
self.assertTrue(df.equals(G1_DF_R2))
df = roundoff_df(G1_DF, 2, columns=['III', 'V'])
G1_rounded = G1_DF.copy()
G1_rounded.reindex( list('abcd') ) # becomes acbd for some reason
G1_rounded['III'] = pd.Series(dict(list(zip(list('abcd'), [2.03, 2.12, 1.44, 2.12] ))))
G1_rounded['V'] = pd.Series(dict(list(zip(list('abcd'), [1.46, 1.84, 1.44, 2.08] ))))
self.assertTrue(df.equals(G1_rounded))
df = roundoff_df(G1_DF, 2, indices=['b', 'd'])
G1_rounded = G1_DF.copy()
G1_rounded.loc['b'] = [1.47, 1.24, 2.12, 1.59, 1.84]
G1_rounded.loc['d'] = [2.12, 2.01, 2.12, 1.80, 2.08]
self.assertTrue(df.equals(G1_rounded))
df = roundoff_df(G1_DF, 2, columns=['III', 'V'], indices=['b', 'd'])
G1_rounded = G1_DF.copy()
G1_rounded.loc['b', ['III', 'V']] = [2.12, 1.84]
G1_rounded.loc['d', ['III', 'V']] = [2.12, 2.08]
self.assertTrue(df.equals(G1_rounded))
if __name__ == '__main__':
# ut.main()
run_tests(TestDatautils)
|
[
"pandas.Series",
"sys.path.insert",
"numpy.random.rand",
"pandas.DataFrame",
"os.path.join",
"pandas.DataFrame.from_dict",
"os.path.dirname",
"numpy.array",
"pandas.to_numeric",
"testutils.run_tests",
"os.path.abspath",
"numpy.matrix"
] |
[((291, 304), 'os.path.dirname', 'dirname', (['cdir'], {}), '(cdir)\n', (298, 304), False, 'from os.path import abspath, dirname\n'), ((331, 351), 'os.path.join', 'osjoin', (['pdir', '"""mhut"""'], {}), "(pdir, 'mhut')\n", (337, 351), True, 'from os.path import join as osjoin\n'), ((352, 378), 'sys.path.insert', 'sys.path.insert', (['(0)', 'srcdir'], {}), '(0, srcdir)\n', (367, 378), False, 'import sys, os\n'), ((890, 936), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['G_dict'], {'orient': '"""index"""'}), "(G_dict, orient='index')\n", (912, 936), True, 'import pandas as pd\n'), ((1458, 1505), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['G1_dict'], {'orient': '"""index"""'}), "(G1_dict, orient='index')\n", (1480, 1505), True, 'import pandas as pd\n'), ((1760, 1810), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['G1_dict_R0'], {'orient': '"""index"""'}), "(G1_dict_R0, orient='index')\n", (1782, 1810), True, 'import pandas as pd\n'), ((2036, 2086), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['G1_dict_R2'], {'orient': '"""index"""'}), "(G1_dict_R2, orient='index')\n", (2058, 2086), True, 'import pandas as pd\n'), ((235, 252), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (242, 252), False, 'from os.path import abspath, dirname\n'), ((1124, 1146), 'pandas.to_numeric', 'pd.to_numeric', (['G_DF[c]'], {}), '(G_DF[c])\n', (1137, 1146), True, 'import pandas as pd\n'), ((13632, 13656), 'testutils.run_tests', 'run_tests', (['TestDatautils'], {}), '(TestDatautils)\n', (13641, 13656), False, 'from testutils import run_tests, twrap\n'), ((11069, 11084), 'numpy.array', 'np.array', (['alist'], {}), '(alist)\n', (11077, 11084), True, 'import numpy as np\n'), ((11098, 11114), 'numpy.matrix', 'np.matrix', (['alist'], {}), '(alist)\n', (11107, 11114), True, 'import numpy as np\n'), ((11128, 11144), 'pandas.Series', 'pd.Series', (['alist'], {}), '(alist)\n', (11137, 11144), True, 'import pandas as pd\n'), ((11519, 11550), 'numpy.array', 'np.array', (['[alist, alist, alist]'], {}), '([alist, alist, alist])\n', (11527, 11550), True, 'import numpy as np\n'), ((11570, 11624), 'numpy.array', 'np.array', (['[[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]]'], {}), '([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])\n', (11578, 11624), True, 'import numpy as np\n'), ((11825, 11857), 'numpy.matrix', 'np.matrix', (['[alist, alist, alist]'], {}), '([alist, alist, alist])\n', (11834, 11857), True, 'import numpy as np\n'), ((11877, 11932), 'numpy.matrix', 'np.matrix', (['[[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]]'], {}), '([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])\n', (11886, 11932), True, 'import numpy as np\n'), ((12131, 12179), 'pandas.DataFrame', 'pd.DataFrame', (['[alist, alist, alist]'], {'dtype': 'float'}), '([alist, alist, alist], dtype=float)\n', (12143, 12179), True, 'import pandas as pd\n'), ((12199, 12270), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]]'], {'dtype': 'float'}), '([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]], dtype=float)\n', (12211, 12270), True, 'import pandas as pd\n'), ((10567, 10587), 'pandas.to_numeric', 'pd.to_numeric', (['df[c]'], {}), '(df[c])\n', (10580, 10587), True, 'import pandas as pd\n'), ((10773, 10806), 'os.path.join', 'osjoin', (['cdir', '"""test_parse2df.txt"""'], {}), "(cdir, 'test_parse2df.txt')\n", (10779, 10806), True, 'from os.path import join as osjoin\n'), ((10845, 10865), 'pandas.to_numeric', 'pd.to_numeric', (['df[c]'], {}), '(df[c])\n', (10858, 10865), True, 
'import pandas as pd\n'), ((9288, 9306), 'numpy.random.rand', 'np.random.rand', (['(30)'], {}), '(30)\n', (9302, 9306), True, 'import numpy as np\n'), ((2643, 2661), 'numpy.random.rand', 'np.random.rand', (['(12)'], {}), '(12)\n', (2657, 2661), True, 'import numpy as np\n')]
|
import pygame
from pygame.locals import *
import cv2
import numpy as np
import sys
import os
from time import sleep
import random
import tensorflow as tf
from utils import visualization_utils as viz_utils
class RockPaperScissors():
def __init__(self):
pygame.init()
# TENSORFLOW MODEL
self.detect_fn = tf.saved_model.load('../tensorflow_object_detection_api/inference_graph/saved_model')
self.category_index = {
1: {'id': 1, 'name': 'rock'},
2: {'id': 2, 'name': 'paper'},
3: {'id': 3, 'name': 'scissors'},
4: {'id': 4, 'name': 'rock'},
5: {'id': 5, 'name': 'quit'}
}
# PYGAME
self.camera = cv2.VideoCapture(0)
pygame.display.set_caption("Rock-Paper-Scissors")
self.screen = pygame.display.set_mode([1000,480])
# IMAGES
self.computer_img = pygame.image.load("icons/computer.png")
self.rock_img = pygame.image.load("icons/rock2.png")
self.rock_img = pygame.transform.scale(self.rock_img, (80, 80))
self.paper_img = pygame.image.load("icons/paper3.png")
self.paper_img = pygame.transform.scale(self.paper_img, (80, 80))
self.scissors_img = pygame.image.load("icons/scissors2.png")
self.scissors_img = pygame.transform.scale(self.scissors_img, (80, 80))
self.results_img = pygame.image.load("icons/results.png")
self.results_img = pygame.transform.scale(self.results_img, (1000-640, 50))
# FONTS
self.font = pygame.font.Font('freesansbold.ttf', 32)
self.countdown_font = pygame.font.Font('freesansbold.ttf', 50)
# COLORS
self.white = (255, 255, 255)
self.gray = (220, 220, 220)
self.red = (255, 0, 0)
self.green = (0, 255, 0)
# GAME VARIABLES
self.SIGNS = ["rock", "paper", "scissors", "quit", "other"]
self.GAME_ON = True
self.START_GAME = False
self.USER_POINTS, self.COMPUTER_POINTS = 0, 0
self.w, self.h = 100, 100
self.comp_center_coords = (170, self.h//2 - 80)
self.computer_choice, self.user_choice = "other", "paper"
self.countdown_started = False
# START GAME
self.main()
### DESTRUCTOR ###
def __del__(self):
pygame.quit()
self.camera.release()
cv2.destroyAllWindows()
sys.exit(0)
### COUNTDOWN TO COMPUTER CHOICE AND SIGNS COMPARISON BETWEEN USER AND COMPUTER ###
def start_countdown(self, start_ticks):
seconds=(pygame.time.get_ticks()-start_ticks)/1000
count = self.countdown_font.render(str(int(seconds)), False, self.white)
self.screen.blit(count, (170, self.h//2 - 80))
if seconds >= 3.99:
return False, seconds
else: return True, seconds
### CHOOSE COMPUTER SIGN AND RETURN ITS ICON ###
def show_computer_choice(self):
choice = random.choice(self.SIGNS[:-2])
if choice == "paper":
choice_img = self.paper_img
elif choice == "rock":
choice_img = self.rock_img
elif choice == "scissors":
choice_img = self.scissors_img
return choice, choice_img
### SHOW COMPUTER AND USER SCORE ON THE BOTTOM ###
def show_points(self):
self.screen.blit(self.results_img, (0, self.h-50))
count = self.font.render(f"{self.COMPUTER_POINTS} {self.USER_POINTS}", False, self.white)
self.screen.blit(count, (80, self.h-40))
### COMPARE COMPUTER'S AND USER'S SIGNS AND JUDGE WHO WINS THE ROUND ###
def compare_signs(self, user_sign, comp_sign, GAME_ON, user_points, comp_points):
if user_sign == "quit":
verdict = "YOU QUITED"
GAME_ON = False
elif user_sign == "other":
comp_points += 1
verdict = "POINT FOR PC!"
elif user_sign == comp_sign:
verdict = " IT'S A DRAW!"
else:
if user_sign == "scissors":
if comp_sign == "rock":
verdict = "POINT FOR PC!"
comp_points += 1
else:
verdict = "POINT FOR YOU!"
user_points += 1
elif user_sign == "rock":
if comp_sign == "paper":
verdict = "POINT FOR PC!"
comp_points += 1
else:
verdict = "POINT FOR YOU!"
user_points += 1
elif user_sign == "paper":
if comp_sign == "scissors":
verdict = "POINT FOR PC!"
comp_points += 1
else:
verdict = "POINT FOR YOU!"
user_points += 1
# choose verdict's colour
if "DRAW" in verdict or "QUIT" in verdict:
color = self.gray
elif "YOU" in verdict:
color = self.green
else:
color = self.red
return GAME_ON, user_points, comp_points, self.font.render(verdict, False, color)
### CONVERT FRAME TO NUMPY ARRAY AND RESHAPE IT ###
def load_image_into_numpy_array(self, image):
(im_height, im_width) = image.shape[:2]
return np.array(image).reshape(
(im_height, im_width, 3)).astype(np.uint8)
### DRAW RECTANGLE ON HAND AND RETURN CHOSEN SIGN ###
def detect_hand(self, frame, game_start):
# if game hasn't started yet, exit the function
if not game_start:
return frame, self.user_choice
frame_np = self.load_image_into_numpy_array(frame)
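        # add a batch dimension of size 1; the detection model expects batched input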
input_tensor = np.expand_dims(frame_np, 0)
detections = self.detect_fn(input_tensor)
viz_utils.visualize_boxes_and_labels_on_image_array(
frame_np,
detections['detection_boxes'][0].numpy(),
detections['detection_classes'][0].numpy().astype(np.int32),
detections['detection_scores'][0].numpy(),
self.category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=1,
min_score_thresh=.4,
skip_scores=True,
skip_labels=True,
agnostic_mode=False
)
# choose the second detection from the array
user_choice = self.category_index[detections['detection_classes'][0].numpy().astype(np.int32)[1]]
return frame_np, user_choice["name"]
### MAIN FUNCTION ###
def main(self):
while self.GAME_ON:
ret, frame = self.camera.read()
# start detecting hand when user starts the game
frame, self.user_choice = self.detect_hand(frame, self.START_GAME)
# expand the game window on the left by filling it with colour
# and displaying computer icon
self.screen.fill([4, 47, 102])
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
self.h, self.w = frame.shape[:2]
frame = np.rot90(frame)
frame = pygame.surfarray.make_surface(frame)
self.screen.blit(frame, (1000 - self.w,0))
self.screen.blit(self.computer_img, ( (750 - self.w) // 2,100))
# if game is not started, wait for any key to be pressed
if not self.START_GAME:
start_game1 = self.font.render('Press any key', False, self.white)
smile = self.countdown_font.render(":)", False, self.white)
start_game2 = self.font.render('to START', False, self.white)
self.screen.blit(start_game1, (70, 50))
self.screen.blit(smile, (170, self.h//2 - 80))
self.screen.blit(start_game2, (100, self.h-100))
else:
# if the game is on, show the user and computer score
self.show_points()
user_choice_text = self.font.render(self.user_choice, False, self.white)
self.screen.blit(user_choice_text, (400, 30))
# if the countdown hasn't started yet, begin it
if not self.countdown_started:
start_ticks=pygame.time.get_ticks()
self.countdown_started, secs = self.start_countdown(start_ticks)
# if nearly 4 seconds have passed, compare user's and computer's signs
# show the verdict and update score
if secs >= 3.99:
start_ticks = pygame.time.get_ticks()
self.computer_choice, computer_choice_img = self.show_computer_choice()
self.GAME_ON, self.USER_POINTS, self.COMPUTER_POINTS, VERDICT = self.compare_signs(self.user_choice,
self.computer_choice,
self.GAME_ON,
self.USER_POINTS,
self.COMPUTER_POINTS)
secs2 = 0
while secs2 < 4:
self.screen.blit(computer_choice_img, (145, 140))
self.screen.blit(VERDICT, (60, 50))
pygame.display.update()
secs2 += .001
pygame.display.update()
# exit the game pressing "Q" key
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == pygame.K_q:
self.GAME_ON = False
else:
self.START_GAME = True
if __name__ == "__main__":
rps_game = RockPaperScissors()
|
[
"pygame.init",
"pygame.quit",
"numpy.array",
"cv2.destroyAllWindows",
"numpy.rot90",
"sys.exit",
"pygame.font.Font",
"pygame.surfarray.make_surface",
"pygame.transform.scale",
"tensorflow.saved_model.load",
"pygame.time.get_ticks",
"pygame.display.set_mode",
"pygame.image.load",
"pygame.display.update",
"random.choice",
"cv2.cvtColor",
"pygame.event.get",
"cv2.VideoCapture",
"numpy.expand_dims",
"pygame.display.set_caption"
] |
[((257, 270), 'pygame.init', 'pygame.init', ([], {}), '()\n', (268, 270), False, 'import pygame\n'), ((314, 404), 'tensorflow.saved_model.load', 'tf.saved_model.load', (['"""../tensorflow_object_detection_api/inference_graph/saved_model"""'], {}), "(\n '../tensorflow_object_detection_api/inference_graph/saved_model')\n", (333, 404), True, 'import tensorflow as tf\n'), ((644, 663), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (660, 663), False, 'import cv2\n'), ((666, 715), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Rock-Paper-Scissors"""'], {}), "('Rock-Paper-Scissors')\n", (692, 715), False, 'import pygame\n'), ((732, 768), 'pygame.display.set_mode', 'pygame.display.set_mode', (['[1000, 480]'], {}), '([1000, 480])\n', (755, 768), False, 'import pygame\n'), ((803, 842), 'pygame.image.load', 'pygame.image.load', (['"""icons/computer.png"""'], {}), "('icons/computer.png')\n", (820, 842), False, 'import pygame\n'), ((862, 898), 'pygame.image.load', 'pygame.image.load', (['"""icons/rock2.png"""'], {}), "('icons/rock2.png')\n", (879, 898), False, 'import pygame\n'), ((918, 965), 'pygame.transform.scale', 'pygame.transform.scale', (['self.rock_img', '(80, 80)'], {}), '(self.rock_img, (80, 80))\n', (940, 965), False, 'import pygame\n'), ((986, 1023), 'pygame.image.load', 'pygame.image.load', (['"""icons/paper3.png"""'], {}), "('icons/paper3.png')\n", (1003, 1023), False, 'import pygame\n'), ((1044, 1092), 'pygame.transform.scale', 'pygame.transform.scale', (['self.paper_img', '(80, 80)'], {}), '(self.paper_img, (80, 80))\n', (1066, 1092), False, 'import pygame\n'), ((1116, 1156), 'pygame.image.load', 'pygame.image.load', (['"""icons/scissors2.png"""'], {}), "('icons/scissors2.png')\n", (1133, 1156), False, 'import pygame\n'), ((1180, 1231), 'pygame.transform.scale', 'pygame.transform.scale', (['self.scissors_img', '(80, 80)'], {}), '(self.scissors_img, (80, 80))\n', (1202, 1231), False, 'import pygame\n'), ((1254, 1292), 'pygame.image.load', 'pygame.image.load', (['"""icons/results.png"""'], {}), "('icons/results.png')\n", (1271, 1292), False, 'import pygame\n'), ((1315, 1373), 'pygame.transform.scale', 'pygame.transform.scale', (['self.results_img', '(1000 - 640, 50)'], {}), '(self.results_img, (1000 - 640, 50))\n', (1337, 1373), False, 'import pygame\n'), ((1398, 1438), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(32)'], {}), "('freesansbold.ttf', 32)\n", (1414, 1438), False, 'import pygame\n'), ((1463, 1503), 'pygame.font.Font', 'pygame.font.Font', (['"""freesansbold.ttf"""', '(50)'], {}), "('freesansbold.ttf', 50)\n", (1479, 1503), False, 'import pygame\n'), ((2061, 2074), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2072, 2074), False, 'import pygame\n'), ((2101, 2124), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2122, 2124), False, 'import cv2\n'), ((2127, 2138), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2135, 2138), False, 'import sys\n'), ((2621, 2651), 'random.choice', 'random.choice', (['self.SIGNS[:-2]'], {}), '(self.SIGNS[:-2])\n', (2634, 2651), False, 'import random\n'), ((5092, 5119), 'numpy.expand_dims', 'np.expand_dims', (['frame_np', '(0)'], {}), '(frame_np, 0)\n', (5106, 5119), True, 'import numpy as np\n'), ((6156, 6194), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (6168, 6194), False, 'import cv2\n'), ((6243, 6258), 'numpy.rot90', 'np.rot90', (['frame'], {}), '(frame)\n', (6251, 6258), True, 'import numpy as np\n'), 
((6270, 6306), 'pygame.surfarray.make_surface', 'pygame.surfarray.make_surface', (['frame'], {}), '(frame)\n', (6299, 6306), False, 'import pygame\n'), ((8006, 8029), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (8027, 8029), False, 'import pygame\n'), ((8083, 8101), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (8099, 8101), False, 'import pygame\n'), ((2279, 2302), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (2300, 2302), False, 'import pygame\n'), ((7179, 7202), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (7200, 7202), False, 'import pygame\n'), ((7428, 7451), 'pygame.time.get_ticks', 'pygame.time.get_ticks', ([], {}), '()\n', (7449, 7451), False, 'import pygame\n'), ((4741, 4756), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (4749, 4756), True, 'import numpy as np\n'), ((7956, 7979), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (7977, 7979), False, 'import pygame\n')]
|
""" This module contains the World class. """
from copy import deepcopy
import numpy as np
from models.object import Window
class World:
"""
Contains all objects that are supposed to be drawn in the viewport.
    In the comments of this class, the actual slice of the world that is
    being shown is referred to as the "window". The widget that shows the
    window is called the "viewport"; it is an immutable object. The window,
    on the other hand, can be moved or scaled like any other object.
"""
def __init__(self, window_size):
self._objects = dict()
self.add_object(Window(*window_size))
def __getitem__(self, name):
return self._objects[name]
def viewport_transform(self, viewport_width, viewport_height):
"""
Returns a list of lists of coordinates, ready to be drawn in the
viewport. Basically this returns all world objects normalized to
the viewport coordinates.
"""
virtual_world = deepcopy(self._objects)
# rotate all objects to appear that the window rotated
for obj in virtual_world.values():
obj._transform(
self["window"].inv_rotation_matrix, self["window"].center,
np.negative(self["window"].center).tolist())
# clip objects
for obj in virtual_world.values():
obj.project()
obj.clip(virtual_world["window"])
(x_min, y_min), (x_max, y_max) = \
virtual_world["window"].expanded_boundaries
def transform_point(point):
newx = ((point[0] - x_min)/(x_max - x_min)) * viewport_width
newy = (1 - (point[1] - y_min)/(y_max - y_min)) * viewport_height
return (newx, newy)
# build a list of transformed points for each object
output = []
for obj in virtual_world.values():
new_obj = []
for face in obj.points:
new_obj.append(list(map(transform_point, face)))
output.append((new_obj, obj.color))
return output
@property
def objects(self):
""" Returns the set of objects. """
return self._objects.values()
def add_object(self, obj):
""" Adds a new object. """
self._objects[obj.name] = obj
|
[
"numpy.negative",
"models.object.Window",
"copy.deepcopy"
] |
[((1017, 1040), 'copy.deepcopy', 'deepcopy', (['self._objects'], {}), '(self._objects)\n', (1025, 1040), False, 'from copy import deepcopy\n'), ((618, 638), 'models.object.Window', 'Window', (['*window_size'], {}), '(*window_size)\n', (624, 638), False, 'from models.object import Window\n'), ((1267, 1301), 'numpy.negative', 'np.negative', (["self['window'].center"], {}), "(self['window'].center)\n", (1278, 1301), True, 'import numpy as np\n')]
|
import numpy as np
'''
!! Basic concepts

1D = vector
2D = matrix
3D = tensor
From four dimensions on it is hard to picture things in our heads,
because we live in a three-dimensional world.

2D tensor
A 2D tensor is called a matrix.
|t| = (batch size, dim)
batch size = "rows" / dim = "columns"

3D tensor
A 3D tensor is simply called a tensor.
|t| = (batch size, width, height)
batch size = "depth" / width = "width" / height = "height" (the part that makes it a volume)
'''
# Create a 1D vector
t = np.array([0., 1., 2., 3., 4., 5., 6.])
# Print the vector's rank and shape
print("Rank of t:", t.ndim)
print("Shape of t:", t.shape)
'''
ndim prints the number of dimensions (the rank).
shape prints the size; here the size is (1 x 7).
'''
# Accessing vector elements in NumPy (very similar to an ordinary Python list)
print(t[0], t[1], t[-1])
print(t[:2], t[3:])
# Create a 2D matrix
t = np.array([[1., 2., 3.,], [4., 5., 6.,], [7., 8., 9]])
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import torch
# Create a 1D vector
t = torch.FloatTensor([0., 1., 2., 3., 4., 5., 6.])
print(t)
# Check the tensor's rank and shape
print(t.dim())  # rank (number of dimensions)
print(t.shape)  # t.size() also works
# Indexing the tensor (same as ordinary Python list and NumPy indexing)
print(t[0], t[1], t[-1])
print(t[2:5], t[4:-1])
print(t[:2], t[3:])
# Create a 2D matrix with PyTorch
t = torch.FloatTensor([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.],
[10., 11., 12.]])
print(t)
print(t.dim())  # rank (number of dimensions)
print(t.shape)  # t.size() also works
# Slicing a 2D tensor
print(t[1:3, 1])  # slice rows 1:3 along the first dimension, take only index 1 along the second
print(t[1:3, 1].size())
# Broadcasting
'''
To add two matrices A and B, they must have the same size, and to multiply them
the last dimension of A must match the first dimension of B.
In deep learning, however, we inevitably need arithmetic between matrices (tensors)
of different sizes. For this, PyTorch provides "broadcasting", which automatically
adjusts the sizes so that the operation can be performed.
'''
# Ordinary matrix addition
m1 = torch.FloatTensor([[3, 3]])
m2 = torch.FloatTensor([[2, 2]])
print(m1 + m2)
# Broadcasting applied
m2 = torch.FloatTensor([[3], [4]])
print(m1 + m2)
# Matrix multiplication (matmul) vs element-wise multiplication (mul)
m1 = torch.FloatTensor([[1, 2], [3, 4]])
m2 = torch.FloatTensor([[1], [2]])
print(m1.matmul(m2))
print(m1 * m2)
print(m1.mul(m2))
# Mean
t = torch.FloatTensor([1, 2])
print(t.mean())
# Mean of a 2D matrix
t = torch.FloatTensor([[1, 2],
[3, 4]])
print(t.mean())       # mean over all elements
print(t.mean(dim=0))  # remove the first dimension: mean of [1, 3] and mean of [2, 4]
print(t.mean(dim=1))  # remove the second dimension: mean of [1, 2] and mean of [3, 4]
# Sum
t = torch.FloatTensor([[1, 2],
[3, 4]])
print(t.sum())       # sum over all elements
print(t.sum(dim=0))  # remove the first dimension: sum of [1, 3] and sum of [2, 4]
print(t.sum(dim=1))  # remove the second dimension: sum of [1, 2] and sum of [3, 4]
# Max and ArgMax
t = torch.FloatTensor([[1, 2],
[3, 4]])
print(t.max())       # max over all elements
print(t.max(dim=0))  # remove the first dimension: max of [1, 3] and max of [2, 4]
print(t.max(dim=1))  # remove the second dimension: max of [1, 2] and max of [3, 4]
'''
When dim is given, max() returns two values: the max values and their indices (argmax).
'''
# View - change the size of a tensor while keeping the number of elements (important)
'''
PyTorch's view plays the same role as reshape in NumPy: it changes the size of a tensor.
'''
t = np.array([[[0, 1, 2],
[3, 4, 5]],
[[6, 7, 8],
[9, 10, 11]]])
ft = torch.FloatTensor(t)
print(ft.shape)
# Reshape the ft tensor into a 2D tensor with view
print(ft.view([-1, 3]))  # reshape ft to size (?, 3)
'''
view requires that the number of elements stays the same before and after the change.
If a size is set to -1, PyTorch infers that dimension from the other dimensions.
'''
# Changing the size of a 3D tensor
'''
Keep the tensor 3-dimensional, but change its shape.
'''
print(ft.view([-1, 1, 3]))
# squeeze - removes dimensions of size 1
ft = torch.FloatTensor([[0], [1], [2]])
print(ft.size())
print(ft.squeeze())
print(ft.squeeze().size())
# unsqueeze - adds a dimension of size 1 at a specific position
ft = torch.FloatTensor([1, 2, 3])
print(ft.size())
print(ft.unsqueeze(0))
print(ft.unsqueeze(0).size())
# Connecting two tensors (concatenate)
x = torch.FloatTensor([[1, 2], [3, 4]])
y = torch.FloatTensor([[5, 6], [7, 8]])
print(torch.cat([x, y], dim=0))  # dim=0 means grow along the first dimension
print(torch.cat([x, y], dim=1))
# Stacking
x = torch.FloatTensor([1, 4])
y = torch.FloatTensor([2, 5])
z = torch.FloatTensor([3, 6])
print(torch.stack([x, y, z]))
print(torch.cat([x.unsqueeze(0), y.unsqueeze(0), z.unsqueeze(0)], dim=0))
'''
The tensors are stacked in order, producing a (3 x 2) tensor.
And as the second print shows, this is much more concise than connecting them with cat.
'''
# Tensors filled with zeros and ones
x = torch.FloatTensor([[0, 1, 2], [2, 1, 0]])
print(torch.ones_like(x))   # a tensor with the same shape as x but filled with ones
print(torch.zeros_like(x))  # a tensor with the same shape as x but filled with zeros
# In-place operations (overwrite the operand)
x = torch.FloatTensor([[1, 2], [3, 4]])
print(x.mul(2.))
print(x)
''' x is unchanged '''
print(x.mul_(2.))
print(x)
|
[
"torch.ones_like",
"torch.stack",
"numpy.array",
"torch.zeros_like",
"torch.FloatTensor",
"torch.cat"
] |
[((342, 387), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]'], {}), '([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n', (350, 387), True, 'import numpy as np\n'), ((641, 700), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9]]'], {}), '([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9]])\n', (649, 700), True, 'import numpy as np\n'), ((820, 874), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]'], {}), '([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n', (837, 874), False, 'import torch\n'), ((1090, 1184), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]'], {}), '([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], [10.0,\n 11.0, 12.0]])\n', (1107, 1184), False, 'import torch\n'), ((1634, 1661), 'torch.FloatTensor', 'torch.FloatTensor', (['[[3, 3]]'], {}), '([[3, 3]])\n', (1651, 1661), False, 'import torch\n'), ((1667, 1694), 'torch.FloatTensor', 'torch.FloatTensor', (['[[2, 2]]'], {}), '([[2, 2]])\n', (1684, 1694), False, 'import torch\n'), ((1728, 1757), 'torch.FloatTensor', 'torch.FloatTensor', (['[[3], [4]]'], {}), '([[3], [4]])\n', (1745, 1757), False, 'import torch\n'), ((1813, 1848), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (1830, 1848), False, 'import torch\n'), ((1854, 1883), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1], [2]]'], {}), '([[1], [2]])\n', (1871, 1883), False, 'import torch\n'), ((1952, 1977), 'torch.FloatTensor', 'torch.FloatTensor', (['[1, 2]'], {}), '([1, 2])\n', (1969, 1977), False, 'import torch\n'), ((2015, 2050), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (2032, 2050), False, 'import torch\n'), ((2264, 2299), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (2281, 2299), False, 'import torch\n'), ((2522, 2557), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (2539, 2557), False, 'import torch\n'), ((2932, 2992), 'numpy.array', 'np.array', (['[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]'], {}), '([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]])\n', (2940, 2992), True, 'import numpy as np\n'), ((3042, 3062), 'torch.FloatTensor', 'torch.FloatTensor', (['t'], {}), '(t)\n', (3059, 3062), False, 'import torch\n'), ((3390, 3424), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0], [1], [2]]'], {}), '([[0], [1], [2]])\n', (3407, 3424), False, 'import torch\n'), ((3532, 3560), 'torch.FloatTensor', 'torch.FloatTensor', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (3549, 3560), False, 'import torch\n'), ((3663, 3698), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (3680, 3698), False, 'import torch\n'), ((3703, 3738), 'torch.FloatTensor', 'torch.FloatTensor', (['[[5, 6], [7, 8]]'], {}), '([[5, 6], [7, 8]])\n', (3720, 3738), False, 'import torch\n'), ((3851, 3876), 'torch.FloatTensor', 'torch.FloatTensor', (['[1, 4]'], {}), '([1, 4])\n', (3868, 3876), False, 'import torch\n'), ((3881, 3906), 'torch.FloatTensor', 'torch.FloatTensor', (['[2, 5]'], {}), '([2, 5])\n', (3898, 3906), False, 'import torch\n'), ((3911, 3936), 'torch.FloatTensor', 'torch.FloatTensor', (['[3, 6]'], {}), '([3, 6])\n', (3928, 3936), False, 'import torch\n'), ((4144, 4185), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0, 1, 2], [2, 1, 0]]'], {}), '([[0, 1, 2], [2, 1, 0]])\n', (4161, 4185), False, 'import torch\n'), ((4345, 4380), 
'torch.FloatTensor', 'torch.FloatTensor', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (4362, 4380), False, 'import torch\n'), ((3745, 3769), 'torch.cat', 'torch.cat', (['[x, y]'], {'dim': '(0)'}), '([x, y], dim=0)\n', (3754, 3769), False, 'import torch\n'), ((3804, 3828), 'torch.cat', 'torch.cat', (['[x, y]'], {'dim': '(1)'}), '([x, y], dim=1)\n', (3813, 3828), False, 'import torch\n'), ((3943, 3965), 'torch.stack', 'torch.stack', (['[x, y, z]'], {}), '([x, y, z])\n', (3954, 3965), False, 'import torch\n'), ((4192, 4210), 'torch.ones_like', 'torch.ones_like', (['x'], {}), '(x)\n', (4207, 4210), False, 'import torch\n'), ((4253, 4272), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (4269, 4272), False, 'import torch\n')]
|